author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-04 10:15:43 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-04 10:15:43 +0000
commit    | 4a1c948bc688e5ab5e51d12d2f6dd6746e3ac2d9 (patch)
tree      | 7ee50e6ccf13f75f02fd343cf90750cbd9950194 /drivers
parent    | Releasing progress-linux version 6.1.82-1~progress6.99u1. (diff)
download  | linux-4a1c948bc688e5ab5e51d12d2f6dd6746e3ac2d9.tar.xz, linux-4a1c948bc688e5ab5e51d12d2f6dd6746e3ac2d9.zip
Merging upstream version 6.1.85.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers')
426 files changed, 4510 insertions, 2130 deletions
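The per-file breakdown below comes from this merge commit. A similar drivers-only summary can be regenerated from a local kernel tree; a minimal sketch, assuming the upstream tags v6.1.82 and v6.1.85 (hypothetical names, adjust to the tags actually fetched) are available:

    # One-line summary of drivers/ changes between the two upstream releases
    git diff --shortstat v6.1.82..v6.1.85 -- drivers/
    # Full per-file diffstat, matching the listing shown in this section
    git diff --stat v6.1.82..v6.1.85 -- drivers/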
diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c index eea2a2fa4..45f906103 100644 --- a/drivers/accessibility/speakup/synth.c +++ b/drivers/accessibility/speakup/synth.c @@ -208,8 +208,10 @@ void spk_do_flush(void) wake_up_process(speakup_task); } -void synth_write(const char *buf, size_t count) +void synth_write(const char *_buf, size_t count) { + const unsigned char *buf = (const unsigned char *) _buf; + while (count--) synth_buffer_add(*buf++); synth_start(); diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c index b91155ea9..c9131259f 100644 --- a/drivers/acpi/acpica/dbnames.c +++ b/drivers/acpi/acpica/dbnames.c @@ -550,8 +550,12 @@ acpi_db_walk_for_fields(acpi_handle obj_handle, ACPI_FREE(buffer.pointer); buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; - acpi_evaluate_object(obj_handle, NULL, NULL, &buffer); - + status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer); + if (ACPI_FAILURE(status)) { + acpi_os_printf("Could Not evaluate object %p\n", + obj_handle); + return (AE_OK); + } /* * Since this is a field unit, surround the output in braces */ diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index fc5b5b2c9..6f613eef2 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1431,6 +1431,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr) acpi_processor_registered--; if (acpi_processor_registered == 0) cpuidle_unregister_driver(&acpi_idle_driver); + + kfree(dev); } pr->flags.power_setup_done = 0; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 5ebeb0d7b..1c5c1a269 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -543,6 +543,39 @@ static const struct dmi_system_id lg_laptop[] = { DMI_MATCH(DMI_BOARD_NAME, "17U70P"), }, }, + { + /* Infinity E15-5A165-BM */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM5RG1E0009COM"), + }, + }, + { + /* Infinity E15-5A305-1M */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM5RGEE0016COM"), + }, + }, + { + /* Lunnen Ground 15 / AMD Ryzen 5 5500U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"), + DMI_MATCH(DMI_BOARD_NAME, "LLL5DAW"), + }, + }, + { + /* Lunnen Ground 16 / AMD Ryzen 7 5800U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"), + DMI_MATCH(DMI_BOARD_NAME, "LL6FA"), + }, + }, + { + /* MAIBENBEN X577 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MAIBENBEN"), + DMI_MATCH(DMI_BOARD_NAME, "X577"), + }, + }, { } }; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 94154a849..293cdf486 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -315,18 +315,14 @@ static int acpi_scan_device_check(struct acpi_device *adev) * again). 
*/ if (adev->handler) { - dev_warn(&adev->dev, "Already enumerated\n"); - return -EALREADY; + dev_dbg(&adev->dev, "Already enumerated\n"); + return 0; } error = acpi_bus_scan(adev->handle); if (error) { dev_warn(&adev->dev, "Namespace scan failure\n"); return error; } - if (!adev->handler) { - dev_warn(&adev->dev, "Enumeration failure\n"); - error = -ENODEV; - } } else { error = acpi_scan_device_not_present(adev); } diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 1790a2ecb..17119e8dc 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -671,11 +671,6 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets"); static void ahci_pci_save_initial_config(struct pci_dev *pdev, struct ahci_host_priv *hpriv) { - if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) { - dev_info(&pdev->dev, "ASM1166 has only six ports\n"); - hpriv->saved_port_map = 0x3f; - } - if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { dev_info(&pdev->dev, "JMB361 has only one port\n"); hpriv->saved_port_map = 1; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 1eaaf0141..b8034d194 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -711,8 +711,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) ehc->saved_ncq_enabled |= 1 << devno; /* If we are resuming, wake up the device */ - if (ap->pflags & ATA_PFLAG_RESUMING) + if (ap->pflags & ATA_PFLAG_RESUMING) { + dev->flags |= ATA_DFLAG_RESUMING; ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE; + } } } @@ -3089,6 +3091,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, return 0; err: + dev->flags &= ~ATA_DFLAG_RESUMING; *r_failed_dev = dev; return rc; } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a9da2f05e..a09548630 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -4652,6 +4652,7 @@ void ata_scsi_dev_rescan(struct work_struct *work) struct ata_link *link; struct ata_device *dev; unsigned long flags; + bool do_resume; int ret = 0; mutex_lock(&ap->scsi_scan_mutex); @@ -4673,7 +4674,15 @@ void ata_scsi_dev_rescan(struct work_struct *work) if (scsi_device_get(sdev)) continue; + do_resume = dev->flags & ATA_DFLAG_RESUMING; + spin_unlock_irqrestore(ap->lock, flags); + if (do_resume) { + ret = scsi_resume_device(sdev); + if (ret == -EWOULDBLOCK) + goto unlock; + dev->flags &= ~ATA_DFLAG_RESUMING; + } ret = scsi_rescan_device(sdev); scsi_device_put(sdev); spin_lock_irqsave(ap->lock, flags); diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 17f9062b0..9cf540017 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -787,37 +787,6 @@ static const struct ata_port_info mv_port_info[] = { }, }; -static const struct pci_device_id mv_pci_tbl[] = { - { PCI_VDEVICE(MARVELL, 0x5040), chip_504x }, - { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, - { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, - { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, - /* RocketRAID 1720/174x have different identifiers */ - { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, - { PCI_VDEVICE(TTI, 0x1740), chip_6042 }, - { PCI_VDEVICE(TTI, 0x1742), chip_6042 }, - - { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, - { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, - { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 }, - { PCI_VDEVICE(MARVELL, 0x6080), chip_608x }, - { PCI_VDEVICE(MARVELL, 0x6081), chip_608x }, - - { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x }, - - /* Adaptec 1430SA */ - { PCI_VDEVICE(ADAPTEC2, 0x0243), 
chip_7042 }, - - /* Marvell 7042 support */ - { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, - - /* Highpoint RocketRAID PCIe series */ - { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, - { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, - - { } /* terminate list */ -}; - static const struct mv_hw_ops mv5xxx_ops = { .phy_errata = mv5_phy_errata, .enable_leds = mv5_enable_leds, @@ -4301,6 +4270,36 @@ static int mv_pci_init_one(struct pci_dev *pdev, static int mv_pci_device_resume(struct pci_dev *pdev); #endif +static const struct pci_device_id mv_pci_tbl[] = { + { PCI_VDEVICE(MARVELL, 0x5040), chip_504x }, + { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, + { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, + { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, + /* RocketRAID 1720/174x have different identifiers */ + { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, + { PCI_VDEVICE(TTI, 0x1740), chip_6042 }, + { PCI_VDEVICE(TTI, 0x1742), chip_6042 }, + + { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, + { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, + { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 }, + { PCI_VDEVICE(MARVELL, 0x6080), chip_608x }, + { PCI_VDEVICE(MARVELL, 0x6081), chip_608x }, + + { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x }, + + /* Adaptec 1430SA */ + { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, + + /* Marvell 7042 support */ + { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, + + /* Highpoint RocketRAID PCIe series */ + { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, + { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, + + { } /* terminate list */ +}; static struct pci_driver mv_pci_driver = { .name = DRV_NAME, @@ -4313,6 +4312,7 @@ static struct pci_driver mv_pci_driver = { #endif }; +MODULE_DEVICE_TABLE(pci, mv_pci_tbl); /** * mv_print_info - Dump key info to kernel log for perusal. @@ -4485,7 +4485,6 @@ static void __exit mv_exit(void) MODULE_AUTHOR("Brett Russ"); MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); MODULE_LICENSE("GPL v2"); -MODULE_DEVICE_TABLE(pci, mv_pci_tbl); MODULE_VERSION(DRV_VERSION); MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 6ceec59cb..fa1966638 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -958,8 +958,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, offset -= (idx * window_size); idx++; - dist = ((long) (window_size - (offset + size))) >= 0 ? size : - (long) (window_size - offset); + dist = min(size, window_size - offset); memcpy_fromio(psource, dimm_mmio + offset / 4, dist); psource += dist; @@ -1006,8 +1005,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, readl(mmio + PDC_DIMM_WINDOW_CTLR); offset -= (idx * window_size); idx++; - dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size : - (long) (window_size - offset); + dist = min(size, window_size - offset); memcpy_toio(dimm_mmio + offset / 4, psource, dist); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); diff --git a/drivers/base/core.c b/drivers/base/core.c index 3078f44dc..8d87808cd 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -56,6 +56,7 @@ static bool fw_devlink_is_permissive(void); static void __fw_devlink_link_to_consumers(struct device *dev); static bool fw_devlink_drv_reg_done; static bool fw_devlink_best_effort; +static struct workqueue_struct *device_link_wq; /** * __fwnode_link_add - Create a link between two fwnode_handles. 
@@ -585,12 +586,26 @@ static void devlink_dev_release(struct device *dev) /* * It may take a while to complete this work because of the SRCU * synchronization in device_link_release_fn() and if the consumer or - * supplier devices get deleted when it runs, so put it into the "long" - * workqueue. + * supplier devices get deleted when it runs, so put it into the + * dedicated workqueue. */ - queue_work(system_long_wq, &link->rm_work); + queue_work(device_link_wq, &link->rm_work); } +/** + * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate + */ +void device_link_wait_removal(void) +{ + /* + * devlink removal jobs are queued in the dedicated work queue. + * To be sure that all removal jobs are terminated, ensure that any + * scheduled work has run to completion. + */ + flush_workqueue(device_link_wq); +} +EXPORT_SYMBOL_GPL(device_link_wait_removal); + static struct class devlink_class = { .name = "devlink", .owner = THIS_MODULE, @@ -4132,9 +4147,14 @@ int __init devices_init(void) sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); if (!sysfs_dev_char_kobj) goto char_kobj_err; + device_link_wq = alloc_workqueue("device_link_wq", 0, 0); + if (!device_link_wq) + goto wq_err; return 0; + wq_err: + kobject_put(sysfs_dev_char_kobj); char_kobj_err: kobject_put(sysfs_dev_block_kobj); block_kobj_err: diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index afd094dec..ca0c092ba 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -362,8 +362,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev) return; if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED && - wirq->status & WAKE_IRQ_DEDICATED_REVERSE) + wirq->status & WAKE_IRQ_DEDICATED_REVERSE) { enable_irq(wirq->irq); + wirq->status |= WAKE_IRQ_DEDICATED_ENABLED; + } } /** diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index d7317425b..cc9077b58 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -419,13 +419,16 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu rcu_read_lock(); for_each_netdev_rcu(&init_net, ifp) { dev_hold(ifp); - if (!is_aoe_netif(ifp)) - goto cont; + if (!is_aoe_netif(ifp)) { + dev_put(ifp); + continue; + } skb = new_skb(sizeof *h + sizeof *ch); if (skb == NULL) { printk(KERN_INFO "aoe: skb alloc failure\n"); - goto cont; + dev_put(ifp); + continue; } skb_put(skb, sizeof *h + sizeof *ch); skb->dev = ifp; @@ -440,9 +443,6 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu h->major = cpu_to_be16(aoemajor); h->minor = aoeminor; h->cmd = AOECMD_CFG; - -cont: - dev_put(ifp); } rcu_read_unlock(); } diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c index 63773a905..1e66c7a18 100644 --- a/drivers/block/aoe/aoenet.c +++ b/drivers/block/aoe/aoenet.c @@ -64,6 +64,7 @@ tx(int id) __must_hold(&txlock) pr_warn("aoe: packet could not be sent on %s. %s\n", ifp ? 
ifp->name : "netif", "consider increasing tx_queue_len"); + dev_put(ifp); spin_lock_irq(&txlock); } return 0; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9a53165de..5c4be8dda 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -2408,6 +2408,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info) } dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST); + if (!dev_list) { + nlmsg_free(reply); + ret = -EMSGSIZE; + goto out; + } + if (index == -1) { ret = idr_for_each(&nbd_index_idr, &status_cb, reply); if (ret) { diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 0211f704a..5277090c6 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -758,11 +758,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup); int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { + bdaddr_t bdaddr_swapped; struct sk_buff *skb; int err; - skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr, - HCI_EV_VENDOR, HCI_INIT_TIMEOUT); + baswap(&bdaddr_swapped, bdaddr); + + skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, + &bdaddr_swapped, HCI_EV_VENDOR, + HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err); diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 8bfef7f81..33956ddec 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -7,7 +7,6 @@ * * Copyright (C) 2007 Texas Instruments, Inc. * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved. - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. * * Acknowledgements: * This file is based on hci_ll.c, which was... @@ -226,6 +225,7 @@ struct qca_serdev { struct qca_power *bt_power; u32 init_speed; u32 oper_speed; + bool bdaddr_property_broken; const char *firmware_name; }; @@ -1788,6 +1788,7 @@ static int qca_setup(struct hci_uart *hu) const char *firmware_name = qca_get_firmware_name(hu); int ret; struct qca_btsoc_version ver; + struct qca_serdev *qcadev; const char *soc_name; ret = qca_check_speeds(hu); @@ -1845,16 +1846,11 @@ retry: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); - /* Set BDA quirk bit for reading BDA value from fwnode property - * only if that property exist in DT. 
- */ - if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) { - set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); - bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later"); - } else { - bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA"); - } + qcadev = serdev_device_get_drvdata(hu->serdev); + if (qcadev->bdaddr_property_broken) + set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks); hci_set_aosp_capable(hdev); @@ -2223,6 +2219,9 @@ static int qca_serdev_probe(struct serdev_device *serdev) if (!qcadev->oper_speed) BT_DBG("UART will pick default operating speed"); + qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev, + "qcom,local-bd-address-broken"); + if (data) qcadev->btsoc_type = data->soc_type; else @@ -2254,7 +2253,7 @@ static int qca_serdev_probe(struct serdev_device *serdev) qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR_OR_NULL(qcadev->bt_en) && + if (IS_ERR(qcadev->bt_en) && (data->soc_type == QCA_WCN6750 || data->soc_type == QCA_WCN6855)) { dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n"); @@ -2263,7 +2262,7 @@ static int qca_serdev_probe(struct serdev_device *serdev) qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl", GPIOD_IN); - if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && + if (IS_ERR(qcadev->sw_ctrl) && (data->soc_type == QCA_WCN6750 || data->soc_type == QCA_WCN6855 || data->soc_type == QCA_WCN7850)) @@ -2285,7 +2284,7 @@ static int qca_serdev_probe(struct serdev_device *serdev) default: qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR_OR_NULL(qcadev->bt_en)) { + if (IS_ERR(qcadev->bt_en)) { dev_warn(&serdev->dev, "failed to acquire enable gpio\n"); power_ctrl_enabled = false; } diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 7bfe998f3..bdc763390 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -186,11 +186,12 @@ config SUNXI_RSB config TEGRA_ACONNECT tristate "Tegra ACONNECT Bus Driver" - depends on ARCH_TEGRA_210_SOC + depends on ARCH_TEGRA depends on OF && PM help Driver for the Tegra ACONNECT bus which is used to interface with - the devices inside the Audio Processing Engine (APE) for Tegra210. + the devices inside the Audio Processing Engine (APE) for + Tegra210 and later. 
config TEGRA_GMI tristate "Tegra Generic Memory Interface bus driver" diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 33fedbd09..9004e0718 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -407,6 +407,9 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) if (IS_ERR(hw)) return ERR_CAST(hw); + if (!hw) + return NULL; + return hw->core; } diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c index ad0c7f350..60d8a27a9 100644 --- a/drivers/clk/hisilicon/clk-hi3519.c +++ b/drivers/clk/hisilicon/clk-hi3519.c @@ -130,7 +130,7 @@ static void hi3519_clk_unregister(struct platform_device *pdev) of_clk_del_provider(pdev->dev.of_node); hisi_clk_unregister_gate(hi3519_gate_clks, - ARRAY_SIZE(hi3519_mux_clks), + ARRAY_SIZE(hi3519_gate_clks), crg->clk_data); hisi_clk_unregister_mux(hi3519_mux_clks, ARRAY_SIZE(hi3519_mux_clks), diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c index 9ea1a80ac..027227655 100644 --- a/drivers/clk/hisilicon/clk-hi3559a.c +++ b/drivers/clk/hisilicon/clk-hi3559a.c @@ -491,7 +491,6 @@ static void hisi_clk_register_pll(struct hi3559av100_pll_clock *clks, clk = clk_register(NULL, &p_clk->hw); if (IS_ERR(clk)) { - devm_kfree(dev, p_clk); dev_err(dev, "%s: failed to register clock %s\n", __func__, clks[i].name); continue; diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index 2ad380139..7802dabb2 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -2144,7 +2144,9 @@ static struct clk_regmap *const axg_clk_regmaps[] = { &axg_vclk_input, &axg_vclk2_input, &axg_vclk_div, + &axg_vclk_div1, &axg_vclk2_div, + &axg_vclk2_div1, &axg_vclk_div2_en, &axg_vclk_div4_en, &axg_vclk_div6_en, diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c index 735adfefc..e792e0b13 100644 --- a/drivers/clk/qcom/dispcc-sdm845.c +++ b/drivers/clk/qcom/dispcc-sdm845.c @@ -759,6 +759,8 @@ static struct clk_branch disp_cc_mdss_vsync_clk = { static struct gdsc mdss_gdsc = { .gdscr = 0x3000, + .en_few_wait_val = 0x6, + .en_rest_wait_val = 0x5, .pd = { .name = "mdss_gdsc", }, diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c index 4c5c7a8f4..b9844e41c 100644 --- a/drivers/clk/qcom/gcc-ipq6018.c +++ b/drivers/clk/qcom/gcc-ipq6018.c @@ -1557,6 +1557,7 @@ static struct clk_regmap_div nss_ubi0_div_clk_src = { static const struct freq_tbl ftbl_pcie_aux_clk_src[] = { F(24000000, P_XO, 1, 0, 0), + { } }; static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = { @@ -1737,6 +1738,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = { F(160000000, P_GPLL0, 5, 0, 0), F(216000000, P_GPLL6, 5, 0, 0), F(308570000, P_GPLL6, 3.5, 0, 0), + { } }; static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = { diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index b2e83b389..b52c923a2 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -973,6 +973,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = { static const struct freq_tbl ftbl_pcie_aux_clk_src[] = { F(19200000, P_XO, 1, 0, 0), + { } }; static struct clk_rcg2 pcie0_aux_clk_src = { @@ -1078,6 +1079,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(160000000, P_GPLL0, 5, 0, 0), F(308570000, P_GPLL6, 3.5, 0, 0), + { } }; static struct clk_rcg2 sdcc1_ice_core_clk_src = { diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index 
6af08e0ca..ef15e8f11 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -4038,3 +4038,4 @@ module_exit(gcc_sdm845_exit); MODULE_DESCRIPTION("QTI GCC SDM845 Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:gcc-sdm845"); +MODULE_SOFTDEP("pre: rpmhpd"); diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c index e9f971359..5f373c10e 100644 --- a/drivers/clk/qcom/mmcc-apq8084.c +++ b/drivers/clk/qcom/mmcc-apq8084.c @@ -334,6 +334,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = { F(333430000, P_MMPLL1, 3.5, 0, 0), F(400000000, P_MMPLL0, 2, 0, 0), F(466800000, P_MMPLL1, 2.5, 0, 0), + { } }; static struct clk_rcg2 mmss_axi_clk_src = { @@ -358,6 +359,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = { F(150000000, P_GPLL0, 4, 0, 0), F(228570000, P_MMPLL0, 3.5, 0, 0), F(320000000, P_MMPLL0, 2.5, 0, 0), + { } }; static struct clk_rcg2 ocmemnoc_clk_src = { diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c index 17ed52046..eb2b0e220 100644 --- a/drivers/clk/qcom/mmcc-msm8974.c +++ b/drivers/clk/qcom/mmcc-msm8974.c @@ -279,6 +279,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = { F(291750000, P_MMPLL1, 4, 0, 0), F(400000000, P_MMPLL0, 2, 0, 0), F(466800000, P_MMPLL1, 2.5, 0, 0), + { } }; static struct clk_rcg2 mmss_axi_clk_src = { @@ -303,6 +304,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = { F(150000000, P_GPLL0, 4, 0, 0), F(291750000, P_MMPLL1, 4, 0, 0), F(400000000, P_MMPLL0, 2, 0, 0), + { } }; static struct clk_rcg2 ocmemnoc_clk_src = { diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c index e45e32804..d96c96a90 100644 --- a/drivers/clk/qcom/reset.c +++ b/drivers/clk/qcom/reset.c @@ -22,8 +22,8 @@ static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id) return 0; } -static int -qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) +static int qcom_reset_set_assert(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) { struct qcom_reset_controller *rst; const struct qcom_reset_map *map; @@ -33,21 +33,22 @@ qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) map = &rst->reset_map[id]; mask = map->bitmask ? map->bitmask : BIT(map->bit); - return regmap_update_bits(rst->regmap, map->reg, mask, mask); + regmap_update_bits(rst->regmap, map->reg, mask, assert ? mask : 0); + + /* Read back the register to ensure write completion, ignore the value */ + regmap_read(rst->regmap, map->reg, &mask); + + return 0; } -static int -qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) +static int qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) { - struct qcom_reset_controller *rst; - const struct qcom_reset_map *map; - u32 mask; - - rst = to_qcom_reset_controller(rcdev); - map = &rst->reset_map[id]; - mask = map->bitmask ? 
map->bitmask : BIT(map->bit); + return qcom_reset_set_assert(rcdev, id, true); +} - return regmap_update_bits(rst->regmap, map->reg, mask, 0); +static int qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) +{ + return qcom_reset_set_assert(rcdev, id, false); } const struct reset_control_ops qcom_reset_ops = { diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c index 27b668def..7a49b91c9 100644 --- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c @@ -159,7 +159,7 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = { DEF_MOD("cmt1", 911, R8A779F0_CLK_R), DEF_MOD("cmt2", 912, R8A779F0_CLK_R), DEF_MOD("cmt3", 913, R8A779F0_CLK_R), - DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M), + DEF_MOD("pfc0", 915, R8A779F0_CLK_CPEX), DEF_MOD("tsc", 919, R8A779F0_CLK_CL16M), DEF_MOD("ufs", 1514, R8A779F0_CLK_S0D4_HSC), }; diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c index d5b325e3c..e4c616921 100644 --- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c @@ -22,7 +22,7 @@ enum clk_ids { /* Core Clock Outputs exported to DT */ - LAST_DT_CORE_CLK = R8A779G0_CLK_R, + LAST_DT_CORE_CLK = R8A779G0_CLK_CP, /* External Input Clocks */ CLK_EXTAL, @@ -139,6 +139,7 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = { DEF_FIXED("svd2_vip", R8A779G0_CLK_SVD2_VIP, CLK_SV_VIP, 2, 1), DEF_FIXED("cbfusa", R8A779G0_CLK_CBFUSA, CLK_EXTAL, 2, 1), DEF_FIXED("cpex", R8A779G0_CLK_CPEX, CLK_EXTAL, 2, 1), + DEF_FIXED("cp", R8A779G0_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("viobus", R8A779G0_CLK_VIOBUS, CLK_VIO, 1, 1), DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1), DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1), @@ -169,10 +170,17 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = { DEF_MOD("i2c4", 522, R8A779G0_CLK_S0D6_PER), DEF_MOD("i2c5", 523, R8A779G0_CLK_S0D6_PER), DEF_MOD("wdt1:wdt0", 907, R8A779G0_CLK_R), - DEF_MOD("pfc0", 915, R8A779G0_CLK_CL16M), - DEF_MOD("pfc1", 916, R8A779G0_CLK_CL16M), - DEF_MOD("pfc2", 917, R8A779G0_CLK_CL16M), - DEF_MOD("pfc3", 918, R8A779G0_CLK_CL16M), + DEF_MOD("cmt0", 910, R8A779G0_CLK_R), + DEF_MOD("cmt1", 911, R8A779G0_CLK_R), + DEF_MOD("cmt2", 912, R8A779G0_CLK_R), + DEF_MOD("cmt3", 913, R8A779G0_CLK_R), + DEF_MOD("pfc0", 915, R8A779G0_CLK_CP), + DEF_MOD("pfc1", 916, R8A779G0_CLK_CP), + DEF_MOD("pfc2", 917, R8A779G0_CLK_CP), + DEF_MOD("pfc3", 918, R8A779G0_CLK_CP), + DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M), + DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER), + DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER), }; /* diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c index 541761e96..87e463ad4 100644 --- a/drivers/clk/samsung/clk-exynos850.c +++ b/drivers/clk/samsung/clk-exynos850.c @@ -572,7 +572,7 @@ static const struct samsung_div_clock apm_div_clks[] __initconst = { static const struct samsung_gate_clock apm_gate_clks[] __initconst = { GATE(CLK_GOUT_CLKCMU_CMGP_BUS, "gout_clkcmu_cmgp_bus", "dout_apm_bus", - CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, 0, 0), + CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_CLKCMU_CHUB_BUS, "gout_clkcmu_chub_bus", "mout_clkcmu_chub_bus", CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS, 21, 0, 0), @@ -936,19 +936,19 @@ static const struct samsung_fixed_rate_clock cmgp_fixed_clks[] __initconst = { static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = { 
MUX(CLK_MOUT_CMGP_ADC, "mout_cmgp_adc", mout_cmgp_adc_p, CLK_CON_MUX_CLK_CMGP_ADC, 0, 1), - MUX(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p, - CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1), - MUX(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p, - CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1), + MUX_F(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p, + CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1, CLK_SET_RATE_PARENT, 0), + MUX_F(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p, + CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1, CLK_SET_RATE_PARENT, 0), }; static const struct samsung_div_clock cmgp_div_clks[] __initconst = { DIV(CLK_DOUT_CMGP_ADC, "dout_cmgp_adc", "gout_clkcmu_cmgp_bus", CLK_CON_DIV_DIV_CLK_CMGP_ADC, 0, 4), - DIV(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0", - CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5), - DIV(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1", - CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5), + DIV_F(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0", + CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5, CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1", + CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5, CLK_SET_RATE_PARENT, 0), }; static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = { @@ -963,12 +963,12 @@ static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = { "gout_clkcmu_cmgp_bus", CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK, 21, CLK_IGNORE_UNUSED, 0), GATE(CLK_GOUT_CMGP_USI0_IPCLK, "gout_cmgp_usi0_ipclk", "dout_cmgp_usi0", - CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, 0, 0), + CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_CMGP_USI0_PCLK, "gout_cmgp_usi0_pclk", "gout_clkcmu_cmgp_bus", CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK, 21, 0, 0), GATE(CLK_GOUT_CMGP_USI1_IPCLK, "gout_cmgp_usi1_ipclk", "dout_cmgp_usi1", - CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, 0, 0), + CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_CMGP_USI1_PCLK, "gout_cmgp_usi1_pclk", "gout_clkcmu_cmgp_bus", CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK, 21, 0, 0), @@ -1409,8 +1409,9 @@ static const struct samsung_mux_clock peri_mux_clks[] __initconst = { mout_peri_uart_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART_USER, 4, 1), MUX(CLK_MOUT_PERI_HSI2C_USER, "mout_peri_hsi2c_user", mout_peri_hsi2c_user_p, PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER, 4, 1), - MUX(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user", mout_peri_spi_user_p, - PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1), + MUX_F(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user", + mout_peri_spi_user_p, PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1, + CLK_SET_RATE_PARENT, 0), }; static const struct samsung_div_clock peri_div_clks[] __initconst = { @@ -1420,8 +1421,8 @@ static const struct samsung_div_clock peri_div_clks[] __initconst = { CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1, 0, 5), DIV(CLK_DOUT_PERI_HSI2C2, "dout_peri_hsi2c2", "gout_peri_hsi2c2", CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2, 0, 5), - DIV(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user", - CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5), + DIV_F(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user", + CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5, CLK_SET_RATE_PARENT, 0), }; static const struct samsung_gate_clock peri_gate_clks[] __initconst = { @@ -1463,7 +1464,7 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = { "mout_peri_bus_user", CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0), GATE(CLK_GOUT_SPI0_IPCLK, "gout_spi0_ipclk", "dout_peri_spi0", - 
CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, 0, 0), + CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user", CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0), GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk", diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 7bdeaff2b..c28d3dacf 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -42,6 +42,7 @@ static void __iomem *zynq_clkc_base; #define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204) #define NUM_MIO_PINS 54 +#define CLK_NAME_LEN 16 #define DBG_CLK_CTRL_CLKACT_TRC BIT(0) #define DBG_CLK_CTRL_CPU_1XCLKACT BIT(1) @@ -215,7 +216,7 @@ static void __init zynq_clk_setup(struct device_node *np) int i; u32 tmp; int ret; - char *clk_name; + char clk_name[CLK_NAME_LEN]; unsigned int fclk_enable = 0; const char *clk_output_name[clk_max]; const char *cpu_parents[4]; @@ -426,12 +427,10 @@ static void __init zynq_clk_setup(struct device_node *np) "gem1_emio_mux", CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); - tmp = strlen("mio_clk_00x"); - clk_name = kmalloc(tmp, GFP_KERNEL); for (i = 0; i < NUM_MIO_PINS; i++) { int idx; - snprintf(clk_name, tmp, "mio_clk_%2.2d", i); + snprintf(clk_name, CLK_NAME_LEN, "mio_clk_%2.2d", i); idx = of_property_match_string(np, "clock-names", clk_name); if (idx >= 0) can_mio_mux_parents[i] = of_clk_get_parent_name(np, @@ -439,7 +438,6 @@ static void __init zynq_clk_setup(struct device_node *np) else can_mio_mux_parents[i] = dummy_nm; } - kfree(clk_name); clk_register_mux(NULL, "can_mux", periph_parents, 4, CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0, &canclk_lock); diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index 44a61dc6f..e1c773bb5 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c @@ -32,7 +32,7 @@ #define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */ #define GT_CONTROL_AUTO_INC BIT(3) /* banked */ #define GT_CONTROL_PRESCALER_SHIFT 8 -#define GT_CONTROL_PRESCALER_MAX 0xF +#define GT_CONTROL_PRESCALER_MAX 0xFF #define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \ GT_CONTROL_PRESCALER_SHIFT) diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c index 0b5c0af1c..626d53bf9 100644 --- a/drivers/comedi/drivers/comedi_test.c +++ b/drivers/comedi/drivers/comedi_test.c @@ -85,6 +85,8 @@ struct waveform_private { struct comedi_device *dev; /* parent comedi device */ u64 ao_last_scan_time; /* time of previous AO scan in usec */ unsigned int ao_scan_period; /* AO scan period in usec */ + bool ai_timer_enable:1; /* should AI timer be running? */ + bool ao_timer_enable:1; /* should AO timer be running? */ unsigned short ao_loopbacks[N_CHANS]; }; @@ -234,8 +236,12 @@ static void waveform_ai_timer(struct timer_list *t) time_increment = devpriv->ai_convert_time - now; else time_increment = 1; - mod_timer(&devpriv->ai_timer, - jiffies + usecs_to_jiffies(time_increment)); + spin_lock(&dev->spinlock); + if (devpriv->ai_timer_enable) { + mod_timer(&devpriv->ai_timer, + jiffies + usecs_to_jiffies(time_increment)); + } + spin_unlock(&dev->spinlock); } overrun: @@ -391,9 +397,12 @@ static int waveform_ai_cmd(struct comedi_device *dev, * Seem to need an extra jiffy here, otherwise timer expires slightly * early! 
*/ + spin_lock_bh(&dev->spinlock); + devpriv->ai_timer_enable = true; devpriv->ai_timer.expires = jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1; add_timer(&devpriv->ai_timer); + spin_unlock_bh(&dev->spinlock); return 0; } @@ -402,6 +411,9 @@ static int waveform_ai_cancel(struct comedi_device *dev, { struct waveform_private *devpriv = dev->private; + spin_lock_bh(&dev->spinlock); + devpriv->ai_timer_enable = false; + spin_unlock_bh(&dev->spinlock); if (in_softirq()) { /* Assume we were called from the timer routine itself. */ del_timer(&devpriv->ai_timer); @@ -493,8 +505,12 @@ static void waveform_ao_timer(struct timer_list *t) unsigned int time_inc = devpriv->ao_last_scan_time + devpriv->ao_scan_period - now; - mod_timer(&devpriv->ao_timer, - jiffies + usecs_to_jiffies(time_inc)); + spin_lock(&dev->spinlock); + if (devpriv->ao_timer_enable) { + mod_timer(&devpriv->ao_timer, + jiffies + usecs_to_jiffies(time_inc)); + } + spin_unlock(&dev->spinlock); } underrun: @@ -515,9 +531,12 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev, async->inttrig = NULL; devpriv->ao_last_scan_time = ktime_to_us(ktime_get()); + spin_lock_bh(&dev->spinlock); + devpriv->ao_timer_enable = true; devpriv->ao_timer.expires = jiffies + usecs_to_jiffies(devpriv->ao_scan_period); add_timer(&devpriv->ao_timer); + spin_unlock_bh(&dev->spinlock); return 1; } @@ -602,6 +621,9 @@ static int waveform_ao_cancel(struct comedi_device *dev, struct waveform_private *devpriv = dev->private; s->async->inttrig = NULL; + spin_lock_bh(&dev->spinlock); + devpriv->ao_timer_enable = false; + spin_unlock_bh(&dev->spinlock); if (in_softirq()) { /* Assume we were called from the timer routine itself. */ del_timer(&devpriv->ao_timer); diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index edc294ee5..90dcf26f0 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -320,7 +320,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu, if (target_perf < capacity) des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity); - min_perf = READ_ONCE(cpudata->highest_perf); + min_perf = READ_ONCE(cpudata->lowest_perf); if (_min_perf < capacity) min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity); diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index b74289a95..bea41ccab 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -14,10 +14,8 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mfd/syscon.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/regmap.h> diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index f644c5e32..552db816e 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -481,8 +481,11 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv) static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - struct private_data *priv = policy->driver_data; + struct private_data *priv; + if (!policy) + return 0; + priv = policy->driver_data; cpufreq_cpu_put(policy); return brcm_avs_get_frequency(priv->base); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 4aec4b2a5..8f8f1949d 100644 --- 
a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) if (!priv) return -ENOMEM; - if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL)) + if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL)) return -ENOMEM; cpumask_set_cpu(cpu, priv->cpus); diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index f0e0a35c7..7f326bb5f 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -10,8 +10,10 @@ #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/of_address.h> +#include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> #include <linux/slab.h> #define LUT_MAX_ENTRIES 32U @@ -295,7 +297,23 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = { static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev) { const void *data; - int ret; + int ret, cpu; + struct device *cpu_dev; + struct regulator *cpu_reg; + + /* Make sure that all CPU supplies are available before proceeding. */ + for_each_possible_cpu(cpu) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + return dev_err_probe(&pdev->dev, -EPROBE_DEFER, + "Failed to get cpu%d device\n", cpu); + + cpu_reg = devm_regulator_get(cpu_dev, "cpu"); + if (IS_ERR(cpu_reg)) + return dev_err_probe(&pdev->dev, PTR_ERR(cpu_reg), + "CPU%d regulator get failed\n", cpu); + } + data = of_device_get_match_data(&pdev->dev); if (!data) diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c index e3313ce63..88afc4994 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c @@ -9,7 +9,7 @@ #include <linux/cpufreq.h> #include <linux/module.h> -#include <linux/of_platform.h> +#include <linux/of.h> #include <asm/machdep.h> #include <asm/cell-regs.h> diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c index 4fba3637b..6f0c32592 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c @@ -11,7 +11,6 @@ #include <linux/types.h> #include <linux/timer.h> #include <linux/init.h> -#include <linux/of_platform.h> #include <linux/pm_qos.h> #include <linux/slab.h> diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index a577586b2..cb03bfb04 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -22,7 +22,6 @@ #include <linux/module.h> #include <linux/nvmem-consumer.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/pm_opp.h> diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index fd2c16821..ac719aca4 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -14,7 +14,7 @@ #include <linux/cpumask.h> #include <linux/export.h> #include <linux/module.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/scpi_protocol.h> #include <linux/slab.h> diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index 1a63aeea8..9c542e723 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -13,7 +13,7 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include 
<linux/pm_opp.h> #include <linux/regmap.h> diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index f64180dd2..61ef653bc 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -12,7 +12,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/of.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/regmap.h> #include <linux/slab.h> diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index d295f405c..865e50164 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -18,7 +18,6 @@ #include <linux/device.h> #include <linux/module.h> #include <linux/mutex.h> -#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/slab.h> diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index fe9bb2f35..4f36b5a91 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -95,18 +95,28 @@ static void adf_device_reset_worker(struct work_struct *work) if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) { /* The device hanged and we can't restart it so stop here */ dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); - kfree(reset_data); + if (reset_data->mode == ADF_DEV_RESET_ASYNC || + completion_done(&reset_data->compl)) + kfree(reset_data); WARN(1, "QAT: device restart failed. Device is unusable\n"); return; } adf_dev_restarted_notify(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); - /* The dev is back alive. Notify the caller if in sync mode */ - if (reset_data->mode == ADF_DEV_RESET_SYNC) - complete(&reset_data->compl); - else + /* + * The dev is back alive. Notify the caller if in sync mode + * + * If device restart will take a more time than expected, + * the schedule_reset() function can timeout and exit. This can be + * detected by calling the completion_done() function. In this case + * the reset_data structure needs to be freed here. 
+ */ + if (reset_data->mode == ADF_DEV_RESET_ASYNC || + completion_done(&reset_data->compl)) kfree(reset_data); + else + complete(&reset_data->compl); } static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, @@ -139,8 +149,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Reset device timeout expired\n"); ret = -EFAULT; + } else { + kfree(reset_data); } - kfree(reset_data); return ret; } return 0; diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c index bf1f421e0..74bd3eb63 100644 --- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c +++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c @@ -231,7 +231,10 @@ static int zynqmp_handle_aes_req(struct crypto_engine *engine, err = zynqmp_aes_aead_cipher(areq); } + local_bh_disable(); crypto_finalize_aead_request(engine, areq, err); + local_bh_enable(); + return 0; } diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c index 0a9b099d0..d90479d83 100644 --- a/drivers/dma-buf/st-dma-fence-chain.c +++ b/drivers/dma-buf/st-dma-fence-chain.c @@ -84,11 +84,11 @@ static int sanitycheck(void *arg) return -ENOMEM; chain = mock_chain(NULL, f, 1); - if (!chain) + if (chain) + dma_fence_enable_sw_signaling(chain); + else err = -ENOMEM; - dma_fence_enable_sw_signaling(chain); - dma_fence_signal(f); dma_fence_put(f); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 81de833cc..66ef0a111 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -665,16 +665,16 @@ config TEGRA20_APB_DMA config TEGRA210_ADMA tristate "NVIDIA Tegra210 ADMA support" - depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) + depends on (ARCH_TEGRA || COMPILE_TEST) select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help - Support for the NVIDIA Tegra210 ADMA controller driver. The - DMA controller has multiple DMA channels and is used to service - various audio clients in the Tegra210 audio processing engine - (APE). This DMA controller transfers data from memory to - peripheral and vice versa. It does not support memory to - memory data transfer. + Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA + controller driver. The DMA controller has multiple DMA channels + and is used to service various audio clients in the Tegra210 + audio processing engine (APE). This DMA controller transfers + data from memory to peripheral and vice versa. It does not + support memory to memory data transfer. config TIMB_DMA tristate "Timberdale FPGA DMA support" diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 8aaa7fcb2..401a77e3b 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -500,7 +500,19 @@ static void bm_work(struct work_struct *work) fw_notice(card, "phy config: new root=%x, gap_count=%d\n", new_root_id, gap_count); fw_send_phy_config(card, new_root_id, generation, gap_count); - reset_bus(card, true); + /* + * Where possible, use a short bus reset to minimize + * disruption to isochronous transfers. But in the event + * of a gap count inconsistency, use a long bus reset. + * + * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus + * may set different gap counts after a bus reset. On a mixed + * 1394/1394a bus, a short bus reset can get doubled. Some + * nodes may treat the double reset as one bus reset and others + * may treat it as two, causing a gap count inconsistency + * again. Using a long bus reset prevents this. 
+ */ + reset_bus(card, card->gap_count != 0); /* Will allocate broadcast channel after the reset. */ goto out; } diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index ac0bd51ef..42ea308a2 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -171,6 +171,13 @@ static int smc_chan_free(int id, void *p, void *data) struct scmi_chan_info *cinfo = p; struct scmi_smc *scmi_info = cinfo->transport_info; + /* + * Different protocols might share the same chan info, so a previous + * smc_chan_free call might have already freed the structure. + */ + if (!scmi_info) + return 0; + /* Ignore any possible further reception on the IRQ path */ if (scmi_info->irq > 0) free_irq(scmi_info->irq, scmi_info); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 9077353d1..28d4defc5 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -194,6 +194,8 @@ static bool generic_ops_supported(void) name_size = sizeof(name); + if (!efi.get_next_variable) + return false; status = efi.get_next_variable(&name_size, &name, &guid); if (status == EFI_UNSUPPORTED) return false; diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index 7ba05719a..fff826f56 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -119,7 +119,7 @@ efi_status_t efi_random_alloc(unsigned long size, continue; } - target = round_up(md->phys_addr, align) + target_slot * align; + target = round_up(max_t(u64, md->phys_addr, alloc_min), align) + target_slot * align; pages = size / EFI_PAGE_SIZE; status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 784e1b2ae..dc50dda40 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -21,6 +21,8 @@ #include "efistub.h" #include "x86-stub.h" +extern char _bss[], _ebss[]; + const efi_system_table_t *efi_system_table; const efi_dxe_services_table_t *efi_dxe_table; static efi_loaded_image_t *image = NULL; @@ -432,6 +434,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, efi_status_t status; char *cmdline_ptr; + if (efi_is_native()) + memset(_bss, 0, _ebss - _bss); + efi_system_table = sys_table_arg; /* Check if we were booted by the EFI firmware */ @@ -950,8 +955,6 @@ fail: void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg, struct boot_params *boot_params) { - extern char _bss[], _ebss[]; - memset(_bss, 0, _ebss - _bss); efi_stub_entry(handle, sys_table_arg, boot_params); } diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 3e8e5f4ff..700f71c95 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -679,7 +679,8 @@ config GPIO_UNIPHIER Say yes here to support UniPhier GPIOs. 
config GPIO_VF610 - def_bool y + bool "VF610 GPIO support" + default y if SOC_VF610 depends on ARCH_MXC select GPIOLIB_IRQCHIP help diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e636c7850..dd22d2559 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1342,6 +1342,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, void amdgpu_driver_release_kms(struct drm_device *dev); int amdgpu_device_ip_suspend(struct amdgpu_device *adev); +int amdgpu_device_prepare(struct drm_device *dev); int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); int amdgpu_device_resume(struct drm_device *dev, bool fbcon); u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6a4749c0c..b11690a81 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1639,6 +1639,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, } else { pr_info("switched off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + amdgpu_device_prepare(dev); amdgpu_device_suspend(dev, true); amdgpu_device_cache_pci_state(pdev); /* Shut down the device */ @@ -4168,6 +4169,43 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev) * Suspend & resume. */ /** + * amdgpu_device_prepare - prepare for device suspend + * + * @dev: drm dev pointer + * + * Prepare to put the hw in the suspend state (all asics). + * Returns 0 for success or an error on failure. + * Called at driver suspend. + */ +int amdgpu_device_prepare(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + int i, r; + + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + /* Evict the majority of BOs before starting suspend sequence */ + r = amdgpu_device_evict_resources(adev); + if (r) + return r; + + flush_delayed_work(&adev->gfx.gfx_off_delay_work); + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (!adev->ip_blocks[i].version->funcs->prepare_suspend) + continue; + r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev); + if (r) + return r; + } + + return 0; +} + +/** * amdgpu_device_suspend - initiate device suspend * * @dev: drm dev pointer diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f24c3a20e..9a5416331 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2391,8 +2391,9 @@ static int amdgpu_pmops_prepare(struct device *dev) /* Return a positive number here so * DPM_FLAG_SMART_SUSPEND works properly */ - if (amdgpu_device_supports_boco(drm_dev)) - return pm_runtime_suspended(dev); + if (amdgpu_device_supports_boco(drm_dev) && + pm_runtime_suspended(dev)) + return 1; /* if we will not support s3 or s2i for the device * then skip suspend @@ -2401,7 +2402,7 @@ static int amdgpu_pmops_prepare(struct device *dev) !amdgpu_acpi_is_s3_active(adev)) return 1; - return 0; + return amdgpu_device_prepare(drm_dev); } static void amdgpu_pmops_complete(struct device *dev) @@ -2600,6 +2601,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) if (amdgpu_device_supports_boco(drm_dev)) adev->mp1_state = PP_MP1_STATE_UNLOAD; + ret = amdgpu_device_prepare(drm_dev); + if (ret) + return ret; ret = amdgpu_device_suspend(drm_dev, false); if (ret) { adev->in_runpm = false; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 158b79188..dfb9d4200 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -838,6 +838,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, gtt->ttm.dma_address, flags); } + gtt->bound = true; } /* diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 1c5d9388a..cb6eb47aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -313,7 +313,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, DEBUG("IMM 0x%02X\n", val); return val; } - return 0; + break; case ATOM_ARG_PLL: idx = U8(*ptr); (*ptr)++; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 489c89465..c373a2a32 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -584,11 +584,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev) return AMD_RESET_METHOD_MODE1; } +static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + + /* Will reset for the following suspend abort cases. + * 1) Only reset limit on APU side, dGPU hasn't checked yet. + * 2) S3 suspend abort and TOS already launched. + */ + if (adev->flags & AMD_IS_APU && adev->in_s3 && + !adev->suspend_complete && + sol_reg) + return true; + + return false; +} + static int soc15_asic_reset(struct amdgpu_device *adev) { /* original raven doesn't have full asic reset */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN) || - (adev->apu_flags & AMD_APU_IS_RAVEN2)) + /* On the latest Raven, the GPU reset can be performed + * successfully. So now, temporarily enable it for the + * S3 suspend abort case. + */ + if (((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) && + !soc15_need_reset_on_resume(adev)) return 0; switch (soc15_asic_reset_method(adev)) { @@ -1285,24 +1308,6 @@ static int soc15_common_suspend(void *handle) return soc15_common_hw_fini(adev); } -static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) -{ - u32 sol_reg; - - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - - /* Will reset for the following suspend abort cases. - * 1) Only reset limit on APU side, dGPU hasn't checked yet. - * 2) S3 suspend abort and TOS already launched. 
- */ - if (adev->flags & AMD_IS_APU && adev->in_s3 && - !adev->suspend_complete && - sol_reg) - return true; - - return false; -} - static int soc15_common_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index ec8a576ac..3c7d267f2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1349,7 +1349,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type); static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) { - return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) || + return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) || (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) || KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index a6c6f286a..ff460c980 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -10503,14 +10503,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (range->flags != 1) continue; - amdgpu_dm_connector->min_vfreq = range->min_vfreq; - amdgpu_dm_connector->max_vfreq = range->max_vfreq; - amdgpu_dm_connector->pixel_clock_mhz = - range->pixel_clock_mhz * 10; - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; connector->display_info.monitor_range.max_vfreq = range->max_vfreq; + if (edid->revision >= 4) { + if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ) + connector->display_info.monitor_range.min_vfreq += 255; + if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ) + connector->display_info.monitor_range.max_vfreq += 255; + } + + amdgpu_dm_connector->min_vfreq = + connector->display_info.monitor_range.min_vfreq; + amdgpu_dm_connector->max_vfreq = + connector->display_info.monitor_range.max_vfreq; + amdgpu_dm_connector->pixel_clock_mhz = + range->pixel_clock_mhz * 10; + break; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index ee242d9d8..ff7dd17ad 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1358,7 +1358,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, const uint32_t rd_buf_size = 10; struct pipe_ctx *pipe_ctx; ssize_t result = 0; - int i, r, str_len = 30; + int i, r, str_len = 10; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 009b5861a..d6c5d48c8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1854,6 +1854,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, { struct dpp *dpp = pipe_ctx->plane_res.dpp; + if (!stream) + return false; + if (dpp == NULL) return false; @@ -1876,8 +1879,8 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, } else dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS); - if (stream != NULL && stream->ctx != NULL && - stream->out_transfer_func != NULL) { + if (stream->ctx && + stream->out_transfer_func) { log_tf(stream->ctx, stream->out_transfer_func, dpp->regamma_params.hw_points_num); diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 72bec33e3..0225b2c96 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -651,10 +651,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) if (pipe_ctx == NULL) return; - if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) + if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) { pipe_ctx->stream_res.stream_enc->funcs->set_avmute( pipe_ctx->stream_res.stream_enc, enable); + + /* Wait for two frame to make sure AV mute is sent out */ + if (enable) { + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + } + } } void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index ee67a35c2..ff930a71e 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index; if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f175e65b8..34467427c 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -294,6 +294,7 @@ struct amd_ip_funcs { int (*hw_init)(void *handle); int (*hw_fini)(void *handle); void (*late_fini)(void *handle); + int (*prepare_suspend)(void *handle); int (*suspend)(void *handle); int (*resume)(void *handle); bool (*is_idle)(void *handle); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 29f3d8431..cdb406690 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2344,6 +2344,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); int err, ret; + u32 pwm_mode; int value; if (amdgpu_in_reset(adev)) @@ -2355,13 +2356,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, if (err) return err; + if (value == 0) + pwm_mode = AMD_FAN_CTRL_NONE; + else if (value == 1) + pwm_mode = AMD_FAN_CTRL_MANUAL; + else if (value == 2) + pwm_mode = AMD_FAN_CTRL_AUTO; + else + return -EINVAL; + ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (ret < 0) { pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return ret; } - ret = amdgpu_dpm_set_fan_control_mode(adev, value); + ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); diff --git 
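The amdgpu_hwmon_set_pwm1_enable() hunk above stops passing the raw sysfs integer straight to amdgpu_dpm_set_fan_control_mode() and instead validates it and maps it to a fan-control mode first. A minimal userspace sketch of that translation, with a placeholder enum standing in for AMD_FAN_CTRL_NONE/MANUAL/AUTO:

/*
 * Sketch of the sysfs value -> fan-control-mode translation; the enum
 * here is a stand-in, not the kernel's AMD_FAN_CTRL_* definitions.
 */
#include <errno.h>
#include <stdio.h>

enum fan_ctrl_mode { FAN_CTRL_NONE, FAN_CTRL_MANUAL, FAN_CTRL_AUTO };

static int pwm1_enable_to_mode(int value, enum fan_ctrl_mode *mode)
{
	switch (value) {
	case 0: *mode = FAN_CTRL_NONE;   break;	/* no fan speed control */
	case 1: *mode = FAN_CTRL_MANUAL; break;	/* manual pwm1 writes */
	case 2: *mode = FAN_CTRL_AUTO;   break;	/* firmware-managed */
	default:
		return -EINVAL;	/* reject anything else before touching hardware */
	}
	return 0;
}

int main(void)
{
	enum fan_ctrl_mode mode;

	if (!pwm1_enable_to_mode(2, &mode))
		printf("mode = %d\n", mode);	/* prints 2 (auto) */
	return 0;
}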
a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index f634371c7..7fd3de89e 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -207,19 +207,24 @@ EXPORT_SYMBOL(drm_panel_disable); * The modes probed from the panel are automatically added to the connector * that the panel is attached to. * - * Return: The number of modes available from the panel on success or a - * negative error code on failure. + * Return: The number of modes available from the panel on success, or 0 on + * failure (no modes). */ int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) { if (!panel) - return -EINVAL; + return 0; - if (panel->funcs && panel->funcs->get_modes) - return panel->funcs->get_modes(panel, connector); + if (panel->funcs && panel->funcs->get_modes) { + int num; - return -EOPNOTSUPP; + num = panel->funcs->get_modes(panel, connector); + if (num > 0) + return num; + } + + return 0; } EXPORT_SYMBOL(drm_panel_get_modes); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 3b968ad18..52dbaf74f 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -362,6 +362,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector) count = connector_funcs->get_modes(connector); + /* The .get_modes() callback should not return negative values. */ + if (count < 0) { + drm_err(connector->dev, ".get_modes() returned %pe\n", + ERR_PTR(count)); + count = 0; + } + /* * Fallback for when DDC probe failed in drm_get_edid() and thus skipped * override/firmware EDID. diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 1d2b4fb4b..f29952a55 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -488,7 +488,7 @@ static const struct drm_driver etnaviv_drm_driver = { .desc = "etnaviv DRM", .date = "20151214", .major = 1, - .minor = 3, + .minor = 4, }; /* diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c index f2fc645c7..212e7050c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c @@ -135,6 +135,9 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu) { struct etnaviv_chip_identity *ident = &gpu->identity; + const u32 product_id = ident->product_id; + const u32 customer_id = ident->customer_id; + const u32 eco_id = ident->eco_id; int i; for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) { @@ -148,6 +151,12 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu) etnaviv_chip_identities[i].eco_id == ~0U)) { memcpy(ident, &etnaviv_chip_identities[i], sizeof(*ident)); + + /* Restore some id values as ~0U aka 'don't care' might been used. 
*/ + ident->product_id = product_id; + ident->customer_id = customer_id; + ident->eco_id = eco_id; + return true; } } diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index f5e1adfca..fb941a8c9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -316,14 +316,14 @@ static int vidi_get_modes(struct drm_connector *connector) */ if (!ctx->raw_edid) { DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n"); - return -EFAULT; + return 0; } edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL); if (!edid) { DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n"); - return -ENOMEM; + return 0; } drm_connector_update_edid_property(connector, edid); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 1a7194a65..be2d9cbaa 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector) int ret; if (!hdata->ddc_adpt) - return -ENODEV; + return 0; edid = drm_get_edid(connector, hdata->ddc_adpt); if (!edid) - return -ENODEV; + return 0; hdata->dvi_mode = !connector->display_info.is_hdmi; DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n", diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index a70b70617..9cc1ef2ca 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -3413,6 +3413,9 @@ static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_enc { const struct child_device_config *child = &devdata->child; + if (!devdata) + return false; + if (!intel_bios_encoder_supports_dp(devdata) || !intel_bios_encoder_supports_hdmi(devdata)) return false; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index ba14b18d6..2e7c52c2e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -378,6 +378,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj) { GEM_WARN_ON(obj->userptr.page_ref); + if (!obj->userptr.notifier.mm) + return; + mmu_interval_notifier_remove(&obj->userptr.notifier); obj->userptr.notifier.mm = NULL; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index b0a4a2dbe..feb0fc32a 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -253,9 +253,6 @@ static int __engine_park(struct intel_wakeref *wf) intel_engine_park_heartbeat(engine); intel_breadcrumbs_park(engine->breadcrumbs); - /* Must be reset upon idling, or we may miss the busy wakeup. */ - GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN); - if (engine->park) engine->park(engine); diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index f903ee1ce..eae138b9f 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -3270,6 +3270,9 @@ static void execlists_park(struct intel_engine_cs *engine) { cancel_timer(&engine->execlists.timer); cancel_timer(&engine->execlists.preempt); + + /* Reset upon idling, or we may delay the busy wakeup. 
*/ + WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN); } static void add_to_engine(struct i915_request *rq) diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 06723b2e9..64b6bc2de 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) int ret; if (!mode) - return -EINVAL; + return 0; ret = of_get_drm_display_mode(np, &imxpd->mode, &imxpd->bus_flags, OF_USE_NATIVE_MODE); if (ret) { drm_mode_destroy(connector->dev, mode); - return ret; + return 0; } drm_mode_copy(mode, &imxpd->mode); diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 0f1ca0b0d..d72c5bf4e 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -75,29 +75,34 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) } else { bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL); if (!bo->base.sgt) { - sg_free_table(&sgt); - return -ENOMEM; + ret = -ENOMEM; + goto err_out0; } } ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0); - if (ret) { - sg_free_table(&sgt); - kfree(bo->base.sgt); - bo->base.sgt = NULL; - return ret; - } + if (ret) + goto err_out1; *bo->base.sgt = sgt; if (vm) { ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT); if (ret) - return ret; + goto err_out2; } bo->heap_size = new_size; return 0; + +err_out2: + dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0); +err_out1: + kfree(bo->base.sgt); + bo->base.sgt = NULL; +err_out0: + sg_free_table(&sgt); + return ret; } int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 558000db4..beaaf4400 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -91,11 +91,13 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) struct drm_crtc *crtc = &mtk_crtc->base; unsigned long flags; - spin_lock_irqsave(&crtc->dev->event_lock, flags); - drm_crtc_send_vblank_event(crtc, mtk_crtc->event); - drm_crtc_vblank_put(crtc); - mtk_crtc->event = NULL; - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + if (mtk_crtc->event) { + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, mtk_crtc->event); + drm_crtc_vblank_put(crtc); + mtk_crtc->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } } static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 3e74c7c1b..d871b1dba 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -70,8 +70,8 @@ #define DSI_PS_WC 0x3fff #define DSI_PS_SEL (3 << 16) #define PACKED_PS_16BIT_RGB565 (0 << 16) -#define LOOSELY_PS_18BIT_RGB666 (1 << 16) -#define PACKED_PS_18BIT_RGB666 (2 << 16) +#define PACKED_PS_18BIT_RGB666 (1 << 16) +#define LOOSELY_PS_24BIT_RGB666 (2 << 16) #define PACKED_PS_24BIT_RGB888 (3 << 16) #define DSI_VSA_NL 0x20 @@ -366,10 +366,10 @@ static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi) ps_bpp_mode |= PACKED_PS_24BIT_RGB888; break; case MIPI_DSI_FMT_RGB666: - ps_bpp_mode |= PACKED_PS_18BIT_RGB666; + ps_bpp_mode |= LOOSELY_PS_24BIT_RGB666; break; case MIPI_DSI_FMT_RGB666_PACKED: - ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666; + ps_bpp_mode |= PACKED_PS_18BIT_RGB666; break; case 
MIPI_DSI_FMT_RGB565: ps_bpp_mode |= PACKED_PS_16BIT_RGB565; @@ -423,7 +423,7 @@ static void mtk_dsi_ps_control(struct mtk_dsi *dsi) dsi_tmp_buf_bpp = 3; break; case MIPI_DSI_FMT_RGB666: - tmp_reg = LOOSELY_PS_18BIT_RGB666; + tmp_reg = LOOSELY_PS_24BIT_RGB666; dsi_tmp_buf_bpp = 3; break; case MIPI_DSI_FMT_RGB666_PACKED: diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 25245ef38..3632f0768 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -228,6 +228,13 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc) return dpu_enc->wide_bus_en; } +bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc) +{ + const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + + return dpu_enc->dsc ? true : false; +} + int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc; @@ -1864,7 +1871,9 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, dsc_common_mode = 0; pic_width = dsc->pic_width; - dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL; + dsc_common_mode = DSC_MODE_SPLIT_PANEL; + if (dpu_encoder_use_dsc_merge(enc_master->parent)) + dsc_common_mode |= DSC_MODE_MULTIPLEX; if (enc_master->intf_mode == INTF_MODE_VIDEO) dsc_common_mode |= DSC_MODE_VIDEO; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index 9e7236ef3..a71efa2b9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -176,6 +176,13 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc); bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc); /** + * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled + * for the encoder. 
+ * @drm_enc: Pointer to previously created drm encoder structure + */ +bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc); + +/** * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained * in virtual encoder that can collect CRC values * @drm_enc: Pointer to previously created drm encoder structure diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index 2c1464666..2baade1cd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -100,6 +100,7 @@ static void drm_mode_to_intf_timing_params( } timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent); + timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent); /* * for DP, divide the horizonal parameters by 2 when @@ -256,12 +257,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine( mode.htotal >>= 1; mode.hsync_start >>= 1; mode.hsync_end >>= 1; + mode.hskew >>= 1; DPU_DEBUG_VIDENC(phys_enc, - "split_role %d, halve horizontal %d %d %d %d\n", + "split_role %d, halve horizontal %d %d %d %d %d\n", phys_enc->split_role, mode.hdisplay, mode.htotal, - mode.hsync_start, mode.hsync_end); + mode.hsync_start, mode.hsync_end, + mode.hskew); } drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index 384558d2f..1debac4fc 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -154,13 +154,8 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx, hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width; display_hctl = (hsync_end_x << 16) | hsync_start_x; - /* - * DATA_HCTL_EN controls data timing which can be different from - * video timing. It is recommended to enable it for all cases, except - * if compression is enabled in 1 pixel per clock mode - */ if (p->wide_bus_en) - intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN; + intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN; data_width = p->width; @@ -230,6 +225,14 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx, DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg); DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format); if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) { + /* + * DATA_HCTL_EN controls data timing which can be different from + * video timing. 
It is recommended to enable it for all cases, except + * if compression is enabled in 1 pixel per clock mode + */ + if (!(p->compression_en && !p->wide_bus_en)) + intf_cfg2 |= INTF_CFG2_DATA_HCTL_EN; + DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2); DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl); DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index e75339b96..7f502c8be 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -33,6 +33,7 @@ struct intf_timing_params { u32 hsync_skew; bool wide_bus_en; + bool compression_en; }; struct intf_prog_fetch { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 126b3c6e1..f2dca41e4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1194,6 +1194,8 @@ out: drm_vma_node_unmap(&nvbo->bo.base.vma_node, bdev->dev_mapping); nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource); + nvbo->bo.resource->bus.offset = 0; + nvbo->bo.resource->bus.addr = NULL; goto retry; } diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 20fe53815..6ca4a46a8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -379,9 +379,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk) dma_addr_t *dma_addrs; struct nouveau_fence *fence; - src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL); - dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL); - dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL); + src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL); + dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL); + dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL); migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT, npages); @@ -407,11 +407,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk) migrate_device_pages(src_pfns, dst_pfns, npages); nouveau_dmem_fence_done(&fence); migrate_device_finalize(src_pfns, dst_pfns, npages); - kfree(src_pfns); - kfree(dst_pfns); + kvfree(src_pfns); + kvfree(dst_pfns); for (i = 0; i < npages; i++) dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL); - kfree(dma_addrs); + kvfree(dma_addrs); } void diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 42584d8a9..bfcddd4aa 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -413,8 +413,7 @@ static int panel_edp_unprepare(struct drm_panel *panel) if (!p->prepared) return 0; - pm_runtime_mark_last_busy(panel->dev); - ret = pm_runtime_put_autosuspend(panel->dev); + ret = pm_runtime_put_sync_suspend(panel->dev); if (ret < 0) return ret; p->prepared = false; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 55d243048..40b631445 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -379,19 +379,19 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev) gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO, - val, !val, 1, 1000); + val, !val, 1, 2000); if (ret) dev_err(pfdev->dev, "shader power transition timeout"); gpu_write(pfdev, TILER_PWROFF_LO, 
pfdev->features.tiler_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO, - val, !val, 1, 1000); + val, !val, 1, 2000); if (ret) dev_err(pfdev->dev, "tiler power transition timeout"); gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present); ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO, - val, !val, 0, 1000); + val, !val, 0, 2000); if (ret) dev_err(pfdev->dev, "l2 power transition timeout"); } diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 927e5f42e..3e48cbb52 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -813,7 +813,7 @@ int ni_init_microcode(struct radeon_device *rdev) err = 0; } else if (rdev->smc_fw->size != smc_req_size) { pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n", - rdev->mc_fw->size, fw_name); + rdev->smc_fw->size, fw_name); err = -EINVAL; } } diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index f51774866..8f230f4c0 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -411,7 +411,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi, hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF); hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF); - value = mode->hsync_start - mode->hdisplay; + value = mode->htotal - mode->hsync_start; hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF); hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF); @@ -426,7 +426,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi, value = mode->vtotal - mode->vdisplay; hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF); - value = mode->vsync_start - mode->vdisplay; + value = mode->vtotal - mode->vsync_start; hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF); value = mode->vsync_end - mode->vsync_start; diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 68f6ebb33..eb4a108c5 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -577,8 +577,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, ret = -EINVAL; goto err_put_port; } else if (ret) { - DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n"); - ret = -EPROBE_DEFER; + dev_err_probe(dev, ret, "failed to find panel and bridge node\n"); goto err_put_port; } if (lvds->panel) diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index d773ef485..b563988fb 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -524,7 +524,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev) if (err < 0) { dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n", dpaux->irq, err); - return err; + goto err_pm_disable; } disable_irq(dpaux->irq); @@ -544,7 +544,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev) */ err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C); if (err < 0) - return err; + goto err_pm_disable; #ifdef CONFIG_GENERIC_PINCONF dpaux->desc.name = dev_name(&pdev->dev); @@ -557,7 +557,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev) dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux); if (IS_ERR(dpaux->pinctrl)) { dev_err(&pdev->dev, "failed to register pincontrol\n"); - return PTR_ERR(dpaux->pinctrl); + err = PTR_ERR(dpaux->pinctrl); + goto err_pm_disable; } #endif /* enable and clear all interrupts */ @@ -573,10 +574,15 @@ static int tegra_dpaux_probe(struct 
platform_device *pdev) err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux); if (err < 0) { dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err); - return err; + goto err_pm_disable; } return 0; + +err_pm_disable: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return err; } static int tegra_dpaux_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index de1333dc0..7bb26655c 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1534,9 +1534,11 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi) np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0); if (np) { struct platform_device *gangster = of_find_device_by_node(np); + of_node_put(np); + if (!gangster) + return -EPROBE_DEFER; dsi->slave = platform_get_drvdata(gangster); - of_node_put(np); if (!dsi->slave) { put_device(&gangster->dev); @@ -1584,48 +1586,58 @@ static int tegra_dsi_probe(struct platform_device *pdev) if (!pdev->dev.pm_domain) { dsi->rst = devm_reset_control_get(&pdev->dev, "dsi"); - if (IS_ERR(dsi->rst)) - return PTR_ERR(dsi->rst); + if (IS_ERR(dsi->rst)) { + err = PTR_ERR(dsi->rst); + goto remove; + } } dsi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(dsi->clk)) { - dev_err(&pdev->dev, "cannot get DSI clock\n"); - return PTR_ERR(dsi->clk); + err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk), + "cannot get DSI clock\n"); + goto remove; } dsi->clk_lp = devm_clk_get(&pdev->dev, "lp"); if (IS_ERR(dsi->clk_lp)) { - dev_err(&pdev->dev, "cannot get low-power clock\n"); - return PTR_ERR(dsi->clk_lp); + err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp), + "cannot get low-power clock\n"); + goto remove; } dsi->clk_parent = devm_clk_get(&pdev->dev, "parent"); if (IS_ERR(dsi->clk_parent)) { - dev_err(&pdev->dev, "cannot get parent clock\n"); - return PTR_ERR(dsi->clk_parent); + err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent), + "cannot get parent clock\n"); + goto remove; } dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi"); if (IS_ERR(dsi->vdd)) { - dev_err(&pdev->dev, "cannot get VDD supply\n"); - return PTR_ERR(dsi->vdd); + err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd), + "cannot get VDD supply\n"); + goto remove; } err = tegra_dsi_setup_clocks(dsi); if (err < 0) { dev_err(&pdev->dev, "cannot setup clocks\n"); - return err; + goto remove; } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); dsi->regs = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(dsi->regs)) - return PTR_ERR(dsi->regs); + if (IS_ERR(dsi->regs)) { + err = PTR_ERR(dsi->regs); + goto remove; + } dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node); - if (IS_ERR(dsi->mipi)) - return PTR_ERR(dsi->mipi); + if (IS_ERR(dsi->mipi)) { + err = PTR_ERR(dsi->mipi); + goto remove; + } dsi->host.ops = &tegra_dsi_host_ops; dsi->host.dev = &pdev->dev; @@ -1653,9 +1665,12 @@ static int tegra_dsi_probe(struct platform_device *pdev) return 0; unregister: + pm_runtime_disable(&pdev->dev); mipi_dsi_host_unregister(&dsi->host); mipi_free: tegra_mipi_free(dsi->mipi); +remove: + tegra_output_remove(&dsi->output); return err; } diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 929120915..a688ecf08 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -166,6 +166,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, if (gem->size < size) { err = -EINVAL; + drm_gem_object_put(gem); goto unreference; } diff --git a/drivers/gpu/drm/tegra/hdmi.c 
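The tegra_fb_create() hunk above adds the missing drm_gem_object_put() on the undersized-buffer error path, so the GEM reference taken during lookup is released on every exit. A plain reference-count sketch of that pattern, not using the DRM API:

/* Illustrative only: every lookup that takes a reference must drop it on
 * early error paths as well as on the normal path. */
#include <stdio.h>

struct obj { int refcount; };

static void obj_get(struct obj *o) { o->refcount++; }
static void obj_put(struct obj *o) { o->refcount--; }

static int use_obj(struct obj *o, int size_ok)
{
	obj_get(o);

	if (!size_ok) {
		obj_put(o);	/* the missing put on this path was the leak */
		return -1;
	}

	/* ... normal use of the object ... */
	obj_put(o);
	return 0;
}

int main(void)
{
	struct obj o = { 0 };

	use_obj(&o, 0);
	printf("refcount after error path: %d\n", o.refcount);	/* prints 0 */
	return 0;
}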
b/drivers/gpu/drm/tegra/hdmi.c index bf240767d..c66764c0b 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -1776,7 +1776,6 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data) static int tegra_hdmi_probe(struct platform_device *pdev) { struct tegra_hdmi *hdmi; - struct resource *regs; int err; hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); @@ -1838,14 +1837,15 @@ static int tegra_hdmi_probe(struct platform_device *pdev) if (err < 0) return err; - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hdmi->regs = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(hdmi->regs)) - return PTR_ERR(hdmi->regs); + hdmi->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(hdmi->regs)) { + err = PTR_ERR(hdmi->regs); + goto remove; + } err = platform_get_irq(pdev, 0); if (err < 0) - return err; + goto remove; hdmi->irq = err; @@ -1854,18 +1854,18 @@ static int tegra_hdmi_probe(struct platform_device *pdev) if (err < 0) { dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", hdmi->irq, err); - return err; + goto remove; } platform_set_drvdata(pdev, hdmi); err = devm_pm_runtime_enable(&pdev->dev); if (err) - return err; + goto remove; err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); if (err) - return err; + goto remove; INIT_LIST_HEAD(&hdmi->client.list); hdmi->client.ops = &hdmi_client_ops; @@ -1875,10 +1875,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev) if (err < 0) { dev_err(&pdev->dev, "failed to register host1x client: %d\n", err); - return err; + goto remove; } return 0; + +remove: + tegra_output_remove(&hdmi->output); + return err; } static int tegra_hdmi_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 47d26b5d9..7ccd010a8 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -139,8 +139,10 @@ int tegra_output_probe(struct tegra_output *output) GPIOD_IN, "HDMI hotplug detect"); if (IS_ERR(output->hpd_gpio)) { - if (PTR_ERR(output->hpd_gpio) != -ENOENT) - return PTR_ERR(output->hpd_gpio); + if (PTR_ERR(output->hpd_gpio) != -ENOENT) { + err = PTR_ERR(output->hpd_gpio); + goto put_i2c; + } output->hpd_gpio = NULL; } @@ -149,7 +151,7 @@ int tegra_output_probe(struct tegra_output *output) err = gpiod_to_irq(output->hpd_gpio); if (err < 0) { dev_err(output->dev, "gpiod_to_irq(): %d\n", err); - return err; + goto put_i2c; } output->hpd_irq = err; @@ -162,7 +164,7 @@ int tegra_output_probe(struct tegra_output *output) if (err < 0) { dev_err(output->dev, "failed to request IRQ#%u: %d\n", output->hpd_irq, err); - return err; + goto put_i2c; } output->connector.polled = DRM_CONNECTOR_POLL_HPD; @@ -176,6 +178,12 @@ int tegra_output_probe(struct tegra_output *output) } return 0; + +put_i2c: + if (output->ddc) + i2c_put_adapter(output->ddc); + + return err; } void tegra_output_remove(struct tegra_output *output) diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index ff8fce36d..86e55e5d1 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -214,26 +214,28 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) rgb->clk = devm_clk_get(dc->dev, NULL); if (IS_ERR(rgb->clk)) { dev_err(dc->dev, "failed to get clock\n"); - return PTR_ERR(rgb->clk); + err = PTR_ERR(rgb->clk); + goto remove; } rgb->clk_parent = devm_clk_get(dc->dev, "parent"); if (IS_ERR(rgb->clk_parent)) { dev_err(dc->dev, "failed to get parent clock\n"); - return PTR_ERR(rgb->clk_parent); + err = 
PTR_ERR(rgb->clk_parent); + goto remove; } err = clk_set_parent(rgb->clk, rgb->clk_parent); if (err < 0) { dev_err(dc->dev, "failed to set parent clock: %d\n", err); - return err; + goto remove; } rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0"); if (IS_ERR(rgb->pll_d_out0)) { err = PTR_ERR(rgb->pll_d_out0); dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err); - return err; + goto remove; } if (dc->soc->has_pll_d2_out0) { @@ -241,13 +243,19 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) if (IS_ERR(rgb->pll_d2_out0)) { err = PTR_ERR(rgb->pll_d2_out0); dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err); - return err; + goto put_pll; } } dc->rgb = &rgb->output; return 0; + +put_pll: + clk_put(rgb->pll_d_out0); +remove: + tegra_output_remove(&rgb->output); + return err; } int tegra_dc_rgb_remove(struct tegra_dc *dc) diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index cb66a425d..896a77853 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -270,6 +270,16 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc, reinit_completion(&tcrtc->framedone_completion); + /* + * If a layer is left enabled when the videoport is disabled, and the + * vid pipeline that was used for the layer is taken into use on + * another videoport, the DSS will report sync lost issues. Disable all + * the layers here as a work-around. + */ + for (u32 layer = 0; layer < tidss->feat->num_planes; layer++) + dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer, + false); + dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport); if (!wait_for_completion_timeout(&tcrtc->framedone_completion, diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c index 42d50ec55..435b3b66a 100644 --- a/drivers/gpu/drm/tidss/tidss_plane.c +++ b/drivers/gpu/drm/tidss/tidss_plane.c @@ -211,7 +211,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss, drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs); - drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0, + drm_plane_create_zpos_property(&tplane->plane, tidss->num_planes, 0, num_planes - 1); ret = drm_plane_create_color_properties(&tplane->plane, diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index ea2eaf603..f69681891 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -497,7 +497,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) edid = drm_get_edid(connector, vc4_hdmi->ddc); cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid); if (!edid) - return -ENODEV; + return 0; drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index b909a3ce9..9d7a1b710 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1429,12 +1429,15 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw) root, "system_ttm"); ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM), root, "vram_ttm"); - ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR), - root, "gmr_ttm"); - ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB), - root, "mob_ttm"); - ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM), - root, "system_mob_ttm"); + if (vmw->has_gmr) + 
ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR), + root, "gmr_ttm"); + if (vmw->has_mob) { + ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB), + root, "mob_ttm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM), + root, "system_mob_ttm"); + } } static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index bc7f02e4e..2f7ac9114 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -447,7 +447,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, vmw_res_type(ctx) == vmw_res_dx_context) { for (i = 0; i < cotable_max; ++i) { res = vmw_context_cotable(ctx, i); - if (IS_ERR(res)) + if (IS_ERR_OR_NULL(res)) continue; ret = vmw_execbuf_res_val_add(sw_context, res, @@ -1259,6 +1259,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv, return -EINVAL; cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY); + if (IS_ERR_OR_NULL(cotable_res)) + return cotable_res ? PTR_ERR(cotable_res) : -EINVAL; ret = vmw_cotable_notify(cotable_res, cmd->body.queryId); return ret; @@ -2477,6 +2479,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, return ret; res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]); + if (IS_ERR_OR_NULL(res)) + return res ? PTR_ERR(res) : -EINVAL; ret = vmw_cotable_notify(res, cmd->defined_id); if (unlikely(ret != 0)) return ret; @@ -2562,8 +2566,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, so_type = vmw_so_cmd_to_type(header->id); res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]); - if (IS_ERR(res)) - return PTR_ERR(res); + if (IS_ERR_OR_NULL(res)) + return res ? PTR_ERR(res) : -EINVAL; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cotable_notify(res, cmd->defined_id); @@ -2682,6 +2686,8 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, return -EINVAL; res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER); + if (IS_ERR_OR_NULL(res)) + return res ? PTR_ERR(res) : -EINVAL; ret = vmw_cotable_notify(res, cmd->body.shaderId); if (ret) return ret; @@ -3003,6 +3009,8 @@ static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv, } res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT); + if (IS_ERR_OR_NULL(res)) + return res ? 
PTR_ERR(res) : -EINVAL; ret = vmw_cotable_notify(res, cmd->body.soid); if (ret) return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index 60e3cc537..b9e5c8cd3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -65,8 +65,11 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, ttm_resource_init(bo, place, *res); id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); - if (id < 0) + if (id < 0) { + ttm_resource_fini(man, *res); + kfree(*res); return id; + } spin_lock(&gman->lock); diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c index c936d6a51..9c963ad27 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c @@ -34,6 +34,8 @@ static int sensor_mask_override = -1; module_param_named(sensor_mask, sensor_mask_override, int, 0444); MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask"); +static bool intr_disable = true; + static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts) { union cmd_response cmd_resp; @@ -54,7 +56,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen cmd_base.ul = 0; cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR; - cmd_base.cmd_v2.intr_disable = 1; + cmd_base.cmd_v2.intr_disable = intr_disable; cmd_base.cmd_v2.period = info.period; cmd_base.cmd_v2.sensor_id = info.sensor_idx; cmd_base.cmd_v2.length = 16; @@ -72,7 +74,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx) cmd_base.ul = 0; cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR; - cmd_base.cmd_v2.intr_disable = 1; + cmd_base.cmd_v2.intr_disable = intr_disable; cmd_base.cmd_v2.period = 0; cmd_base.cmd_v2.sensor_id = sensor_idx; cmd_base.cmd_v2.length = 16; @@ -86,7 +88,7 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata) union sfh_cmd_base cmd_base; cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS; - cmd_base.cmd_v2.intr_disable = 1; + cmd_base.cmd_v2.intr_disable = intr_disable; cmd_base.cmd_v2.period = 0; cmd_base.cmd_v2.sensor_id = 0; @@ -288,6 +290,26 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata) return 0; } +static int mp2_disable_intr(const struct dmi_system_id *id) +{ + intr_disable = false; + return 0; +} + +static const struct dmi_system_id dmi_sfh_table[] = { + { + /* + * https://bugzilla.kernel.org/show_bug.cgi?id=218104 + */ + .callback = mp2_disable_intr, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook x360 435 G7"), + }, + }, + {} +}; + static const struct dmi_system_id dmi_nodevs[] = { { /* @@ -311,6 +333,8 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i if (dmi_first_match(dmi_nodevs)) return -ENODEV; + dmi_check_system(dmi_sfh_table); + privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL); if (!privdata) return -ENOMEM; diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h index dfb7cabd8..2b125cd97 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h @@ -89,10 +89,10 @@ enum mem_use_type { struct hpd_status { union { struct { - u32 human_presence_report : 4; - u32 human_presence_actual : 4; - u32 probablity : 8; u32 object_distance : 16; + u32 probablity : 8; + u32 human_presence_actual : 4; + u32 human_presence_report : 4; } shpd; u32 val; }; diff --git 
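The hpd_status hunk above reverses the bitfield order so that, with the usual little-endian layout where the first declared member occupies the least significant bits, object_distance lands in bits 0-15 of the raw u32 the firmware reports. A sketch that decodes the same layout with explicit shifts, which avoids relying on implementation-defined bitfield ordering altogether:

/*
 * Illustrative decode of the reordered hpd_status layout:
 * bits 0-15 distance, 16-23 probability, 24-27 actual, 28-31 report.
 */
#include <stdint.h>
#include <stdio.h>

struct hpd_fields {
	unsigned int object_distance;	/* bits 0-15  */
	unsigned int probability;	/* bits 16-23 */
	unsigned int presence_actual;	/* bits 24-27 */
	unsigned int presence_report;	/* bits 28-31 */
};

static struct hpd_fields hpd_decode(uint32_t val)
{
	struct hpd_fields f = {
		.object_distance = val & 0xffff,
		.probability	 = (val >> 16) & 0xff,
		.presence_actual = (val >> 24) & 0xf,
		.presence_report = (val >> 28) & 0xf,
	};
	return f;
}

int main(void)
{
	struct hpd_fields f = hpd_decode(0x1200beef);

	printf("dist=%u prob=%u actual=%u report=%u\n",
	       f.object_distance, f.probability,
	       f.presence_actual, f.presence_report);
	return 0;
}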
a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 149a3c743..f86c1ea83 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -54,10 +54,10 @@ struct lenovo_drvdata { /* 0: Up * 1: Down (undecided) * 2: Scrolling - * 3: Patched firmware, disable workaround */ u8 middlebutton_state; bool fn_lock; + bool middleclick_workaround_cptkbd; }; #define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) @@ -621,6 +621,36 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev, return count; } +static ssize_t attr_middleclick_workaround_show_cptkbd(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hid_device *hdev = to_hid_device(dev); + struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); + + return snprintf(buf, PAGE_SIZE, "%u\n", + cptkbd_data->middleclick_workaround_cptkbd); +} + +static ssize_t attr_middleclick_workaround_store_cptkbd(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct hid_device *hdev = to_hid_device(dev); + struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); + int value; + + if (kstrtoint(buf, 10, &value)) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + + cptkbd_data->middleclick_workaround_cptkbd = !!value; + + return count; +} + static struct device_attribute dev_attr_fn_lock = __ATTR(fn_lock, S_IWUSR | S_IRUGO, @@ -632,10 +662,16 @@ static struct device_attribute dev_attr_sensitivity_cptkbd = attr_sensitivity_show_cptkbd, attr_sensitivity_store_cptkbd); +static struct device_attribute dev_attr_middleclick_workaround_cptkbd = + __ATTR(middleclick_workaround, S_IWUSR | S_IRUGO, + attr_middleclick_workaround_show_cptkbd, + attr_middleclick_workaround_store_cptkbd); + static struct attribute *lenovo_attributes_cptkbd[] = { &dev_attr_fn_lock.attr, &dev_attr_sensitivity_cptkbd.attr, + &dev_attr_middleclick_workaround_cptkbd.attr, NULL }; @@ -686,23 +722,7 @@ static int lenovo_event_cptkbd(struct hid_device *hdev, { struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); - if (cptkbd_data->middlebutton_state != 3) { - /* REL_X and REL_Y events during middle button pressed - * are only possible on patched, bug-free firmware - * so set middlebutton_state to 3 - * to never apply workaround anymore - */ - if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD && - cptkbd_data->middlebutton_state == 1 && - usage->type == EV_REL && - (usage->code == REL_X || usage->code == REL_Y)) { - cptkbd_data->middlebutton_state = 3; - /* send middle button press which was hold before */ - input_event(field->hidinput->input, - EV_KEY, BTN_MIDDLE, 1); - input_sync(field->hidinput->input); - } - + if (cptkbd_data->middleclick_workaround_cptkbd) { /* "wheel" scroll events */ if (usage->type == EV_REL && (usage->code == REL_WHEEL || usage->code == REL_HWHEEL)) { @@ -1166,6 +1186,7 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev) cptkbd_data->middlebutton_state = 0; cptkbd_data->fn_lock = true; cptkbd_data->sensitivity = 0x05; + cptkbd_data->middleclick_workaround_cptkbd = true; lenovo_features_set_cptkbd(hdev); ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 5ec1f1741..3816fd06b 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2155,6 +2155,10 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + 
USB_VENDOR_ID_SYNAPTICS, 0xcddc) }, + + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0xce08) }, { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index 3bfd12ff4..6868db4ac 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c @@ -934,10 +934,21 @@ static const struct i2c_device_id amc6821_id[] = { MODULE_DEVICE_TABLE(i2c, amc6821_id); +static const struct of_device_id __maybe_unused amc6821_of_match[] = { + { + .compatible = "ti,amc6821", + .data = (void *)amc6821, + }, + { } +}; + +MODULE_DEVICE_TABLE(of, amc6821_of_match); + static struct i2c_driver amc6821_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "amc6821", + .of_match_table = of_match_ptr(amc6821_of_match), }, .probe_new = amc6821_probe, .id_table = amc6821_id, diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c index 8d8fa8e8a..20a9cddb3 100644 --- a/drivers/hwtracing/ptt/hisi_ptt.c +++ b/drivers/hwtracing/ptt/hisi_ptt.c @@ -654,6 +654,9 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event) int ret; u32 val; + if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type) + return -ENOENT; + if (event->cpu < 0) { dev_dbg(event->pmu->dev, "Per-task mode not supported\n"); return -EOPNOTSUPP; @@ -662,9 +665,6 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event) if (event->attach_state & PERF_ATTACH_TASK) return -EOPNOTSUPP; - if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type) - return -ENOENT; - ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config); if (ret < 0) return ret; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 9a4e9bf30..1c9708426 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1422,7 +1422,6 @@ static void i801_add_mux(struct i801_priv *priv) lookup->table[i] = GPIO_LOOKUP(mux_config->gpio_chip, mux_config->gpios[i], "mux", 0); gpiod_add_lookup_table(lookup); - priv->lookup = lookup; /* * Register the mux device, we use PLATFORM_DEVID_NONE here @@ -1436,7 +1435,10 @@ static void i801_add_mux(struct i801_priv *priv) sizeof(struct i2c_mux_gpio_platform_data)); if (IS_ERR(priv->mux_pdev)) { gpiod_remove_lookup_table(lookup); + devm_kfree(dev, lookup); dev_err(dev, "Failed to register i2c-mux-gpio device\n"); + } else { + priv->lookup = lookup; } } diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c index 7c7d78040..f1a41b925 100644 --- a/drivers/iio/accel/adxl367.c +++ b/drivers/iio/accel/adxl367.c @@ -1444,9 +1444,11 @@ static int adxl367_verify_devid(struct adxl367_state *st) unsigned int val; int ret; - ret = regmap_read_poll_timeout(st->regmap, ADXL367_REG_DEVID, val, - val == ADXL367_DEVID_AD, 1000, 10000); + ret = regmap_read(st->regmap, ADXL367_REG_DEVID, &val); if (ret) + return dev_err_probe(st->dev, ret, "Failed to read dev id\n"); + + if (val != ADXL367_DEVID_AD) return dev_err_probe(st->dev, -ENODEV, "Invalid dev id 0x%02X, expected 0x%02X\n", val, ADXL367_DEVID_AD); @@ -1543,6 +1545,8 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops, if (ret) return ret; + fsleep(15000); + ret = adxl367_verify_devid(st); if (ret) return ret; diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c index 3606efa25..5c0409152 100644 --- a/drivers/iio/accel/adxl367_i2c.c +++ b/drivers/iio/accel/adxl367_i2c.c @@ -11,7 +11,7 @@ #include "adxl367.h" -#define ADXL367_I2C_FIFO_DATA 0x42 
+#define ADXL367_I2C_FIFO_DATA 0x18 struct adxl367_i2c_state { struct regmap *regmap; diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 3a9b9a28d..453188db3 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1730,7 +1730,7 @@ static int assign_client_id(struct ib_client *client) { int ret; - down_write(&clients_rwsem); + lockdep_assert_held(&clients_rwsem); /* * The add/remove callbacks must be called in FIFO/LIFO order. To * achieve this we assign client_ids so they are sorted in @@ -1739,14 +1739,11 @@ static int assign_client_id(struct ib_client *client) client->client_id = highest_client_id; ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); if (ret) - goto out; + return ret; highest_client_id++; xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); - -out: - up_write(&clients_rwsem); - return ret; + return 0; } static void remove_client_id(struct ib_client *client) @@ -1776,25 +1773,35 @@ int ib_register_client(struct ib_client *client) { struct ib_device *device; unsigned long index; + bool need_unreg = false; int ret; refcount_set(&client->uses, 1); init_completion(&client->uses_zero); + + /* + * The devices_rwsem is held in write mode to ensure that a racing + * ib_register_device() sees a consisent view of clients and devices. + */ + down_write(&devices_rwsem); + down_write(&clients_rwsem); ret = assign_client_id(client); if (ret) - return ret; + goto out; - down_read(&devices_rwsem); + need_unreg = true; xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) { ret = add_client_context(device, client); - if (ret) { - up_read(&devices_rwsem); - ib_unregister_client(client); - return ret; - } + if (ret) + goto out; } - up_read(&devices_rwsem); - return 0; + ret = 0; +out: + up_write(&clients_rwsem); + up_write(&devices_rwsem); + if (need_unreg && ret) + ib_unregister_client(client); + return ret; } EXPORT_SYMBOL(ib_register_client); diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 1112afa0a..8748b65c8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -595,6 +595,13 @@ struct hns_roce_work { u32 queue_num; }; +enum hns_roce_cong_type { + CONG_TYPE_DCQCN, + CONG_TYPE_LDCP, + CONG_TYPE_HC3, + CONG_TYPE_DIP, +}; + struct hns_roce_qp { struct ib_qp ibqp; struct hns_roce_wq rq; @@ -639,6 +646,7 @@ struct hns_roce_qp { struct list_head sq_node; /* all send qps are on a list */ struct hns_user_mmap_entry *dwqe_mmap_entry; u32 config; + enum hns_roce_cong_type cong_type; }; struct hns_roce_ib_iboe { @@ -710,13 +718,6 @@ struct hns_roce_eq_table { struct hns_roce_eq *eq; }; -enum cong_type { - CONG_TYPE_DCQCN, - CONG_TYPE_LDCP, - CONG_TYPE_HC3, - CONG_TYPE_DIP, -}; - struct hns_roce_caps { u64 fw_ver; u8 num_ports; @@ -847,7 +848,7 @@ struct hns_roce_caps { u16 default_aeq_period; u16 default_aeq_arm_st; u16 default_ceq_arm_st; - enum cong_type cong_type; + enum hns_roce_cong_type cong_type; }; enum hns_roce_device_state { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 58fbb1d3b..d06b19e69 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4886,12 +4886,15 @@ static int check_cong_type(struct ib_qp *ibqp, struct hns_roce_congestion_algorithm *cong_alg) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - 
if (ibqp->qp_type == IB_QPT_UD) - hr_dev->caps.cong_type = CONG_TYPE_DCQCN; + if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_GSI) + hr_qp->cong_type = CONG_TYPE_DCQCN; + else + hr_qp->cong_type = hr_dev->caps.cong_type; /* different congestion types match different configurations */ - switch (hr_dev->caps.cong_type) { + switch (hr_qp->cong_type) { case CONG_TYPE_DCQCN: cong_alg->alg_sel = CONG_DCQCN; cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; @@ -4919,8 +4922,8 @@ static int check_cong_type(struct ib_qp *ibqp, default: ibdev_warn(&hr_dev->ib_dev, "invalid type(%u) for congestion selection.\n", - hr_dev->caps.cong_type); - hr_dev->caps.cong_type = CONG_TYPE_DCQCN; + hr_qp->cong_type); + hr_qp->cong_type = CONG_TYPE_DCQCN; cong_alg->alg_sel = CONG_DCQCN; cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; cong_alg->dip_vld = DIP_INVALID; @@ -4939,6 +4942,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr, struct hns_roce_congestion_algorithm cong_field; struct ib_device *ibdev = ibqp->device; struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); u32 dip_idx = 0; int ret; @@ -4951,7 +4955,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr, return ret; hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id + - hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE); + hr_qp->cong_type * HNS_ROCE_CONG_SIZE); hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID); hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel); hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL); diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c index 280d633d4..d691cdef5 100644 --- a/drivers/infiniband/hw/irdma/uk.c +++ b/drivers/infiniband/hw/irdma/uk.c @@ -1415,6 +1415,78 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp, } /** + * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ + * @ukinfo: qp initialization info + * @sq_shift: Returns shift of SQ + * @rq_shift: Returns shift of RQ + */ +void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift, + u8 *rq_shift) +{ + bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2; + + irdma_get_wqe_shift(ukinfo->uk_attrs, + imm_support ? ukinfo->max_sq_frag_cnt + 1 : + ukinfo->max_sq_frag_cnt, + ukinfo->max_inline_data, sq_shift); + + irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0, + rq_shift); + + if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) { + if (ukinfo->abi_ver > 4) + *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; + } +} + +/** + * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size. + * @ukinfo: qp initialization info + * @sq_depth: Returns depth of SQ + * @sq_shift: Returns shift of SQ + */ +int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo, + u32 *sq_depth, u8 *sq_shift) +{ + bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2; + int status; + + irdma_get_wqe_shift(ukinfo->uk_attrs, + imm_support ? ukinfo->max_sq_frag_cnt + 1 : + ukinfo->max_sq_frag_cnt, + ukinfo->max_inline_data, sq_shift); + status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size, + *sq_shift, sq_depth); + + return status; +} + +/** + * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size. 
+ * @ukinfo: qp initialization info + * @rq_depth: Returns depth of RQ + * @rq_shift: Returns shift of RQ + */ +int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo, + u32 *rq_depth, u8 *rq_shift) +{ + int status; + + irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0, + rq_shift); + + if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) { + if (ukinfo->abi_ver > 4) + *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; + } + + status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size, + *rq_shift, rq_depth); + + return status; +} + +/** * irdma_uk_qp_init - initialize shared qp * @qp: hw qp (user and kernel) * @info: qp initialization info @@ -1428,23 +1500,12 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) { int ret_code = 0; u32 sq_ring_size; - u8 sqshift, rqshift; qp->uk_attrs = info->uk_attrs; if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags || info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags) return -EINVAL; - irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift); - if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) { - irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt, - info->max_inline_data, &sqshift); - if (info->abi_ver > 4) - rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; - } else { - irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1, - info->max_inline_data, &sqshift); - } qp->qp_caps = info->qp_caps; qp->sq_base = info->sq; qp->rq_base = info->rq; @@ -1458,7 +1519,7 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) qp->sq_size = info->sq_size; qp->push_mode = false; qp->max_sq_frag_cnt = info->max_sq_frag_cnt; - sq_ring_size = qp->sq_size << sqshift; + sq_ring_size = qp->sq_size << info->sq_shift; IRDMA_RING_INIT(qp->sq_ring, sq_ring_size); IRDMA_RING_INIT(qp->initial_ring, sq_ring_size); if (info->first_sq_wq) { @@ -1473,9 +1534,9 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) qp->rq_size = info->rq_size; qp->max_rq_frag_cnt = info->max_rq_frag_cnt; qp->max_inline_data = info->max_inline_data; - qp->rq_wqe_size = rqshift; + qp->rq_wqe_size = info->rq_shift; IRDMA_RING_INIT(qp->rq_ring, qp->rq_size); - qp->rq_wqe_size_multiplier = 1 << rqshift; + qp->rq_wqe_size_multiplier = 1 << info->rq_shift; if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) qp->wqe_ops = iw_wqe_uk_ops_gen_1; else diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h index d0cdf609f..1e0e1a71d 100644 --- a/drivers/infiniband/hw/irdma/user.h +++ b/drivers/infiniband/hw/irdma/user.h @@ -295,6 +295,12 @@ void irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info); int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info); +void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift, + u8 *rq_shift); +int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo, + u32 *sq_depth, u8 *sq_shift); +int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo, + u32 *rq_depth, u8 *rq_shift); struct irdma_sq_uk_wr_trk_info { u64 wrid; u32 wr_len; @@ -374,8 +380,12 @@ struct irdma_qp_uk_init_info { u32 max_sq_frag_cnt; u32 max_rq_frag_cnt; u32 max_inline_data; + u32 sq_depth; + u32 rq_depth; u8 first_sq_wq; u8 type; + u8 sq_shift; + u8 rq_shift; int abi_ver; bool legacy_mode; }; diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 42c671f20..76c5f461f 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ 
b/drivers/infiniband/hw/irdma/verbs.c @@ -277,7 +277,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct irdma_alloc_ucontext_req req = {}; struct irdma_alloc_ucontext_resp uresp = {}; struct irdma_ucontext *ucontext = to_ucontext(uctx); - struct irdma_uk_attrs *uk_attrs; + struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN || udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN) @@ -292,7 +292,9 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx, ucontext->iwdev = iwdev; ucontext->abi_ver = req.userspace_ver; - uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; + if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR) + ucontext->use_raw_attrs = true; + /* GEN_1 legacy support with libi40iw */ if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) { if (uk_attrs->hw_rev != IRDMA_GEN_1) @@ -327,6 +329,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx, uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; uresp.hw_rev = uk_attrs->hw_rev; + uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR; if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) { rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); @@ -567,6 +570,86 @@ static void irdma_setup_virt_qp(struct irdma_device *iwdev, } /** + * irdma_setup_umode_qp - setup sq and rq size in user mode qp + * @iwdev: iwarp device + * @iwqp: qp ptr (user or kernel) + * @info: initialize info to return + * @init_attr: Initial QP create attributes + */ +static int irdma_setup_umode_qp(struct ib_udata *udata, + struct irdma_device *iwdev, + struct irdma_qp *iwqp, + struct irdma_qp_init_info *info, + struct ib_qp_init_attr *init_attr) +{ + struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, + struct irdma_ucontext, ibucontext); + struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; + struct irdma_create_qp_req req; + unsigned long flags; + int ret; + + ret = ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen)); + if (ret) { + ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_data fail\n"); + return ret; + } + + iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; + iwqp->user_mode = 1; + if (req.user_wqe_bufs) { + info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode; + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs, + &ucontext->qp_reg_mem_list); + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + + if (!iwqp->iwpbl) { + ret = -ENODATA; + ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n"); + return ret; + } + } + + if (!ucontext->use_raw_attrs) { + /** + * Maintain backward compat with older ABI which passes sq and + * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr. + * There is no way to compute the correct value of + * iwqp->max_send_wr/max_recv_wr in the kernel. 
+ */ + iwqp->max_send_wr = init_attr->cap.max_send_wr; + iwqp->max_recv_wr = init_attr->cap.max_recv_wr; + ukinfo->sq_size = init_attr->cap.max_send_wr; + ukinfo->rq_size = init_attr->cap.max_recv_wr; + irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift, + &ukinfo->rq_shift); + } else { + ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth, + &ukinfo->sq_shift); + if (ret) + return ret; + + ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth, + &ukinfo->rq_shift); + if (ret) + return ret; + + iwqp->max_send_wr = + (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift; + iwqp->max_recv_wr = + (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift; + ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift; + ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift; + } + + irdma_setup_virt_qp(iwdev, iwqp, info); + + return 0; +} + +/** * irdma_setup_kmode_qp - setup initialization for kernel mode qp * @iwdev: iwarp device * @iwqp: qp ptr (user or kernel) @@ -579,40 +662,28 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev, struct ib_qp_init_attr *init_attr) { struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem; - u32 sqdepth, rqdepth; - u8 sqshift, rqshift; u32 size; int status; struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; - struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; - irdma_get_wqe_shift(uk_attrs, - uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 : - ukinfo->max_sq_frag_cnt, - ukinfo->max_inline_data, &sqshift); - status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift, - &sqdepth); + status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth, + &ukinfo->sq_shift); if (status) return status; - if (uk_attrs->hw_rev == IRDMA_GEN_1) - rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; - else - irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0, - &rqshift); - - status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift, - &rqdepth); + status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth, + &ukinfo->rq_shift); if (status) return status; iwqp->kqp.sq_wrid_mem = - kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL); + kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL); if (!iwqp->kqp.sq_wrid_mem) return -ENOMEM; iwqp->kqp.rq_wrid_mem = - kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL); + kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL); + if (!iwqp->kqp.rq_wrid_mem) { kfree(iwqp->kqp.sq_wrid_mem); iwqp->kqp.sq_wrid_mem = NULL; @@ -622,7 +693,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev, ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem; ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem; - size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE; + size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE; size += (IRDMA_SHADOW_AREA_SIZE << 3); mem->size = ALIGN(size, 256); @@ -638,16 +709,18 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev, ukinfo->sq = mem->va; info->sq_pa = mem->pa; - ukinfo->rq = &ukinfo->sq[sqdepth]; - info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE); - ukinfo->shadow_area = ukinfo->rq[rqdepth].elem; - info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE); - ukinfo->sq_size = sqdepth >> sqshift; - ukinfo->rq_size = rqdepth >> rqshift; - ukinfo->qp_id = iwqp->ibqp.qp_num; - - init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift; - init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift; + ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth]; + 
info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE); + ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem; + info->shadow_area_pa = + info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE); + ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift; + ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift; + + iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift; + iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift; + init_attr->cap.max_send_wr = iwqp->max_send_wr; + init_attr->cap.max_recv_wr = iwqp->max_recv_wr; return 0; } @@ -805,18 +878,14 @@ static int irdma_create_qp(struct ib_qp *ibqp, struct irdma_device *iwdev = to_iwdev(ibpd->device); struct irdma_pci_f *rf = iwdev->rf; struct irdma_qp *iwqp = to_iwqp(ibqp); - struct irdma_create_qp_req req = {}; struct irdma_create_qp_resp uresp = {}; u32 qp_num = 0; int err_code; - int sq_size; - int rq_size; struct irdma_sc_qp *qp; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; struct irdma_qp_init_info init_info = {}; struct irdma_qp_host_ctx_info *ctx_info; - unsigned long flags; err_code = irdma_validate_qp_attrs(init_attr, iwdev); if (err_code) @@ -826,13 +895,10 @@ static int irdma_create_qp(struct ib_qp *ibqp, udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN)) return -EINVAL; - sq_size = init_attr->cap.max_send_wr; - rq_size = init_attr->cap.max_recv_wr; - init_info.vsi = &iwdev->vsi; init_info.qp_uk_init_info.uk_attrs = uk_attrs; - init_info.qp_uk_init_info.sq_size = sq_size; - init_info.qp_uk_init_info.rq_size = rq_size; + init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr; + init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr; init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data; @@ -874,7 +940,7 @@ static int irdma_create_qp(struct ib_qp *ibqp, iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE; init_info.pd = &iwpd->sc_pd; - init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num; + init_info.qp_uk_init_info.qp_id = qp_num; if (!rdma_protocol_roce(&iwdev->ibdev, 1)) init_info.qp_uk_init_info.first_sq_wq = 1; iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; @@ -882,36 +948,9 @@ static int irdma_create_qp(struct ib_qp *ibqp, init_waitqueue_head(&iwqp->mod_qp_waitq); if (udata) { - err_code = ib_copy_from_udata(&req, udata, - min(sizeof(req), udata->inlen)); - if (err_code) { - ibdev_dbg(&iwdev->ibdev, - "VERBS: ib_copy_from_data fail\n"); - goto error; - } - - iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; - iwqp->user_mode = 1; - if (req.user_wqe_bufs) { - struct irdma_ucontext *ucontext = - rdma_udata_to_drv_context(udata, - struct irdma_ucontext, - ibucontext); - - init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode; - spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); - iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs, - &ucontext->qp_reg_mem_list); - spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); - - if (!iwqp->iwpbl) { - err_code = -ENODATA; - ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n"); - goto error; - } - } init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; - irdma_setup_virt_qp(iwdev, iwqp, &init_info); + err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, + init_attr); } else { INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker); 
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER; @@ -966,8 +1005,6 @@ static int irdma_create_qp(struct ib_qp *ibqp, spin_lock_init(&iwqp->sc_qp.pfpdu.lock); iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; rf->qp_table[qp_num] = iwqp; - iwqp->max_send_wr = sq_size; - iwqp->max_recv_wr = rq_size; if (rdma_protocol_roce(&iwdev->ibdev, 1)) { if (dev->ws_add(&iwdev->vsi, 0)) { @@ -988,8 +1025,8 @@ static int irdma_create_qp(struct ib_qp *ibqp, if (rdma_protocol_iwarp(&iwdev->ibdev, 1)) uresp.lsmm = 1; } - uresp.actual_sq_size = sq_size; - uresp.actual_rq_size = rq_size; + uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size; + uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size; uresp.qp_id = qp_num; uresp.qp_caps = qp->qp_uk.qp_caps; diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h index 9f9e273bb..0bc0d0faa 100644 --- a/drivers/infiniband/hw/irdma/verbs.h +++ b/drivers/infiniband/hw/irdma/verbs.h @@ -18,7 +18,8 @@ struct irdma_ucontext { struct list_head qp_reg_mem_list; spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */ int abi_ver; - bool legacy_mode; + u8 legacy_mode : 1; + u8 use_raw_attrs : 1; }; struct irdma_pd { diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index f8e2baed2..7013ce205 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -2951,7 +2951,7 @@ DECLARE_UVERBS_NAMED_METHOD( MLX5_IB_METHOD_DEVX_OBJ_MODIFY, UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE, UVERBS_IDR_ANY_OBJECT, - UVERBS_ACCESS_WRITE, + UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_PTR_IN( MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN, diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c index 855f3f4fe..737db67a9 100644 --- a/drivers/infiniband/hw/mlx5/wr.c +++ b/drivers/infiniband/hw/mlx5/wr.c @@ -78,7 +78,7 @@ static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp, */ copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start, left); - memcpy(eseg->inline_hdr.start, pdata, copysz); + memcpy(eseg->inline_hdr.data, pdata, copysz); stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) - sizeof(eseg->inline_hdr.start) + copysz, 16); *size += stride / 16; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c index d3c436ead..4aa80c938 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c @@ -133,7 +133,7 @@ static ssize_t mpath_policy_store(struct device *dev, /* distinguish "mi" and "min-latency" with length */ len = strnlen(buf, NAME_MAX); - if (buf[len - 1] == '\n') + if (len && buf[len - 1] == '\n') len--; if (!strncasecmp(buf, "round-robin", 11) || diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index cffa93f11..fd6c260d5 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -3209,7 +3209,6 @@ static int srpt_add_one(struct ib_device *device) INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device, srpt_event_handler); - ib_register_event_handler(&sdev->event_handler); for (i = 1; i <= sdev->device->phys_port_cnt; i++) { sport = &sdev->port[i - 1]; @@ -3232,6 +3231,7 @@ static int srpt_add_one(struct ib_device *device) } } + ib_register_event_handler(&sdev->event_handler); spin_lock(&srpt_dev_lock); list_add_tail(&sdev->list, &srpt_dev_list); spin_unlock(&srpt_dev_lock); @@ -3242,7 +3242,6 @@ static int srpt_add_one(struct 
ib_device *device) err_port: srpt_unregister_mad_agent(sdev, i); - ib_unregister_event_handler(&sdev->event_handler); err_cm: if (sdev->cm_id) ib_destroy_cm_id(sdev->cm_id); diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c index c3937d2fc..a0f9978c6 100644 --- a/drivers/input/keyboard/gpio_keys_polled.c +++ b/drivers/input/keyboard/gpio_keys_polled.c @@ -319,12 +319,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev) error = devm_gpio_request_one(dev, button->gpio, flags, button->desc ? : DRV_NAME); - if (error) { - dev_err(dev, - "unable to claim gpio %u, err=%d\n", - button->gpio, error); - return error; - } + if (error) + return dev_err_probe(dev, error, + "unable to claim gpio %u\n", + button->gpio); bdata->gpiod = gpio_to_desc(button->gpio); if (!bdata->gpiod) { diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index dc5f7a156..dc19e7fb0 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -192,7 +192,7 @@ source "drivers/iommu/intel/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI - select DMAR_TABLE + select DMAR_TABLE if INTEL_IOMMU help Supports Interrupt remapping for IO-APIC and MSI devices. To use x2apic mode in the CPU's which support x2APIC enhancements or diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index f6e64c985..cc94ac666 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -2047,6 +2047,9 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) /* Prevent binding other PCI device drivers to IOMMU devices */ iommu->dev->match_driver = false; + /* ACPI _PRT won't have an IRQ for IOMMU */ + iommu->dev->irq_managed = 1; + pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, &iommu->cap); diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index aa6d62cc5..3fa66dba0 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1547,6 +1547,14 @@ static size_t iommu_dma_opt_mapping_size(void) return iova_rcache_range(); } +static size_t iommu_dma_max_mapping_size(struct device *dev) +{ + if (dev_is_untrusted(dev)) + return swiotlb_max_mapping_size(dev); + + return SIZE_MAX; +} + static const struct dma_map_ops iommu_dma_ops = { .flags = DMA_F_PCI_P2PDMA_SUPPORTED, .alloc = iommu_dma_alloc, @@ -1569,6 +1577,7 @@ static const struct dma_map_ops iommu_dma_ops = { .unmap_resource = iommu_dma_unmap_resource, .get_merge_boundary = iommu_dma_get_merge_boundary, .opt_mapping_size = iommu_dma_opt_mapping_size, + .max_mapping_size = iommu_dma_max_mapping_size, }; /* diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig index b7dff5092..12e1e90fd 100644 --- a/drivers/iommu/intel/Kconfig +++ b/drivers/iommu/intel/Kconfig @@ -96,4 +96,15 @@ config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON passing intel_iommu=sm_on to the kernel. If not sure, please use the default value. +config INTEL_IOMMU_PERF_EVENTS + def_bool y + bool "Intel IOMMU performance events" + depends on INTEL_IOMMU && PERF_EVENTS + help + Selecting this option will enable the performance monitoring + infrastructure in the Intel IOMMU. It collects information about + key events occurring during operation of the remapping hardware, + to aid performance tuning and debug. These are available on modern + processors which support Intel VT-d 4.0 and later. 
+ endif # INTEL_IOMMU diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile index fa0dae164..29d26a437 100644 --- a/drivers/iommu/intel/Makefile +++ b/drivers/iommu/intel/Makefile @@ -5,4 +5,7 @@ obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o obj-$(CONFIG_DMAR_PERF) += perf.o obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o +ifdef CONFIG_INTEL_IOMMU obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o +endif +obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 418af1db0..4759f79ad 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -34,6 +34,7 @@ #include "../irq_remapping.h" #include "perf.h" #include "trace.h" +#include "perfmon.h" typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *); struct dmar_res_callback { @@ -1104,6 +1105,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) if (sts & DMA_GSTS_QIES) iommu->gcmd |= DMA_GCMD_QIE; + if (alloc_iommu_pmu(iommu)) + pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id); + raw_spin_lock_init(&iommu->register_lock); /* @@ -1131,6 +1135,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) err_sysfs: iommu_device_sysfs_remove(&iommu->iommu); err_unmap: + free_iommu_pmu(iommu); unmap_iommu(iommu); error_free_seq_id: ida_free(&dmar_seq_ids, iommu->seq_id); @@ -1146,6 +1151,8 @@ static void free_iommu(struct intel_iommu *iommu) iommu_device_sysfs_remove(&iommu->iommu); } + free_iommu_pmu(iommu); + if (iommu->irq) { if (iommu->pr_irq) { free_irq(iommu->pr_irq, iommu); diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index c99cb715b..c1348beda 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -125,6 +125,11 @@ #define DMAR_MTRR_PHYSMASK8_REG 0x208 #define DMAR_MTRR_PHYSBASE9_REG 0x210 #define DMAR_MTRR_PHYSMASK9_REG 0x218 +#define DMAR_PERFCAP_REG 0x300 +#define DMAR_PERFCFGOFF_REG 0x310 +#define DMAR_PERFOVFOFF_REG 0x318 +#define DMAR_PERFCNTROFF_REG 0x31c +#define DMAR_PERFEVNTCAP_REG 0x380 #define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */ #define DMAR_VCMD_REG 0xe00 /* Virtual command register */ #define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */ @@ -148,6 +153,7 @@ */ #define cap_esrtps(c) (((c) >> 63) & 1) #define cap_esirtps(c) (((c) >> 62) & 1) +#define cap_ecmds(c) (((c) >> 61) & 1) #define cap_fl5lp_support(c) (((c) >> 60) & 1) #define cap_pi_support(c) (((c) >> 59) & 1) #define cap_fl1gp_support(c) (((c) >> 56) & 1) @@ -179,7 +185,8 @@ * Extended Capability Register */ -#define ecap_rps(e) (((e) >> 49) & 0x1) +#define ecap_pms(e) (((e) >> 51) & 0x1) +#define ecap_rps(e) (((e) >> 49) & 0x1) #define ecap_smpwc(e) (((e) >> 48) & 0x1) #define ecap_flts(e) (((e) >> 47) & 0x1) #define ecap_slts(e) (((e) >> 46) & 0x1) @@ -210,6 +217,22 @@ #define ecap_max_handle_mask(e) (((e) >> 20) & 0xf) #define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */ +/* + * Decoding Perf Capability Register + */ +#define pcap_num_cntr(p) ((p) & 0xffff) +#define pcap_cntr_width(p) (((p) >> 16) & 0x7f) +#define pcap_num_event_group(p) (((p) >> 24) & 0x1f) +#define pcap_filters_mask(p) (((p) >> 32) & 0x1f) +#define pcap_interrupt(p) (((p) >> 50) & 0x1) +/* The counter stride is calculated as 2 ^ (x+10) bytes */ +#define pcap_cntr_stride(p) (1ULL << ((((p) >> 52) & 0x7) + 10)) + +/* + * Decoding Perf Event Capability Register + */ +#define pecap_es(p) ((p) & 0xfffffff) + /* Virtual command 
interface capability */ #define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */ @@ -561,6 +584,22 @@ struct dmar_domain { iommu core */ }; +struct iommu_pmu { + struct intel_iommu *iommu; + u32 num_cntr; /* Number of counters */ + u32 num_eg; /* Number of event group */ + u32 cntr_width; /* Counter width */ + u32 cntr_stride; /* Counter Stride */ + u32 filter; /* Bitmask of filter support */ + void __iomem *base; /* the PerfMon base address */ + void __iomem *cfg_reg; /* counter configuration base address */ + void __iomem *cntr_reg; /* counter 0 address*/ + void __iomem *overflow; /* overflow status register */ + + u64 *evcap; /* Indicates all supported events */ + u32 **cntr_evcap; /* Supported events of each counter. */ +}; + struct intel_iommu { void __iomem *reg; /* Pointer to hardware regs, virtual addr */ u64 reg_phys; /* physical address of hw register set */ @@ -608,6 +647,8 @@ struct intel_iommu { struct dmar_drhd_unit *drhd; void *perf_statistic; + + struct iommu_pmu *pmu; }; /* PCI domain-device relationship */ diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 3f03039e5..32432d82d 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -435,6 +435,9 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu, if (!info || !info->ats_enabled) return; + if (pci_dev_is_disconnected(to_pci_dev(dev))) + return; + sid = info->bus << 8 | info->devfn; qdep = info->ats_qdep; pfsid = info->pfsid; diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c new file mode 100644 index 000000000..db5791a54 --- /dev/null +++ b/drivers/iommu/intel/perfmon.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Support Intel IOMMU PerfMon + * Copyright(c) 2023 Intel Corporation. + */ +#define pr_fmt(fmt) "DMAR: " fmt +#define dev_fmt(fmt) pr_fmt(fmt) + +#include <linux/dmar.h> +#include "iommu.h" +#include "perfmon.h" + +static inline void __iomem * +get_perf_reg_address(struct intel_iommu *iommu, u32 offset) +{ + u32 off = dmar_readl(iommu->reg + offset); + + return iommu->reg + off; +} + +int alloc_iommu_pmu(struct intel_iommu *iommu) +{ + struct iommu_pmu *iommu_pmu; + int i, j, ret; + u64 perfcap; + u32 cap; + + if (!ecap_pms(iommu->ecap)) + return 0; + + /* The IOMMU PMU requires the ECMD support as well */ + if (!cap_ecmds(iommu->cap)) + return -ENODEV; + + perfcap = dmar_readq(iommu->reg + DMAR_PERFCAP_REG); + /* The performance monitoring is not supported. 
*/ + if (!perfcap) + return -ENODEV; + + /* Sanity check for the number of the counters and event groups */ + if (!pcap_num_cntr(perfcap) || !pcap_num_event_group(perfcap)) + return -ENODEV; + + /* The interrupt on overflow is required */ + if (!pcap_interrupt(perfcap)) + return -ENODEV; + + iommu_pmu = kzalloc(sizeof(*iommu_pmu), GFP_KERNEL); + if (!iommu_pmu) + return -ENOMEM; + + iommu_pmu->num_cntr = pcap_num_cntr(perfcap); + iommu_pmu->cntr_width = pcap_cntr_width(perfcap); + iommu_pmu->filter = pcap_filters_mask(perfcap); + iommu_pmu->cntr_stride = pcap_cntr_stride(perfcap); + iommu_pmu->num_eg = pcap_num_event_group(perfcap); + + iommu_pmu->evcap = kcalloc(iommu_pmu->num_eg, sizeof(u64), GFP_KERNEL); + if (!iommu_pmu->evcap) { + ret = -ENOMEM; + goto free_pmu; + } + + /* Parse event group capabilities */ + for (i = 0; i < iommu_pmu->num_eg; i++) { + u64 pcap; + + pcap = dmar_readq(iommu->reg + DMAR_PERFEVNTCAP_REG + + i * IOMMU_PMU_CAP_REGS_STEP); + iommu_pmu->evcap[i] = pecap_es(pcap); + } + + iommu_pmu->cntr_evcap = kcalloc(iommu_pmu->num_cntr, sizeof(u32 *), GFP_KERNEL); + if (!iommu_pmu->cntr_evcap) { + ret = -ENOMEM; + goto free_pmu_evcap; + } + for (i = 0; i < iommu_pmu->num_cntr; i++) { + iommu_pmu->cntr_evcap[i] = kcalloc(iommu_pmu->num_eg, sizeof(u32), GFP_KERNEL); + if (!iommu_pmu->cntr_evcap[i]) { + ret = -ENOMEM; + goto free_pmu_cntr_evcap; + } + /* + * Set to the global capabilities, will adjust according + * to per-counter capabilities later. + */ + for (j = 0; j < iommu_pmu->num_eg; j++) + iommu_pmu->cntr_evcap[i][j] = (u32)iommu_pmu->evcap[j]; + } + + iommu_pmu->cfg_reg = get_perf_reg_address(iommu, DMAR_PERFCFGOFF_REG); + iommu_pmu->cntr_reg = get_perf_reg_address(iommu, DMAR_PERFCNTROFF_REG); + iommu_pmu->overflow = get_perf_reg_address(iommu, DMAR_PERFOVFOFF_REG); + + /* + * Check per-counter capabilities. All counters should have the + * same capabilities on Interrupt on Overflow Support and Counter + * Width. + */ + for (i = 0; i < iommu_pmu->num_cntr; i++) { + cap = dmar_readl(iommu_pmu->cfg_reg + + i * IOMMU_PMU_CFG_OFFSET + + IOMMU_PMU_CFG_CNTRCAP_OFFSET); + if (!iommu_cntrcap_pcc(cap)) + continue; + + /* + * It's possible that some counters have a different + * capability because of e.g., HW bug. Check the corner + * case here and simply drop those counters. + */ + if ((iommu_cntrcap_cw(cap) != iommu_pmu->cntr_width) || + !iommu_cntrcap_ios(cap)) { + iommu_pmu->num_cntr = i; + pr_warn("PMU counter capability inconsistent, counter number reduced to %d\n", + iommu_pmu->num_cntr); + } + + /* Clear the pre-defined events group */ + for (j = 0; j < iommu_pmu->num_eg; j++) + iommu_pmu->cntr_evcap[i][j] = 0; + + /* Override with per-counter event capabilities */ + for (j = 0; j < iommu_cntrcap_egcnt(cap); j++) { + cap = dmar_readl(iommu_pmu->cfg_reg + i * IOMMU_PMU_CFG_OFFSET + + IOMMU_PMU_CFG_CNTREVCAP_OFFSET + + (j * IOMMU_PMU_OFF_REGS_STEP)); + iommu_pmu->cntr_evcap[i][iommu_event_group(cap)] = iommu_event_select(cap); + /* + * Some events may only be supported by a specific counter. + * Track them in the evcap as well. 
+ */ + iommu_pmu->evcap[iommu_event_group(cap)] |= iommu_event_select(cap); + } + } + + iommu_pmu->iommu = iommu; + iommu->pmu = iommu_pmu; + + return 0; + +free_pmu_cntr_evcap: + for (i = 0; i < iommu_pmu->num_cntr; i++) + kfree(iommu_pmu->cntr_evcap[i]); + kfree(iommu_pmu->cntr_evcap); +free_pmu_evcap: + kfree(iommu_pmu->evcap); +free_pmu: + kfree(iommu_pmu); + + return ret; +} + +void free_iommu_pmu(struct intel_iommu *iommu) +{ + struct iommu_pmu *iommu_pmu = iommu->pmu; + + if (!iommu_pmu) + return; + + if (iommu_pmu->evcap) { + int i; + + for (i = 0; i < iommu_pmu->num_cntr; i++) + kfree(iommu_pmu->cntr_evcap[i]); + kfree(iommu_pmu->cntr_evcap); + } + kfree(iommu_pmu->evcap); + kfree(iommu_pmu); + iommu->pmu = NULL; +} diff --git a/drivers/iommu/intel/perfmon.h b/drivers/iommu/intel/perfmon.h new file mode 100644 index 000000000..4b0d9c1fe --- /dev/null +++ b/drivers/iommu/intel/perfmon.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * PERFCFGOFF_REG, PERFFRZOFF_REG + * PERFOVFOFF_REG, PERFCNTROFF_REG + */ +#define IOMMU_PMU_NUM_OFF_REGS 4 +#define IOMMU_PMU_OFF_REGS_STEP 4 + +#define IOMMU_PMU_CFG_OFFSET 0x100 +#define IOMMU_PMU_CFG_CNTRCAP_OFFSET 0x80 +#define IOMMU_PMU_CFG_CNTREVCAP_OFFSET 0x84 +#define IOMMU_PMU_CFG_SIZE 0x8 +#define IOMMU_PMU_CFG_FILTERS_OFFSET 0x4 + +#define IOMMU_PMU_CAP_REGS_STEP 8 + +#define iommu_cntrcap_pcc(p) ((p) & 0x1) +#define iommu_cntrcap_cw(p) (((p) >> 8) & 0xff) +#define iommu_cntrcap_ios(p) (((p) >> 16) & 0x1) +#define iommu_cntrcap_egcnt(p) (((p) >> 28) & 0xf) + +#define iommu_event_select(p) ((p) & 0xfffffff) +#define iommu_event_group(p) (((p) >> 28) & 0xf) + +#ifdef CONFIG_INTEL_IOMMU_PERF_EVENTS +int alloc_iommu_pmu(struct intel_iommu *iommu); +void free_iommu_pmu(struct intel_iommu *iommu); +#else +static inline int +alloc_iommu_pmu(struct intel_iommu *iommu) +{ + return 0; +} + +static inline void +free_iommu_pmu(struct intel_iommu *iommu) +{ +} +#endif /* CONFIG_INTEL_IOMMU_PERF_EVENTS */ diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 8b3897239..83736824f 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1741,6 +1741,9 @@ static void probe_alloc_default_domain(struct bus_type *bus, { struct __group_domain_type gtype; + if (group->default_domain) + return; + memset(&gtype, 0, sizeof(gtype)); /* Ask for default domain requirements of all devices in the group */ diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index 83314b9d8..ee59647c2 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c @@ -99,7 +99,8 @@ int __init irq_remapping_prepare(void) if (disable_irq_remap) return -ENOSYS; - if (intel_irq_remap_ops.prepare() == 0) + if (IS_ENABLED(CONFIG_INTEL_IOMMU) && + intel_irq_remap_ops.prepare() == 0) remap_ops = &intel_irq_remap_ops; else if (IS_ENABLED(CONFIG_AMD_IOMMU) && amd_iommu_irq_ops.prepare() == 0) diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c index 10c3e85c9..be71459c7 100644 --- a/drivers/irqchip/irq-renesas-rzg2l.c +++ b/drivers/irqchip/irq-renesas-rzg2l.c @@ -28,8 +28,7 @@ #define ISCR 0x10 #define IITSR 0x14 #define TSCR 0x20 -#define TITSR0 0x24 -#define TITSR1 0x28 +#define TITSR(n) (0x24 + (n) * 4) #define TITSR0_MAX_INT 16 #define TITSEL_WIDTH 0x2 #define TSSR(n) (0x30 + ((n) * 4)) @@ -67,28 +66,43 @@ static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data) return data->domain->host_data; } -static void rzg2l_irq_eoi(struct irq_data *d) +static void
rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq) { - unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START; - struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hw_irq = hwirq - IRQC_IRQ_START; u32 bit = BIT(hw_irq); - u32 reg; + u32 iitsr, iscr; - reg = readl_relaxed(priv->base + ISCR); - if (reg & bit) - writel_relaxed(reg & ~bit, priv->base + ISCR); + iscr = readl_relaxed(priv->base + ISCR); + iitsr = readl_relaxed(priv->base + IITSR); + + /* + * ISCR can only be cleared if the type is falling-edge, rising-edge or + * falling/rising-edge. + */ + if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) { + writel_relaxed(iscr & ~bit, priv->base + ISCR); + /* + * Enforce that the posted write is flushed to prevent that the + * just handled interrupt is raised again. + */ + readl_relaxed(priv->base + ISCR); + } } -static void rzg2l_tint_eoi(struct irq_data *d) +static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq) { - unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START; - struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); - u32 bit = BIT(hw_irq); + u32 bit = BIT(hwirq - IRQC_TINT_START); u32 reg; reg = readl_relaxed(priv->base + TSCR); - if (reg & bit) + if (reg & bit) { writel_relaxed(reg & ~bit, priv->base + TSCR); + /* + * Enforce that the posted write is flushed to prevent that the + * just handled interrupt is raised again. + */ + readl_relaxed(priv->base + TSCR); + } } static void rzg2l_irqc_eoi(struct irq_data *d) @@ -98,9 +112,9 @@ static void rzg2l_irqc_eoi(struct irq_data *d) raw_spin_lock(&priv->lock); if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT) - rzg2l_irq_eoi(d); + rzg2l_clear_irq_int(priv, hw_irq); else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) - rzg2l_tint_eoi(d); + rzg2l_clear_tint_int(priv, hw_irq); raw_spin_unlock(&priv->lock); irq_chip_eoi_parent(d); } @@ -148,8 +162,10 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d) static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type) { - unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START; struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); + unsigned int hwirq = irqd_to_hwirq(d); + u32 iitseln = hwirq - IRQC_IRQ_START; + bool clear_irq_int = false; u16 sense, tmp; switch (type & IRQ_TYPE_SENSE_MASK) { @@ -159,14 +175,17 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type) case IRQ_TYPE_EDGE_FALLING: sense = IITSR_IITSEL_EDGE_FALLING; + clear_irq_int = true; break; case IRQ_TYPE_EDGE_RISING: sense = IITSR_IITSEL_EDGE_RISING; + clear_irq_int = true; break; case IRQ_TYPE_EDGE_BOTH: sense = IITSR_IITSEL_EDGE_BOTH; + clear_irq_int = true; break; default: @@ -175,22 +194,40 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type) raw_spin_lock(&priv->lock); tmp = readl_relaxed(priv->base + IITSR); - tmp &= ~IITSR_IITSEL_MASK(hw_irq); - tmp |= IITSR_IITSEL(hw_irq, sense); + tmp &= ~IITSR_IITSEL_MASK(iitseln); + tmp |= IITSR_IITSEL(iitseln, sense); + if (clear_irq_int) + rzg2l_clear_irq_int(priv, hwirq); writel_relaxed(tmp, priv->base + IITSR); raw_spin_unlock(&priv->lock); return 0; } +static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv, + u32 reg, u32 tssr_offset, u8 tssr_index) +{ + u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d); + u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset)); + + /* Clear the relevant byte in reg */ + reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset)); + /* Set TINT and leave TIEN clear */ + reg |= 
tint << TSSEL_SHIFT(tssr_offset); + writel_relaxed(reg, priv->base + TSSR(tssr_index)); + + return reg | tien; +} + static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type) { struct rzg2l_irqc_priv *priv = irq_data_to_priv(d); unsigned int hwirq = irqd_to_hwirq(d); u32 titseln = hwirq - IRQC_TINT_START; - u32 offset; - u8 sense; - u32 reg; + u32 tssr_offset = TSSR_OFFSET(titseln); + u8 tssr_index = TSSR_INDEX(titseln); + u8 index, sense; + u32 reg, tssr; switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_RISING: @@ -205,17 +242,21 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type) return -EINVAL; } - offset = TITSR0; + index = 0; if (titseln >= TITSR0_MAX_INT) { titseln -= TITSR0_MAX_INT; - offset = TITSR1; + index = 1; } raw_spin_lock(&priv->lock); - reg = readl_relaxed(priv->base + offset); + tssr = readl_relaxed(priv->base + TSSR(tssr_index)); + tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index); + reg = readl_relaxed(priv->base + TITSR(index)); reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH)); reg |= sense << (titseln * TITSEL_WIDTH); - writel_relaxed(reg, priv->base + offset); + writel_relaxed(reg, priv->base + TITSR(index)); + rzg2l_clear_tint_int(priv, hwirq); + writel_relaxed(tssr, priv->base + TSSR(tssr_index)); raw_spin_unlock(&priv->lock); return 0; diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c index d3a30ad94..dd5d327c5 100644 --- a/drivers/leds/flash/leds-sgm3140.c +++ b/drivers/leds/flash/leds-sgm3140.c @@ -114,8 +114,11 @@ static int sgm3140_brightness_set(struct led_classdev *led_cdev, "failed to enable regulator: %d\n", ret); return ret; } + gpiod_set_value_cansleep(priv->flash_gpio, 0); gpiod_set_value_cansleep(priv->enable_gpio, 1); } else { + del_timer_sync(&priv->powerdown_timer); + gpiod_set_value_cansleep(priv->flash_gpio, 0); gpiod_set_value_cansleep(priv->enable_gpio, 0); ret = regulator_disable(priv->vin_regulator); if (ret) { diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c index 0b52fc909..3c0595857 100644 --- a/drivers/leds/leds-aw2013.c +++ b/drivers/leds/leds-aw2013.c @@ -397,6 +397,7 @@ error_reg: regulator_disable(chip->vcc_regulator); error: + mutex_unlock(&chip->mutex); mutex_destroy(&chip->mutex); return ret; } diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 100a6a236..ec662f97b 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -614,7 +614,7 @@ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector, io_req.mem.ptr.vma = (char *)b->data + offset; } - r = dm_io(&io_req, 1, &region, NULL); + r = dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT); if (unlikely(r)) b->end_io(b, errno_to_blk_status(r)); } @@ -1375,7 +1375,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c) BUG_ON(dm_bufio_in_request()); - return dm_io(&io_req, 1, &io_reg, NULL); + return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT); } EXPORT_SYMBOL_GPL(dm_bufio_issue_flush); @@ -1398,7 +1398,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c BUG_ON(dm_bufio_in_request()); - return dm_io(&io_req, 1, &io_reg, NULL); + return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT); } EXPORT_SYMBOL_GPL(dm_bufio_issue_discard); diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index 6ba3e9c91..8bc21d548 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h @@ -75,7 +75,7 @@ struct dm_cache_policy { * background work.
*/ int (*get_background_work)(struct dm_cache_policy *p, bool idle, - struct policy_work **result); + struct policy_work **result); /* * You must pass in the same work pointer that you were given, not diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 3e215aa85..25e51dc6e 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -52,11 +52,11 @@ struct convert_context { struct completion restart; struct bio *bio_in; - struct bio *bio_out; struct bvec_iter iter_in; + struct bio *bio_out; struct bvec_iter iter_out; - u64 cc_sector; atomic_t cc_pending; + u64 cc_sector; union { struct skcipher_request *req; struct aead_request *req_aead; @@ -2535,7 +2535,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string type = &key_type_encrypted; set_key = set_key_encrypted; } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) && - !strncmp(key_string, "trusted:", key_desc - key_string + 1)) { + !strncmp(key_string, "trusted:", key_desc - key_string + 1)) { type = &key_type_trusted; set_key = set_key_trusted; } else { diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 3da4359f5..696365f8f 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -579,7 +579,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf) } } - r = dm_io(&io_req, 1, &io_loc, NULL); + r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); if (unlikely(r)) return r; @@ -1089,7 +1089,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf, io_loc.sector = ic->start + SB_SECTORS + sector; io_loc.count = n_sectors; - r = dm_io(&io_req, 1, &io_loc, NULL); + r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); if (unlikely(r)) { dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ? 
"reading journal" : "writing journal", r); @@ -1205,7 +1205,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, u io_loc.sector = target; io_loc.count = n_sectors; - r = dm_io(&io_req, 1, &io_loc, NULL); + r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); if (unlikely(r)) { WARN_ONCE(1, "asynchronous dm_io failed: %d", r); fn(-1UL, data); @@ -1532,7 +1532,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat fr.io_reg.count = 0, fr.ic = ic; init_completion(&fr.comp); - r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL); + r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT); BUG_ON(r); } @@ -1709,7 +1709,6 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks struct bio_vec bv; sector_t sector, logical_sector, area, offset; struct page *page; - void *buffer; get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, @@ -1718,13 +1717,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks logical_sector = dio->range.logical_sector; page = mempool_alloc(&ic->recheck_pool, GFP_NOIO); - buffer = page_to_virt(page); __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { unsigned pos = 0; do { + sector_t alignment; char *mem; + char *buffer = page_to_virt(page); int r; struct dm_io_request io_req; struct dm_io_region io_loc; @@ -1737,7 +1737,15 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks io_loc.sector = sector; io_loc.count = ic->sectors_per_block; - r = dm_io(&io_req, 1, &io_loc, NULL); + /* Align the bio to logical block size */ + alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT); + alignment &= -alignment; + io_loc.sector = round_down(io_loc.sector, alignment); + io_loc.count += sector - io_loc.sector; + buffer += (sector - io_loc.sector) << SECTOR_SHIFT; + io_loc.count = round_up(io_loc.count, alignment); + + r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); if (unlikely(r)) { dio->bi_status = errno_to_blk_status(r); goto free_ret; @@ -1856,12 +1864,12 @@ again: r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, checksums_ptr - checksums, dio->op == REQ_OP_READ ? 
TAG_CMP : TAG_WRITE); if (unlikely(r)) { + if (likely(checksums != checksums_onstack)) + kfree(checksums); if (r > 0) { - integrity_recheck(dio, checksums); + integrity_recheck(dio, checksums_onstack); goto skip_io; } - if (likely(checksums != checksums_onstack)) - kfree(checksums); goto error; } @@ -2367,7 +2375,6 @@ offload_to_thread: else skip_check: dec_in_flight(dio); - } else { INIT_WORK(&dio->work, integrity_metadata); queue_work(ic->metadata_wq, &dio->work); @@ -2775,7 +2782,7 @@ next_chunk: io_loc.sector = get_data_sector(ic, area, offset); io_loc.count = n_sectors; - r = dm_io(&io_req, 1, &io_loc, NULL); + r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); if (unlikely(r)) { dm_integrity_io_error(ic, "reading data", r); goto err; @@ -4151,7 +4158,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) { if (val < 1 << SECTOR_SHIFT || val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT || - (val & (val -1))) { + (val & (val - 1))) { r = -EINVAL; ti->error = "Invalid block_size argument"; goto bad; @@ -4160,7 +4167,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) { log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval); } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) { - if (val >= (uint64_t)UINT_MAX * 1000 / HZ) { + if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) { r = -EINVAL; ti->error = "Invalid bitmap_flush_interval argument"; goto bad; @@ -4477,7 +4484,7 @@ try_smaller_buffer: if (ic->internal_hash) { size_t recalc_tags_size; ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); - if (!ic->recalc_wq ) { + if (!ic->recalc_wq) { ti->error = "Cannot allocate workqueue"; r = -ENOMEM; goto bad; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index e488b05e3..ec9765838 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -295,7 +295,7 @@ static void km_dp_init(struct dpages *dp, void *data) *---------------------------------------------------------------*/ static void do_region(const blk_opf_t opf, unsigned int region, struct dm_io_region *where, struct dpages *dp, - struct io *io) + struct io *io, unsigned short ioprio) { struct bio *bio; struct page *page; @@ -344,6 +344,7 @@ static void do_region(const blk_opf_t opf, unsigned int region, &io->client->bios); bio->bi_iter.bi_sector = where->sector + (where->count - remaining); bio->bi_end_io = endio; + bio->bi_ioprio = ioprio; store_io_and_region_in_bio(bio, io, region); if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) { @@ -371,7 +372,7 @@ static void do_region(const blk_opf_t opf, unsigned int region, static void dispatch_io(blk_opf_t opf, unsigned int num_regions, struct dm_io_region *where, struct dpages *dp, - struct io *io, int sync) + struct io *io, int sync, unsigned short ioprio) { int i; struct dpages old_pages = *dp; @@ -388,7 +389,7 @@ static void dispatch_io(blk_opf_t opf, unsigned int num_regions, for (i = 0; i < num_regions; i++) { *dp = old_pages; if (where[i].count || (opf & REQ_PREFLUSH)) - do_region(opf, i, where + i, dp, io); + do_region(opf, i, where + i, dp, io, ioprio); } /* @@ -413,7 +414,7 @@ static void sync_io_complete(unsigned long error, void *context) static int sync_io(struct dm_io_client *client, unsigned int num_regions, struct dm_io_region *where, blk_opf_t opf, struct dpages *dp, - unsigned long *error_bits) + 
unsigned long *error_bits, unsigned short ioprio) { struct io *io; struct sync_io sio; @@ -435,7 +436,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, io->vma_invalidate_address = dp->vma_invalidate_address; io->vma_invalidate_size = dp->vma_invalidate_size; - dispatch_io(opf, num_regions, where, dp, io, 1); + dispatch_io(opf, num_regions, where, dp, io, 1, ioprio); wait_for_completion_io(&sio.wait); @@ -447,7 +448,8 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, static int async_io(struct dm_io_client *client, unsigned int num_regions, struct dm_io_region *where, blk_opf_t opf, - struct dpages *dp, io_notify_fn fn, void *context) + struct dpages *dp, io_notify_fn fn, void *context, + unsigned short ioprio) { struct io *io; @@ -467,7 +469,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, io->vma_invalidate_address = dp->vma_invalidate_address; io->vma_invalidate_size = dp->vma_invalidate_size; - dispatch_io(opf, num_regions, where, dp, io, 0); + dispatch_io(opf, num_regions, where, dp, io, 0, ioprio); return 0; } @@ -509,7 +511,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp, } int dm_io(struct dm_io_request *io_req, unsigned int num_regions, - struct dm_io_region *where, unsigned long *sync_error_bits) + struct dm_io_region *where, unsigned long *sync_error_bits, + unsigned short ioprio) { int r; struct dpages dp; @@ -520,11 +523,11 @@ int dm_io(struct dm_io_request *io_req, unsigned int num_regions, if (!io_req->notify.fn) return sync_io(io_req->client, num_regions, where, - io_req->bi_opf, &dp, sync_error_bits); + io_req->bi_opf, &dp, sync_error_bits, ioprio); return async_io(io_req->client, num_regions, where, io_req->bi_opf, &dp, io_req->notify.fn, - io_req->notify.context); + io_req->notify.context, ioprio); } EXPORT_SYMBOL(dm_io); diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 0ef78e56a..fda51bd14 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -572,9 +572,9 @@ static int run_io_job(struct kcopyd_job *job) io_job_start(job->kc->throttle); if (job->op == REQ_OP_READ) - r = dm_io(&io_req, 1, &job->source, NULL); + r = dm_io(&io_req, 1, &job->source, NULL, IOPRIO_DEFAULT); else - r = dm_io(&io_req, job->num_dests, job->dests, NULL); + r = dm_io(&io_req, job->num_dests, job->dests, NULL, IOPRIO_DEFAULT); return r; } diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 05141eea1..da77878cb 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -295,7 +295,7 @@ static int rw_header(struct log_c *lc, enum req_op op) { lc->io_req.bi_opf = op; - return dm_io(&lc->io_req, 1, &lc->header_location, NULL); + return dm_io(&lc->io_req, 1, &lc->header_location, NULL, IOPRIO_DEFAULT); } static int flush_header(struct log_c *lc) @@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc) lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - return dm_io(&lc->io_req, 1, &null_location, NULL); + return dm_io(&lc->io_req, 1, &null_location, NULL, IOPRIO_DEFAULT); } static int read_header(struct log_c *log) @@ -756,8 +756,8 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region, log_clear_bit(lc, lc->recovering_bits, region); if (in_sync) { log_set_bit(lc, lc->sync_bits, region); - lc->sync_count++; - } else if (log_test_bit(lc->sync_bits, region)) { + lc->sync_count++; + } else if (log_test_bit(lc->sync_bits, region)) { lc->sync_count--; log_clear_bit(lc, lc->sync_bits, region); } @@ -765,9 +765,9 @@ static void 
core_set_region_sync(struct dm_dirty_log *log, region_t region, static region_t core_get_sync_count(struct dm_dirty_log *log) { - struct log_c *lc = (struct log_c *) log->context; + struct log_c *lc = (struct log_c *) log->context; - return lc->sync_count; + return lc->sync_count; } #define DMEMIT_SYNC \ diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 4b7528dc2..99b4738e8 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -362,8 +362,8 @@ static struct { const int mode; const char *param; } _raid456_journal_mode[] = { - { R5C_JOURNAL_MODE_WRITE_THROUGH , "writethrough" }, - { R5C_JOURNAL_MODE_WRITE_BACK , "writeback" } + { R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" }, + { R5C_JOURNAL_MODE_WRITE_BACK, "writeback" } }; /* Return MD raid4/5/6 journal mode for dm @journal_mode one */ @@ -1114,7 +1114,7 @@ too_many: * [stripe_cache <sectors>] Stripe cache size for higher RAIDs * [region_size <sectors>] Defines granularity of bitmap * [journal_dev <dev>] raid4/5/6 journaling deviice - * (i.e. write hole closing log) + * (i.e. write hole closing log) * * RAID10-only options: * [raid10_copies <# copies>] Number of copies. (Default: 2) @@ -3325,14 +3325,14 @@ static int raid_map(struct dm_target *ti, struct bio *bio) struct mddev *mddev = &rs->md; /* - * If we're reshaping to add disk(s)), ti->len and + * If we're reshaping to add disk(s), ti->len and * mddev->array_sectors will differ during the process * (ti->len > mddev->array_sectors), so we have to requeue * bios with addresses > mddev->array_sectors here or * there will occur accesses past EOD of the component * data images thus erroring the raid set. */ - if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) + if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors)) return DM_MAPIO_REQUEUE; md_handle_request(mddev, bio); @@ -3999,7 +3999,7 @@ static int raid_preresume(struct dm_target *ti) } /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */ - if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && + if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) || (rs->requested_bitmap_chunk_sectors && mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) { @@ -4046,7 +4046,9 @@ static void raid_resume(struct dm_target *ti) * Take this opportunity to check whether any failed * devices are reachable again. 
*/ + mddev_lock_nointr(mddev); attempt_restore_of_faulty_devices(rs); + mddev_unlock(mddev); } if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index c38e63706..1004199ae 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -273,7 +273,7 @@ static int mirror_flush(struct dm_target *ti) } error_bits = -1; - dm_io(&io_req, ms->nr_mirrors, io, &error_bits); + dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT); if (unlikely(error_bits != 0)) { for (i = 0; i < ms->nr_mirrors; i++) if (test_bit(i, &error_bits)) @@ -543,7 +543,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio) map_region(&io, m, bio); bio_set_m(bio, m); - BUG_ON(dm_io(&io_req, 1, &io, NULL)); + BUG_ON(dm_io(&io_req, 1, &io, NULL, IOPRIO_DEFAULT)); } static inline int region_in_sync(struct mirror_set *ms, region_t region, @@ -670,7 +670,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio) */ bio_set_m(bio, get_default_mirror(ms)); - BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL)); + BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT)); } static void do_writes(struct mirror_set *ms, struct bio_list *writes) @@ -902,7 +902,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, if (IS_ERR(ms->io_client)) { ti->error = "Error creating dm_io client"; kfree(ms); - return NULL; + return NULL; } ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord, diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 80b95746a..eee1cd3aa 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -220,7 +220,7 @@ static void do_metadata(struct work_struct *work) { struct mdata_req *req = container_of(work, struct mdata_req, work); - req->result = dm_io(req->io_req, 1, req->where, NULL); + req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT); } /* @@ -244,7 +244,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf, struct mdata_req req; if (!metadata) - return dm_io(&io_req, 1, &where, NULL); + return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT); req.where = &where; req.io_req = &io_req; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index b748901a4..1c601508c 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -679,8 +679,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et, for (i = 0; i < size; i++) { slot = et->table + i; - hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) + hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) { kmem_cache_free(mem, ex); + cond_resched(); + } } kvfree(et->table); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e0367a672..aabb24350 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -72,7 +72,7 @@ static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) n = get_child(n, CHILDREN_PER_NODE - 1); if (n >= t->counts[l]) - return (sector_t) - 1; + return (sector_t) -1; return get_node(t, l, n)[KEYS_PER_NODE - 1]; } @@ -1533,7 +1533,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t, if (ti->type->iterate_devices && ti->type->iterate_devices(ti, func, data)) return true; - } + } return false; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 601f9e4e6..f24d89af7 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1179,9 +1179,9 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) 
discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO); discard_parent->bi_end_io = passdown_endio; discard_parent->bi_private = m; - if (m->maybe_shared) - passdown_double_checking_shared_status(m, discard_parent); - else { + if (m->maybe_shared) + passdown_double_checking_shared_status(m, discard_parent); + else { struct discard_op op; begin_discard(&op, tc, discard_parent); diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index b48e1b59e..6a707b41d 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -503,7 +503,7 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io, io_loc.bdev = v->data_dev->bdev; io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT); io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT); - r = dm_io(&io_req, 1, &io_loc, NULL); + r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT); if (unlikely(r)) goto free_ret; diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 4620a98c9..db93a9116 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -80,12 +80,12 @@ struct dm_verity_io { /* original value of bio->bi_end_io */ bio_end_io_t *orig_bi_end_io; + struct bvec_iter iter; + sector_t block; unsigned int n_blocks; bool in_tasklet; - struct bvec_iter iter; - struct work_struct work; char *recheck_buffer; diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index c6ff43a8f..20fc84b24 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios) req.notify.context = &endio; /* writing via async dm-io (implied by notify.fn above) won't return an error */ - (void) dm_io(&req, 1, &region, NULL); + (void) dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT); i = j; } @@ -568,7 +568,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc) req.notify.fn = NULL; req.notify.context = NULL; - r = dm_io(&req, 1, &region, NULL); + r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT); if (unlikely(r)) writecache_error(wc, r, "error writing superblock"); } @@ -596,7 +596,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev) req.notify.fn = NULL; - r = dm_io(&req, 1, &region, NULL); + r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT); if (unlikely(r)) writecache_error(wc, r, "error flushing metadata: %d", r); } @@ -984,7 +984,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors req.client = wc->dm_io; req.notify.fn = NULL; - return dm_io(&req, 1, &region, NULL); + return dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT); } static void writecache_resume(struct dm_target *ti) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0ec85d159..29270f6f2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2897,6 +2897,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend static void __dm_internal_resume(struct mapped_device *md) { + int r; + struct dm_table *map; + BUG_ON(!md->internal_suspend_count); if (--md->internal_suspend_count) @@ -2905,12 +2908,23 @@ static void __dm_internal_resume(struct mapped_device *md) if (dm_suspended_md(md)) goto done; /* resume from nested suspend */ - /* - * NOTE: existing callers don't need to call dm_table_resume_targets - * (which may fail -- so best to avoid it for now by passing NULL map) - */ - (void) __dm_resume(md, NULL); - + map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); + r =
__dm_resume(md, map); + if (r) { + /* + * If a preresume method of some target failed, we are in a + * tricky situation. We can't return an error to the caller. We + * can't fake success because then the "resume" and + * "postsuspend" methods would not be paired correctly, and it + * would break various targets, for example it would cause list + * corruption in the "origin" target. + * + * So, we fake normal suspend here, to make sure that the + * "resume" and "postsuspend" methods will be paired correctly. + */ + DMERR("Preresume method failed: %d", r); + set_bit(DMF_SUSPENDED, &md->flags); + } done: clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); smp_mb__after_atomic(); diff --git a/drivers/md/md.c b/drivers/md/md.c index 846bdee4d..788acc81e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4903,11 +4903,21 @@ action_store(struct mddev *mddev, const char *page, size_t len) return -EINVAL; err = mddev_lock(mddev); if (!err) { - if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { err = -EBUSY; - else { + } else if (mddev->reshape_position == MaxSector || + mddev->pers->check_reshape == NULL || + mddev->pers->check_reshape(mddev)) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); err = mddev->pers->start_reshape(mddev); + } else { + /* + * If reshape is still in progress, and + * md_check_recovery() can continue to reshape, + * don't restart reshape because data can be + * corrupted for raid456. + */ + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } mddev_unlock(mddev); } @@ -6233,7 +6243,15 @@ static void md_clean(struct mddev *mddev) mddev->persistent = 0; mddev->level = LEVEL_NONE; mddev->clevel[0] = 0; - mddev->flags = 0; + /* + * Don't clear MD_CLOSING, or mddev can be opened again. + * 'hold_active != 0' means mddev is still in the creation + * process and will be used later. + */ + if (mddev->hold_active) + mddev->flags = 0; + else + mddev->flags &= BIT_ULL_MASK(MD_CLOSING); mddev->sb_flags = 0; mddev->ro = MD_RDWR; mddev->metadata_type[0] = 0; @@ -7561,7 +7579,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, int err = 0; void __user *argp = (void __user *)arg; struct mddev *mddev = NULL; - bool did_set_md_closing = false; if (!md_ioctl_valid(cmd)) return -ENOTTY; @@ -7648,7 +7665,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, err = -EBUSY; goto out; } - did_set_md_closing = true; mutex_unlock(&mddev->open_mutex); sync_blockdev(bdev); } @@ -7811,7 +7827,7 @@ unlock: mddev->hold_active = 0; mddev_unlock(mddev); out: - if(did_set_md_closing) + if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY)) clear_bit(MD_CLOSING, &mddev->flags); return err; } diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 1cc783d70..18d949d63 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -726,7 +726,7 @@ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type * * nodes, so saves metadata space. 
*/ static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index, - struct dm_btree_value_type *vt, uint64_t key) + struct dm_btree_value_type *vt, uint64_t key) { int r; unsigned int middle_index; @@ -781,7 +781,7 @@ static int split_two_into_three(struct shadow_spine *s, unsigned int parent_inde if (shadow_current(s) != right) unlock_block(s->info, right); - return r; + return r; } @@ -1216,7 +1216,7 @@ int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root, static bool need_insert(struct btree_node *node, uint64_t *keys, unsigned int level, unsigned int index) { - return ((index >= le32_to_cpu(node->header.nr_entries)) || + return ((index >= le32_to_cpu(node->header.nr_entries)) || (le64_to_cpu(node->keys[index]) != keys[level])); } diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index af800efed..4833a3998 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -390,7 +390,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, } int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll, - dm_block_t begin, dm_block_t end, dm_block_t *b) + dm_block_t begin, dm_block_t end, dm_block_t *b) { int r; uint32_t count; diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h index 706ceb85d..63d9a72e3 100644 --- a/drivers/md/persistent-data/dm-space-map-common.h +++ b/drivers/md/persistent-data/dm-space-map-common.h @@ -120,7 +120,7 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result); int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, dm_block_t end, dm_block_t *result); int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll, - dm_block_t begin, dm_block_t end, dm_block_t *result); + dm_block_t begin, dm_block_t end, dm_block_t *result); /* * The next three functions return (via nr_allocations) the net number of diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7b318e7e8..009f7ffe4 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -920,6 +920,7 @@ static void flush_pending_writes(struct r10conf *conf) raid1_submit_write(bio); bio = next; + cond_resched(); } blk_finish_plug(&plug); } else @@ -1130,6 +1131,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) raid1_submit_write(bio); bio = next; + cond_resched(); } kfree(plug); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e4564ca1f..8cf231785 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2420,7 +2420,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) atomic_inc(&conf->active_stripes); raid5_release_stripe(sh); - conf->max_nr_stripes++; + WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1); return 1; } @@ -2717,7 +2717,7 @@ static int drop_one_stripe(struct r5conf *conf) shrink_buffers(sh); free_stripe(conf->slab_cache, sh); atomic_dec(&conf->active_stripes); - conf->max_nr_stripes--; + WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1); return 1; } @@ -6891,7 +6891,7 @@ raid5_set_cache_size(struct mddev *mddev, int size) if (size <= 16 || size > 32768) return -EINVAL; - conf->min_nr_stripes = size; + WRITE_ONCE(conf->min_nr_stripes, size); mutex_lock(&conf->cache_size_mutex); while (size < conf->max_nr_stripes && drop_one_stripe(conf)) @@ -6903,7 +6903,7 @@ raid5_set_cache_size(struct mddev *mddev, int size) 
mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) if (!grow_one_stripe(conf, GFP_KERNEL)) { - conf->min_nr_stripes = conf->max_nr_stripes; + WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes); result = -ENOMEM; break; } @@ -7468,11 +7468,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink, struct shrink_control *sc) { struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); + int max_stripes = READ_ONCE(conf->max_nr_stripes); + int min_stripes = READ_ONCE(conf->min_nr_stripes); - if (conf->max_nr_stripes < conf->min_nr_stripes) + if (max_stripes < min_stripes) /* unlikely, but not impossible */ return 0; - return conf->max_nr_stripes - conf->min_nr_stripes; + return max_stripes - min_stripes; } static struct r5conf *setup_conf(struct mddev *mddev) diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index 303d02b1d..fe30f5b00 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -113,6 +113,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w) { unsigned pat; unsigned plane; + int ret = 0; tpg->max_line_width = max_w; for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) { @@ -121,14 +122,18 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w) tpg->lines[pat][plane] = vzalloc(array3_size(max_w, 2, pixelsz)); - if (!tpg->lines[pat][plane]) - return -ENOMEM; + if (!tpg->lines[pat][plane]) { + ret = -ENOMEM; + goto free_lines; + } if (plane == 0) continue; tpg->downsampled_lines[pat][plane] = vzalloc(array3_size(max_w, 2, pixelsz)); - if (!tpg->downsampled_lines[pat][plane]) - return -ENOMEM; + if (!tpg->downsampled_lines[pat][plane]) { + ret = -ENOMEM; + goto free_lines; + } } } for (plane = 0; plane < TPG_MAX_PLANES; plane++) { @@ -136,18 +141,45 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w) tpg->contrast_line[plane] = vzalloc(array_size(pixelsz, max_w)); - if (!tpg->contrast_line[plane]) - return -ENOMEM; + if (!tpg->contrast_line[plane]) { + ret = -ENOMEM; + goto free_contrast_line; + } tpg->black_line[plane] = vzalloc(array_size(pixelsz, max_w)); - if (!tpg->black_line[plane]) - return -ENOMEM; + if (!tpg->black_line[plane]) { + ret = -ENOMEM; + goto free_contrast_line; + } tpg->random_line[plane] = vzalloc(array3_size(max_w, 2, pixelsz)); - if (!tpg->random_line[plane]) - return -ENOMEM; + if (!tpg->random_line[plane]) { + ret = -ENOMEM; + goto free_contrast_line; + } } return 0; + +free_contrast_line: + for (plane = 0; plane < TPG_MAX_PLANES; plane++) { + vfree(tpg->contrast_line[plane]); + vfree(tpg->black_line[plane]); + vfree(tpg->random_line[plane]); + tpg->contrast_line[plane] = NULL; + tpg->black_line[plane] = NULL; + tpg->random_line[plane] = NULL; + } +free_lines: + for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) + for (plane = 0; plane < TPG_MAX_PLANES; plane++) { + vfree(tpg->lines[pat][plane]); + tpg->lines[pat][plane] = NULL; + if (plane == 0) + continue; + vfree(tpg->downsampled_lines[pat][plane]); + tpg->downsampled_lines[pat][plane] = NULL; + } + return ret; } EXPORT_SYMBOL_GPL(tpg_alloc); diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index d352e0284..aefee2277 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -494,6 +494,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL); if (!dvbdevfops) { kfree(dvbdev); + *pdvbdev = NULL; 
mutex_unlock(&dvbdev_register_lock); return -ENOMEM; } @@ -502,6 +503,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, if (!new_node) { kfree(dvbdevfops); kfree(dvbdev); + *pdvbdev = NULL; mutex_unlock(&dvbdev_register_lock); return -ENOMEM; } @@ -535,6 +537,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, } list_del (&dvbdev->list_head); kfree(dvbdev); + *pdvbdev = NULL; up_write(&minor_rwsem); mutex_unlock(&dvbdev_register_lock); return -EINVAL; @@ -557,6 +560,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, dvb_media_device_free(dvbdev); list_del (&dvbdev->list_head); kfree(dvbdev); + *pdvbdev = NULL; mutex_unlock(&dvbdev_register_lock); return ret; } @@ -575,6 +579,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, dvb_media_device_free(dvbdev); list_del (&dvbdev->list_head); kfree(dvbdev); + *pdvbdev = NULL; mutex_unlock(&dvbdev_register_lock); return PTR_ERR(clsdev); } diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index 04556b77c..0977564a4 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -118,50 +118,32 @@ static const s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_S } }; -static -int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) +static noinline_for_stack +int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data) { - u8 buf[MAX_XFER_SIZE]; + u8 buf[3] = { MSB(reg), LSB(reg), data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, - .len = len + 2 + .len = 3, }; int ret; - if (2 + len > sizeof(buf)) { - printk(KERN_WARNING - "%s: i2c wr reg=%04x: len=%d is too big!\n", - KBUILD_MODNAME, reg, len); - return -EINVAL; - } - - - buf[0] = MSB(reg); - buf[1] = LSB(reg); - memcpy(buf + 2, data, len); - if (i2cdebug) printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__, - state->config->demod_address, reg, buf[2]); + state->config->demod_address, reg, data); ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n", - __func__, state->config->demod_address, reg, buf[2]); + __func__, state->config->demod_address, reg, data); return (ret != 1) ? 
-EREMOTEIO : 0; } -static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data) -{ - u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ - - return stv0367_writeregs(state, reg, &tmp, 1); -} - -static u8 stv0367_readreg(struct stv0367_state *state, u16 reg) +static noinline_for_stack +u8 stv0367_readreg(struct stv0367_state *state, u16 reg) { u8 b0[] = { 0, 0 }; u8 b1[] = { 0 }; diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index 200841c1f..68628ccec 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -2094,9 +2094,6 @@ static int tc358743_probe(struct i2c_client *client) state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24; sd->dev = &client->dev; - err = v4l2_async_register_subdev(sd); - if (err < 0) - goto err_hdl; mutex_init(&state->confctl_mutex); @@ -2154,6 +2151,10 @@ static int tc358743_probe(struct i2c_client *client) if (err) goto err_work_queues; + err = v4l2_async_register_subdev(sd); + if (err < 0) + goto err_work_queues; + v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name, client->addr << 1, client->adapter->name); diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c index f268cf660..8919df09e 100644 --- a/drivers/media/mc/mc-entity.c +++ b/drivers/media/mc/mc-entity.c @@ -509,14 +509,15 @@ static int media_pipeline_walk_push(struct media_pipeline_walk *walk, /* * Move the top entry link cursor to the next link. If all links of the entry - * have been visited, pop the entry itself. + * have been visited, pop the entry itself. Return true if the entry has been + * popped. */ -static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) +static bool media_pipeline_walk_pop(struct media_pipeline_walk *walk) { struct media_pipeline_walk_entry *entry; if (WARN_ON(walk->stack.top < 0)) - return; + return false; entry = media_pipeline_walk_top(walk); @@ -526,7 +527,7 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) walk->stack.top); walk->stack.top--; - return; + return true; } entry->links = entry->links->next; @@ -534,6 +535,8 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) dev_dbg(walk->mdev->dev, "media pipeline: moved entry %u to next link\n", walk->stack.top); + + return false; } /* Free all memory allocated while walking the pipeline. */ @@ -579,30 +582,24 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, struct media_pipeline_walk *walk) { struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk); - struct media_pad *pad; + struct media_pad *origin; struct media_link *link; struct media_pad *local; struct media_pad *remote; + bool last_link; int ret; - pad = entry->pad; + origin = entry->pad; link = list_entry(entry->links, typeof(*link), list); - media_pipeline_walk_pop(walk); + last_link = media_pipeline_walk_pop(walk); dev_dbg(walk->mdev->dev, "media pipeline: exploring link '%s':%u -> '%s':%u\n", link->source->entity->name, link->source->index, link->sink->entity->name, link->sink->index); - /* Skip links that are not enabled. */ - if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { - dev_dbg(walk->mdev->dev, - "media pipeline: skipping link (disabled)\n"); - return 0; - } - /* Get the local pad and remote pad. 
*/ - if (link->source->entity == pad->entity) { + if (link->source->entity == origin->entity) { local = link->source; remote = link->sink; } else { @@ -614,25 +611,64 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, * Skip links that originate from a different pad than the incoming pad * that is not connected internally in the entity to the incoming pad. */ - if (pad != local && - !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) { + if (origin != local && + !media_entity_has_pad_interdep(origin->entity, origin->index, + local->index)) { dev_dbg(walk->mdev->dev, "media pipeline: skipping link (no route)\n"); - return 0; + goto done; } /* - * Add the local and remote pads of the link to the pipeline and push - * them to the stack, if they're not already present. + * Add the local pad of the link to the pipeline and push it to the + * stack, if not already present. */ ret = media_pipeline_add_pad(pipe, walk, local); if (ret) return ret; + /* Similarly, add the remote pad, but only if the link is enabled. */ + if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { + dev_dbg(walk->mdev->dev, + "media pipeline: skipping link (disabled)\n"); + goto done; + } + ret = media_pipeline_add_pad(pipe, walk, remote); if (ret) return ret; +done: + /* + * If we're done iterating over links, iterate over pads of the entity. + * This is necessary to discover pads that are not connected with any + * link. Those are dead ends from a pipeline exploration point of view, + * but are still part of the pipeline and need to be added to enable + * proper validation. + */ + if (!last_link) + return 0; + + dev_dbg(walk->mdev->dev, + "media pipeline: adding unconnected pads of '%s'\n", + local->entity->name); + + media_entity_for_each_pad(origin->entity, local) { + /* + * Skip the origin pad (already handled), pad that have links + * (already discovered through iterating over links) and pads + * not internally connected. + */ + if (origin == local || !local->num_links || + !media_entity_has_pad_interdep(origin->entity, origin->index, + local->index)) + continue; + + ret = media_pipeline_add_pad(pipe, walk, local); + if (ret) + return ret; + } + return 0; } @@ -744,7 +780,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad, struct media_pad *pad = ppad->pad; struct media_entity *entity = pad->entity; bool has_enabled_link = false; - bool has_link = false; struct media_link *link; dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name, @@ -774,7 +809,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad, /* Record if the pad has links and enabled links. */ if (link->flags & MEDIA_LNK_FL_ENABLED) has_enabled_link = true; - has_link = true; /* * Validate the link if it's enabled and has the @@ -812,7 +846,7 @@ __must_check int __media_pipeline_start(struct media_pad *pad, * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set, * ensure that it has either no link or an enabled link. */ - if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link && + if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && !has_enabled_link) { dev_dbg(mdev->dev, "Pad '%s':%u must be connected by an enabled link\n", @@ -957,6 +991,9 @@ static void __media_entity_remove_link(struct media_entity *entity, /* Remove the reverse links for a data link. 
*/ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) { + link->source->num_links--; + link->sink->num_links--; + if (link->source->entity == entity) remote = link->sink->entity; else @@ -1017,6 +1054,11 @@ media_create_pad_link(struct media_entity *source, u16 source_pad, struct media_link *link; struct media_link *backlink; + if (flags & MEDIA_LNK_FL_LINK_TYPE) + return -EINVAL; + + flags |= MEDIA_LNK_FL_DATA_LINK; + if (WARN_ON(!source || !sink) || WARN_ON(source_pad >= source->num_pads) || WARN_ON(sink_pad >= sink->num_pads)) @@ -1032,7 +1074,7 @@ media_create_pad_link(struct media_entity *source, u16 source_pad, link->source = &source->pads[source_pad]; link->sink = &sink->pads[sink_pad]; - link->flags = flags & ~MEDIA_LNK_FL_INTERFACE_LINK; + link->flags = flags; /* Initialize graph object embedded at the new link */ media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK, @@ -1063,6 +1105,9 @@ media_create_pad_link(struct media_entity *source, u16 source_pad, sink->num_links++; source->num_links++; + link->source->num_links++; + link->sink->num_links++; + return 0; } EXPORT_SYMBOL_GPL(media_create_pad_link); diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c index b065ccd06..378a1cba0 100644 --- a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c +++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c @@ -26,7 +26,7 @@ static void mtk_mdp_vpu_handle_init_ack(const struct mdp_ipi_comm_ack *msg) vpu->inst_addr = msg->vpu_inst_addr; } -static void mtk_mdp_vpu_ipi_handler(const void *data, unsigned int len, +static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len, void *priv) { const struct mdp_ipi_comm_ack *msg = data; diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c index cfc7ebed8..1ec29f1b1 100644 --- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c +++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c @@ -29,15 +29,7 @@ static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id, mtk_vcodec_ipi_handler handler, const char *name, void *priv) { - /* - * The handler we receive takes a void * as its first argument. We - * cannot change this because it needs to be passed down to the rproc - * subsystem when SCP is used. VPU takes a const argument, which is - * more constrained, so the conversion below is safe. 
- */ - ipi_handler_t handler_const = (ipi_handler_t)handler; - - return vpu_ipi_register(fw->pdev, id, handler_const, name, priv); + return vpu_ipi_register(fw->pdev, id, handler, name, priv); } static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf, diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c index 6beab9e86..44adf5cfc 100644 --- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c +++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c @@ -635,7 +635,7 @@ OUT_LOAD_FW: } EXPORT_SYMBOL_GPL(vpu_load_firmware); -static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv) +static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv) { struct mtk_vpu *vpu = priv; const struct vpu_run *run = data; diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.h b/drivers/media/platform/mediatek/vpu/mtk_vpu.h index a56053ff1..da05f3e74 100644 --- a/drivers/media/platform/mediatek/vpu/mtk_vpu.h +++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.h @@ -17,7 +17,7 @@ * VPU interfaces with other blocks by share memory and interrupt. */ -typedef void (*ipi_handler_t) (const void *data, +typedef void (*ipi_handler_t) (void *data, unsigned int len, void *priv); diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c index d4540684e..0bcb9db5a 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c @@ -701,6 +701,9 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx) unsigned int i; u32 status; + if (!rkisp1->irqs_enabled) + return IRQ_NONE; + status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS); if (!status) return IRQ_NONE; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h index f9ec1c613..5776292f9 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h @@ -467,6 +467,7 @@ struct rkisp1_debug { * @debug: debug params to be exposed on debugfs * @info: version-specific ISP information * @irqs: IRQ line numbers + * @irqs_enabled: the hardware is enabled and can cause interrupts */ struct rkisp1_device { void __iomem *base_addr; @@ -488,6 +489,7 @@ struct rkisp1_device { struct rkisp1_debug debug; const struct rkisp1_info *info; int irqs[RKISP1_NUM_IRQS]; + bool irqs_enabled; }; /* diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c index e862f515c..95b6e41c4 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c @@ -211,6 +211,9 @@ irqreturn_t rkisp1_csi_isr(int irq, void *ctx) struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); u32 val, status; + if (!rkisp1->irqs_enabled) + return IRQ_NONE; + status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS); if (!status) return IRQ_NONE; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c index 41abb18b0..7a3b69ba5 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c @@ -305,6 +305,24 @@ static int __maybe_unused rkisp1_runtime_suspend(struct device *dev) { struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); + rkisp1->irqs_enabled = false; + /* Make sure the IRQ handler will see the above */ + mb(); + + /* + * 
Wait until any running IRQ handler has returned. The IRQ handler + * may get called even after this (as it's a shared interrupt line) + * but the 'irqs_enabled' flag will make the handler return immediately. + */ + for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) { + if (rkisp1->irqs[il] == -1) + continue; + + /* Skip if the irq line is the same as previous */ + if (il == 0 || rkisp1->irqs[il - 1] != rkisp1->irqs[il]) + synchronize_irq(rkisp1->irqs[il]); + } + clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks); return pinctrl_pm_select_sleep_state(dev); } @@ -321,6 +339,10 @@ static int __maybe_unused rkisp1_runtime_resume(struct device *dev) if (ret) return ret; + rkisp1->irqs_enabled = true; + /* Make sure the IRQ handler will see the above */ + mb(); + return 0; } diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c index 00dca284c..2af5c1a48 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c @@ -1023,6 +1023,9 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx) struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); u32 status, isp_err; + if (!rkisp1->irqs_enabled) + return IRQ_NONE; + status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS); if (!status) return IRQ_NONE; diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c index aa65d70b6..7a2f558c9 100644 --- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c +++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c @@ -66,6 +66,7 @@ static void deinterlace_device_run(void *priv) struct vb2_v4l2_buffer *src, *dst; unsigned int hstep, vstep; dma_addr_t addr; + int i; src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); @@ -160,6 +161,26 @@ static void deinterlace_device_run(void *priv) deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep); deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep); + /* neutral filter coefficients */ + deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL, + DEINTERLACE_FRM_CTRL_COEF_ACCESS); + readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val, + val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40); + + for (i = 0; i < 32; i++) { + deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4, + DEINTERLACE_IDENTITY_COEF); + deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4, + DEINTERLACE_IDENTITY_COEF); + deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4, + DEINTERLACE_IDENTITY_COEF); + deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4, + DEINTERLACE_IDENTITY_COEF); + } + + deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL, + DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0); + deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL, DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK, DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field)); @@ -248,7 +269,6 @@ static irqreturn_t deinterlace_irq(int irq, void *data) static void deinterlace_init(struct deinterlace_dev *dev) { u32 val; - int i; deinterlace_write(dev, DEINTERLACE_BYPASS, DEINTERLACE_BYPASS_CSC); @@ -284,27 +304,7 @@ static void deinterlace_init(struct deinterlace_dev *dev) deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF, DEINTERLACE_CHROMA_DIFF_TH_MSK, - DEINTERLACE_CHROMA_DIFF_TH(5)); - - /* neutral filter coefficients */ - deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL, - DEINTERLACE_FRM_CTRL_COEF_ACCESS); - readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val, - val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40); - - for (i 
= 0; i < 32; i++) { - deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4, - DEINTERLACE_IDENTITY_COEF); - deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4, - DEINTERLACE_IDENTITY_COEF); - deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4, - DEINTERLACE_IDENTITY_COEF); - deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4, - DEINTERLACE_IDENTITY_COEF); - } - - deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL, - DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0); + DEINTERLACE_CHROMA_DIFF_TH(31)); } static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file) @@ -931,11 +931,18 @@ static int deinterlace_runtime_resume(struct device *device) return ret; } + ret = reset_control_deassert(dev->rstc); + if (ret) { + dev_err(dev->dev, "Failed to apply reset\n"); + + goto err_exclusive_rate; + } + ret = clk_prepare_enable(dev->bus_clk); if (ret) { dev_err(dev->dev, "Failed to enable bus clock\n"); - goto err_exclusive_rate; + goto err_rst; } ret = clk_prepare_enable(dev->mod_clk); @@ -952,23 +959,16 @@ static int deinterlace_runtime_resume(struct device *device) goto err_mod_clk; } - ret = reset_control_deassert(dev->rstc); - if (ret) { - dev_err(dev->dev, "Failed to apply reset\n"); - - goto err_ram_clk; - } - deinterlace_init(dev); return 0; -err_ram_clk: - clk_disable_unprepare(dev->ram_clk); err_mod_clk: clk_disable_unprepare(dev->mod_clk); err_bus_clk: clk_disable_unprepare(dev->bus_clk); +err_rst: + reset_control_assert(dev->rstc); err_exclusive_rate: clk_rate_exclusive_put(dev->mod_clk); @@ -979,11 +979,12 @@ static int deinterlace_runtime_suspend(struct device *device) { struct deinterlace_dev *dev = dev_get_drvdata(device); - reset_control_assert(dev->rstc); - clk_disable_unprepare(dev->ram_clk); clk_disable_unprepare(dev->mod_clk); clk_disable_unprepare(dev->bus_clk); + + reset_control_assert(dev->rstc); + clk_rate_exclusive_put(dev->mod_clk); return 0; diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c index 57ded9ff3..29bc63021 100644 --- a/drivers/media/tuners/xc4000.c +++ b/drivers/media/tuners/xc4000.c @@ -1515,10 +1515,10 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) { struct xc4000_priv *priv = fe->tuner_priv; + mutex_lock(&priv->lock); *freq = priv->freq_hz + priv->freq_offset; if (debug) { - mutex_lock(&priv->lock); if ((priv->cur_fw.type & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) { u16 snr = 0; @@ -1529,8 +1529,8 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) return 0; } } - mutex_unlock(&priv->lock); } + mutex_unlock(&priv->lock); dprintk(1, "%s()\n", __func__); diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 4d037c92a..bae76023c 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -4094,6 +4094,10 @@ static int em28xx_usb_probe(struct usb_interface *intf, * topology will likely change after the load of the em28xx subdrivers. */ #ifdef CONFIG_MEDIA_CONTROLLER + /* + * No need to check the return value, the device will still be + * usable without media controller API. 
+ */ retval = media_device_register(dev->media_dev); #endif diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c index 0c24e2984..eb03f98b2 100644 --- a/drivers/media/usb/go7007/go7007-driver.c +++ b/drivers/media/usb/go7007/go7007-driver.c @@ -80,7 +80,7 @@ static int go7007_load_encoder(struct go7007 *go) const struct firmware *fw_entry; char fw_name[] = "go7007/go7007fw.bin"; void *bounce; - int fw_len, rv = 0; + int fw_len; u16 intr_val, intr_data; if (go->boot_fw == NULL) { @@ -109,9 +109,11 @@ static int go7007_load_encoder(struct go7007 *go) go7007_read_interrupt(go, &intr_val, &intr_data) < 0 || (intr_val & ~0x1) != 0x5a5a) { v4l2_err(go, "error transferring firmware\n"); - rv = -1; + kfree(go->boot_fw); + go->boot_fw = NULL; + return -1; } - return rv; + return 0; } MODULE_FIRMWARE("go7007/go7007fw.bin"); diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c index eeb85981e..762c13e49 100644 --- a/drivers/media/usb/go7007/go7007-usb.c +++ b/drivers/media/usb/go7007/go7007-usb.c @@ -1201,7 +1201,9 @@ static int go7007_usb_probe(struct usb_interface *intf, u16 channel; /* read channel number from GPIO[1:0] */ - go7007_read_addr(go, 0x3c81, &channel); + if (go7007_read_addr(go, 0x3c81, &channel)) + goto allocfail; + channel &= 0x3; go->board_id = GO7007_BOARDID_ADLINK_MPG24; usb->board = board = &board_adlink_mpg24; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c index 1764674de..73c95ba23 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-context.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c @@ -90,8 +90,10 @@ static void pvr2_context_destroy(struct pvr2_context *mp) } -static void pvr2_context_notify(struct pvr2_context *mp) +static void pvr2_context_notify(void *ptr) { + struct pvr2_context *mp = ptr; + pvr2_context_set_notify(mp,!0); } @@ -106,9 +108,7 @@ static void pvr2_context_check(struct pvr2_context *mp) pvr2_trace(PVR2_TRACE_CTXT, "pvr2_context %p (initialize)", mp); /* Finish hardware initialization */ - if (pvr2_hdw_initialize(mp->hdw, - (void (*)(void *))pvr2_context_notify, - mp)) { + if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) { mp->video_stream.stream = pvr2_hdw_get_video_stream(mp->hdw); /* Trigger interface initialization. 
By doing this @@ -267,9 +267,9 @@ static void pvr2_context_exit(struct pvr2_context *mp) void pvr2_context_disconnect(struct pvr2_context *mp) { pvr2_hdw_disconnect(mp->hdw); - mp->disconnect_flag = !0; if (!pvr2_context_shutok()) pvr2_context_notify(mp); + mp->disconnect_flag = !0; } diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c index 26811efe0..9a9bae21c 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c @@ -88,8 +88,10 @@ static int pvr2_dvb_feed_thread(void *data) return stat; } -static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap) +static void pvr2_dvb_notify(void *ptr) { + struct pvr2_dvb_adapter *adap = ptr; + wake_up(&adap->buffer_wait_data); } @@ -149,7 +151,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap) } pvr2_stream_set_callback(pvr->video_stream.stream, - (pvr2_stream_callback) pvr2_dvb_notify, adap); + pvr2_dvb_notify, adap); ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT); if (ret < 0) return ret; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index c04ab7258..d608b793f 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -1033,8 +1033,10 @@ static int pvr2_v4l2_open(struct file *file) } -static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp) +static void pvr2_v4l2_notify(void *ptr) { + struct pvr2_v4l2_fh *fhp = ptr; + wake_up(&fhp->wait_data); } @@ -1067,7 +1069,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh) hdw = fh->channel.mc_head->hdw; sp = fh->pdi->stream->stream; - pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh); + pvr2_stream_set_callback(sp, pvr2_v4l2_notify, fh); pvr2_hdw_set_stream_type(hdw,fh->pdi->config); if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret; return pvr2_ioread_set_enabled(fh->rhp,!0); @@ -1198,11 +1200,6 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip, dip->minor_type = pvr2_v4l_type_video; nr_ptr = video_nr; caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO; - if (!dip->stream) { - pr_err(KBUILD_MODNAME - ": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n"); - return; - } break; case VFL_TYPE_VBI: dip->config = pvr2_config_vbi; diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index be7fde1ed..97645d650 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -1084,11 +1084,17 @@ static int v4l2_m2m_register_entity(struct media_device *mdev, entity->function = function; ret = media_entity_pads_init(entity, num_pads, pads); - if (ret) + if (ret) { + kfree(entity->name); + entity->name = NULL; return ret; + } ret = media_device_register_entity(mdev, entity); - if (ret) + if (ret) { + kfree(entity->name); + entity->name = NULL; return ret; + } return 0; } diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c index 5d3715a28..dbe100994 100644 --- a/drivers/mfd/altera-sysmgr.c +++ b/drivers/mfd/altera-sysmgr.c @@ -110,7 +110,9 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np, dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver, (void *)sysmgr_np); - of_node_put(sysmgr_np); + if (property) + of_node_put(sysmgr_np); + if (!dev) return ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 6196724ef..ecfe15122 100644 --- a/drivers/mfd/syscon.c +++ 
b/drivers/mfd/syscon.c @@ -223,7 +223,9 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np, return ERR_PTR(-ENODEV); regmap = syscon_node_to_regmap(syscon_np); - of_node_put(syscon_np); + + if (property) + of_node_put(syscon_np); return regmap; } diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c index fdec2c30e..63c717053 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c @@ -199,8 +199,14 @@ static int lis3lv02d_i2c_suspend(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lis3lv02d *lis3 = i2c_get_clientdata(client); - if (!lis3->pdata || !lis3->pdata->wakeup_flags) + /* Turn on for wakeup if turned off by runtime suspend */ + if (lis3->pdata && lis3->pdata->wakeup_flags) { + if (pm_runtime_suspended(dev)) + lis3lv02d_poweron(lis3); + /* For non wakeup turn off if not already turned off by runtime suspend */ + } else if (!pm_runtime_suspended(dev)) lis3lv02d_poweroff(lis3); + return 0; } @@ -209,13 +215,12 @@ static int lis3lv02d_i2c_resume(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct lis3lv02d *lis3 = i2c_get_clientdata(client); - /* - * pm_runtime documentation says that devices should always - * be powered on at resume. Pm_runtime turns them off after system - * wide resume is complete. - */ - if (!lis3->pdata || !lis3->pdata->wakeup_flags || - pm_runtime_suspended(dev)) + /* Turn back off if turned on for wakeup and runtime suspended*/ + if (lis3->pdata && lis3->pdata->wakeup_flags) { + if (pm_runtime_suspended(dev)) + lis3lv02d_poweroff(lis3); + /* For non wakeup turn back on if not runtime suspended */ + } else if (!pm_runtime_suspended(dev)) lis3lv02d_poweron(lis3); return 0; diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index bdc65d50b..3390ff511 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -112,6 +112,8 @@ #define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ #define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */ +#define MEI_DEV_ID_ARL_S 0x7F68 /* Arrow Lake Point S */ +#define MEI_DEV_ID_ARL_H 0x7770 /* Arrow Lake Point H */ /* * MEI HW Section diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 5bf0d50d5..f8219cbd2 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -119,6 +119,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)}, /* required last entry */ {0, } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index ea60efaec..657772546 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -415,7 +415,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( struct mmc_blk_ioc_data *idata; int err; - idata = kmalloc(sizeof(*idata), GFP_KERNEL); + idata = kzalloc(sizeof(*idata), GFP_KERNEL); if (!idata) { err = -ENOMEM; goto out; @@ -490,7 +490,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, if (idata->flags & MMC_BLK_IOC_DROP) return 0; - if (idata->flags & MMC_BLK_IOC_SBC) + if (idata->flags & MMC_BLK_IOC_SBC && i > 0) prev_idata = idatas[i - 1]; /* @@ -889,10 +889,11 @@ static const struct block_device_operations mmc_bdops = { static int mmc_blk_part_switch_pre(struct mmc_card *card, unsigned int 
part_type) { - const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; + const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; + const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; int ret = 0; - if ((part_type & mask) == mask) { + if ((part_type & mask) == rpmb) { if (card->ext_csd.cmdq_en) { ret = mmc_cmdq_disable(card); if (ret) @@ -907,10 +908,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card, static int mmc_blk_part_switch_post(struct mmc_card *card, unsigned int part_type) { - const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; + const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; + const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; int ret = 0; - if ((part_type & mask) == mask) { + if ((part_type & mask) == rpmb) { mmc_retune_unpause(card->host); if (card->reenable_cmdq && !card->ext_csd.cmdq_en) ret = mmc_cmdq_enable(card); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 033be559a..bfb7c8b96 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1442,6 +1442,9 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + if (host->tuning_mode != SDHCI_TUNING_MODE_3) + mmc_retune_needed(host->mmc); + if (omap_host->con != -EINVAL) sdhci_runtime_suspend_host(host); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 437048bb8..5024cae41 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -259,6 +259,8 @@ static void tmio_mmc_reset_work(struct work_struct *work) else mrq->cmd->error = -ETIMEDOUT; + /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */ + host->mrq = ERR_PTR(-EBUSY); host->cmd = NULL; host->data = NULL; diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index 9aa3027ca..f2abebb2d 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -886,7 +886,6 @@ static int wmt_mci_remove(struct platform_device *pdev) { struct mmc_host *mmc; struct wmt_mci_priv *priv; - struct resource *res; u32 reg_tmp; mmc = platform_get_drvdata(pdev); @@ -914,9 +913,6 @@ static int wmt_mci_remove(struct platform_device *pdev) clk_disable_unprepare(priv->clk_sdmmc); clk_put(priv->clk_sdmmc); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - mmc_free_host(mmc); dev_info(&pdev->dev, "WMT MCI device removed\n"); diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c index 19dad5a23..8cdb35121 100644 --- a/drivers/mtd/maps/physmap-core.c +++ b/drivers/mtd/maps/physmap-core.c @@ -524,7 +524,7 @@ static int physmap_flash_probe(struct platform_device *dev) if (!info->maps[i].phys) info->maps[i].phys = res->start; - info->win_order = get_bitmask_order(resource_size(res)) - 1; + info->win_order = fls64(resource_size(res)) - 1; info->maps[i].size = BIT(info->win_order + (info->gpios ? 
info->gpios->ndescs : 0));
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c index 452ecaf77..1cfe3dd0b 100644 --- a/drivers/mtd/nand/raw/lpc32xx_mlc.c +++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c @@ -303,8 +303,9 @@ static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip) return 0; } -static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host) +static irqreturn_t lpc3xxx_nand_irq(int irq, void *data) { + struct lpc32xx_nand_host *host = data; uint8_t sr; /* Clear interrupt flag by reading status */ @@ -779,7 +780,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) goto release_dma_chan; } - if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq, + if (request_irq(host->irq, &lpc3xxx_nand_irq, IRQF_TRIGGER_HIGH, DRV_NAME, host)) { dev_err(&pdev->dev, "Error requesting NAND IRQ\n"); res = -ENXIO;
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 0aeac8ccb..05925fb69 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -63,7 +63,7 @@ #define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \ ( \ (cmd_dir) | \ - ((ran) << 19) | \ + (ran) | \ ((bch) << 14) | \ ((short_mode) << 13) | \ (((page_size) & 0x7f) << 6) | \
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index ca2d9efe6..1060e1920 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -85,9 +85,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi) sizeof(struct ubi_fm_scan_pool) + sizeof(struct ubi_fm_scan_pool) + (ubi->peb_count * sizeof(struct ubi_fm_ec)) + - (sizeof(struct ubi_fm_eba) + - (ubi->peb_count * sizeof(__be32))) + - sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES; + ((sizeof(struct ubi_fm_eba) + + sizeof(struct ubi_fm_volhdr)) * + (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) + + (ubi->peb_count * sizeof(__be32)); return roundup(size, ubi->leb_size); }
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index f700f0e4f..6e5489e23 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai) * The number of supported volumes is limited by the eraseblock size * and by the UBI_MAX_VOLUMES constant. */ + + if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) { + ubi_err(ubi, "LEB size too small for a volume record"); + return -EINVAL; + } + ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE; if (ubi->vtbl_slots > UBI_MAX_VOLUMES) ubi->vtbl_slots = UBI_MAX_VOLUMES;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index b988c8a40..07065c1af 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -998,20 +998,56 @@ unlock_exit: mutex_unlock(&priv->reg_mutex); } +/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std + * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA + * must only be propagated to C-VLAN and MAC Bridge components. That means + * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports, + * these frames are supposed to be processed by the CPU (software). So we make + * the switch only forward them to the CPU port. And if received from a CPU + * port, forward to a single port. The software is responsible of making the + * switch conform to the latter by setting a single port as destination port on + * the special tag. + * + * This switch intellectual property cannot conform to this part of the standard + * fully.
Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC + * DAs, it also includes :22-FF which the scope of propagation is not supposed + * to be restricted for these MAC DAs. + */ static void mt753x_trap_frames(struct mt7530_priv *priv) { - /* Trap BPDUs to the CPU port(s) */ - mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, + /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them + * VLAN-untagged. + */ + mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK | + MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK | + MT753X_BPDU_PORT_FW_MASK, + MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) | + MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | MT753X_BPDU_CPU_ONLY); - /* Trap 802.1X PAE frames to the CPU port(s) */ - mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK, - MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY)); + /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress + * them VLAN-untagged. + */ + mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK | + MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK | + MT753X_R01_PORT_FW_MASK, + MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) | + MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + MT753X_BPDU_CPU_ONLY); - /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */ - mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK, - MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY)); + /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress + * them VLAN-untagged. + */ + mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK | + MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK | + MT753X_R03_PORT_FW_MASK, + MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) | + MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + MT753X_BPDU_CPU_ONLY); } static int @@ -2187,11 +2223,11 @@ mt7530_setup(struct dsa_switch *ds) */ if (priv->mcm) { reset_control_assert(priv->rstc); - usleep_range(1000, 1100); + usleep_range(5000, 5100); reset_control_deassert(priv->rstc); } else { gpiod_set_value_cansleep(priv->reset, 0); - usleep_range(1000, 1100); + usleep_range(5000, 5100); gpiod_set_value_cansleep(priv->reset, 1); } @@ -2401,11 +2437,11 @@ mt7531_setup(struct dsa_switch *ds) */ if (priv->mcm) { reset_control_assert(priv->rstc); - usleep_range(1000, 1100); + usleep_range(5000, 5100); reset_control_deassert(priv->rstc); } else { gpiod_set_value_cansleep(priv->reset, 0); - usleep_range(1000, 1100); + usleep_range(5000, 5100); gpiod_set_value_cansleep(priv->reset, 1); } diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 6202b0f8c..fa2afa67c 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -63,14 +63,33 @@ enum mt753x_id { /* Registers for BPDU and PAE frame control*/ #define MT753X_BPC 0x24 -#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0) +#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22) +#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x) #define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16) #define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x) +#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6) +#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x) +#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0) + +/* Register for :01 and :02 MAC DA frame control */ +#define MT753X_RGAC1 0x28 +#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22) +#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x) +#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16) 
+#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x) +#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6) +#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x) +#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0) /* Register for :03 and :0E MAC DA frame control */ #define MT753X_RGAC2 0x2c +#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22) +#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x) #define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16) #define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x) +#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6) +#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x) +#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0) enum mt753x_bpdu_port_fw { MT753X_BPDU_FOLLOW_MFC, @@ -251,6 +270,7 @@ enum mt7530_port_mode { enum mt7530_vlan_port_eg_tag { MT7530_VLAN_EG_DISABLED = 0, MT7530_VLAN_EG_CONSISTENT = 1, + MT7530_VLAN_EG_UNTAGGED = 4, }; enum mt7530_vlan_port_attr { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 044b8afde..9e82e7b9c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3174,22 +3174,6 @@ error_drop_packet: return NETDEV_TX_OK; } -static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev) -{ - u16 qid; - /* we suspect that this is good for in--kernel network services that - * want to loop incoming skb rx to tx in normal user generated traffic, - * most probably we will not get to this - */ - if (skb_rx_queue_recorded(skb)) - qid = skb_get_rx_queue(skb); - else - qid = netdev_pick_tx(dev, skb, NULL); - - return qid; -} - static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev) { struct device *dev = &pdev->dev; @@ -3359,7 +3343,6 @@ static const struct net_device_ops ena_netdev_ops = { .ndo_open = ena_open, .ndo_stop = ena_close, .ndo_start_xmit = ena_start_xmit, - .ndo_select_queue = ena_select_queue, .ndo_get_stats64 = ena_get_stats64, .ndo_tx_timeout = ena_tx_timeout, .ndo_change_mtu = ena_change_mtu, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index d8b1824c3..0bc1367fd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1002,9 +1002,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp, struct bnx2x_alloc_pool *pool) { - if (!pool->page) - return; - put_page(pool->page); pool->page = NULL; @@ -1015,6 +1012,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, { int i; + if (!fp->page_pool.page) + return; + if (fp->mode == TPA_MODE_DISABLED) return; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 97d12c7ee..ebff14b08 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2236,8 +2236,6 @@ static int fec_enet_mii_probe(struct net_device *ndev) fep->link = 0; fep->full_duplex = 0; - phy_dev->mac_managed_pm = 1; - phy_attached_info(phy_dev); return 0; @@ -2249,10 +2247,12 @@ static int fec_enet_mii_init(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); bool suppress_preamble = false; + struct phy_device *phydev; struct device_node *node; int err = -ENXIO; u32 mii_speed, holdtime; u32 bus_freq; + int 
addr; /* * The i.MX28 dual fec interfaces are not equal. @@ -2362,6 +2362,13 @@ static int fec_enet_mii_init(struct platform_device *pdev) goto err_out_free_mdiobus; of_node_put(node); + /* find all the PHY devices on the bus and set mac_managed_pm to true */ + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { + phydev = mdiobus_get_phy(fep->mii_bus, addr); + if (phydev) + phydev->mac_managed_pm = true; + } + mii_cnt++; /* save fec0 mii_bus */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c index f3c9395d8..618f66d95 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c @@ -85,7 +85,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS, true); - desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); + desc.data[0] = cpu_to_le32(tqp->index); ret = hclge_comm_cmd_send(hw, &desc, 1); if (ret) { dev_err(&hw->cmq.csq.pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c index 3b6dbf158..f72dc0cee 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c @@ -76,7 +76,7 @@ static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app) if (hns3_nic_resetting(ndev)) return -EBUSY; - if (h->kinfo.dcb_ops->ieee_setapp) + if (h->kinfo.dcb_ops->ieee_delapp) return h->kinfo.dcb_ops->ieee_delapp(h, app); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index e22835ae8..9fce976a0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -78,6 +78,9 @@ static const struct hns3_stats hns3_rxq_stats[] = { #define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 #define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 #define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 +#define HNS3_NIC_LB_TEST_UNEXECUTED 4 + +static int hns3_get_sset_count(struct net_device *netdev, int stringset); static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) { @@ -419,18 +422,26 @@ static void hns3_do_external_lb(struct net_device *ndev, static void hns3_self_test(struct net_device *ndev, struct ethtool_test *eth_test, u64 *data) { + int cnt = hns3_get_sset_count(ndev, ETH_SS_TEST); struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; int st_param[HNAE3_LOOP_NONE][2]; bool if_running = netif_running(ndev); + int i; + + /* initialize the loopback test result, avoid marking an unexcuted + * loopback test as PASS. 
+ */ + for (i = 0; i < cnt; i++) + data[i] = HNS3_NIC_LB_TEST_UNEXECUTED; if (hns3_nic_resetting(ndev)) { netdev_err(ndev, "dev resetting!"); - return; + goto failure; } if (!(eth_test->flags & ETH_TEST_FL_OFFLINE)) - return; + goto failure; if (netif_msg_ifdown(h)) netdev_info(ndev, "self test start\n"); @@ -452,6 +463,10 @@ static void hns3_self_test(struct net_device *ndev, if (netif_msg_ifdown(h)) netdev_info(ndev, "self test end\n"); + return; + +failure: + eth_test->flags |= ETH_TEST_FL_FAILED; } static void hns3_update_limit_promisc_mode(struct net_device *netdev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 48b0cb5ec..9db363fbc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2990,7 +2990,10 @@ static int hclge_mac_init(struct hclge_dev *hdev) int ret; hdev->support_sfp_query = true; - hdev->hw.mac.duplex = HCLGE_MAC_FULL; + + if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + hdev->hw.mac.duplex = HCLGE_MAC_FULL; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, hdev->hw.mac.duplex, hdev->hw.mac.lane_num); if (ret) @@ -11601,6 +11604,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) goto err_pci_uninit; + devl_lock(hdev->devlink); + /* Firmware command queue initialize */ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); if (ret) @@ -11775,6 +11780,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_task_schedule(hdev, round_jiffies_relative(HZ)); + devl_unlock(hdev->devlink); return 0; err_mdiobus_unreg: @@ -11787,6 +11793,7 @@ err_msi_uninit: err_cmd_uninit: hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); err_devlink_uninit: + devl_unlock(hdev->devlink); hclge_devlink_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.hw.io_base); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index a40b1583f..0f06f95b0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -120,7 +120,7 @@ void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb, u64 ns = nsec; u32 sec_h; - if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags)) + if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags)) return; /* Since the BD does not have enough space for the higher 16 bits of diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h index 8510b88d4..f3cd5a376 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h @@ -24,7 +24,7 @@ TRACE_EVENT(hclge_pf_mbx_get, __field(u8, code) __field(u8, subcode) __string(pciname, pci_name(hdev->pdev)) - __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) + __string(devname, hdev->vport[0].nic.kinfo.netdev->name) __array(u32, mbx_data, PF_GET_MBX_LEN) ), @@ -33,7 +33,7 @@ TRACE_EVENT(hclge_pf_mbx_get, __entry->code = req->msg.code; __entry->subcode = req->msg.subcode; __assign_str(pciname, pci_name(hdev->pdev)); - __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); + __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name); memcpy(__entry->mbx_data, req, sizeof(struct hclge_mbx_vf_to_pf_cmd)); ), @@ -56,7 +56,7 @@ TRACE_EVENT(hclge_pf_mbx_send, __field(u8, vfid) __field(u16, 
code) __string(pciname, pci_name(hdev->pdev)) - __string(devname, &hdev->vport[0].nic.kinfo.netdev->name) + __string(devname, hdev->vport[0].nic.kinfo.netdev->name) __array(u32, mbx_data, PF_SEND_MBX_LEN) ), @@ -64,7 +64,7 @@ TRACE_EVENT(hclge_pf_mbx_send, __entry->vfid = req->dest_vfid; __entry->code = le16_to_cpu(req->msg.code); __assign_str(pciname, pci_name(hdev->pdev)); - __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); + __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name); memcpy(__entry->mbx_data, req, sizeof(struct hclge_mbx_pf_to_vf_cmd)); ), diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h index 5d4895bb5..b259e95dd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h @@ -23,7 +23,7 @@ TRACE_EVENT(hclge_vf_mbx_get, __field(u8, vfid) __field(u16, code) __string(pciname, pci_name(hdev->pdev)) - __string(devname, &hdev->nic.kinfo.netdev->name) + __string(devname, hdev->nic.kinfo.netdev->name) __array(u32, mbx_data, VF_GET_MBX_LEN) ), @@ -31,7 +31,7 @@ TRACE_EVENT(hclge_vf_mbx_get, __entry->vfid = req->dest_vfid; __entry->code = le16_to_cpu(req->msg.code); __assign_str(pciname, pci_name(hdev->pdev)); - __assign_str(devname, &hdev->nic.kinfo.netdev->name); + __assign_str(devname, hdev->nic.kinfo.netdev->name); memcpy(__entry->mbx_data, req, sizeof(struct hclge_mbx_pf_to_vf_cmd)); ), @@ -55,7 +55,7 @@ TRACE_EVENT(hclge_vf_mbx_send, __field(u8, code) __field(u8, subcode) __string(pciname, pci_name(hdev->pdev)) - __string(devname, &hdev->nic.kinfo.netdev->name) + __string(devname, hdev->nic.kinfo.netdev->name) __array(u32, mbx_data, VF_SEND_MBX_LEN) ), @@ -64,7 +64,7 @@ TRACE_EVENT(hclge_vf_mbx_send, __entry->code = req->msg.code; __entry->subcode = req->msg.subcode; __assign_str(pciname, pci_name(hdev->pdev)); - __assign_str(devname, &hdev->nic.kinfo.netdev->name); + __assign_str(devname, hdev->nic.kinfo.netdev->name); memcpy(__entry->mbx_data, req, sizeof(struct hclge_mbx_vf_to_pf_cmd)); ), diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7d4cc4eaf..5293fc009 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -108,7 +108,7 @@ #define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */ /* driver state flags */ -enum i40e_state_t { +enum i40e_state { __I40E_TESTING, __I40E_CONFIG_BUSY, __I40E_CONFIG_DONE, @@ -156,7 +156,7 @@ enum i40e_state_t { BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED) /* VSI state flags */ -enum i40e_vsi_state_t { +enum i40e_vsi_state { __I40E_VSI_DOWN, __I40E_VSI_NEEDS_RESTART, __I40E_VSI_SYNCING_FILTERS, @@ -992,6 +992,8 @@ struct i40e_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; bool arm_wb_state; + bool in_busy_poll; + int irq_num; /* IRQ assigned to this q_vector */ } ____cacheline_internodealigned_in_smp; /* lan device */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d8a7fb21b..a9db1ed74 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1249,8 +1249,11 @@ int i40e_count_filters(struct i40e_vsi *vsi) int bkt; int cnt = 0; - hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) - ++cnt; + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { + if (f->state == 
I40E_FILTER_NEW || + f->state == I40E_FILTER_ACTIVE) + ++cnt; + } return cnt; } @@ -3888,6 +3891,12 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; + /* Set ITR for software interrupts triggered after exiting + * busy-loop polling. + */ + wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1), + I40E_ITR_20K); + wr32(hw, I40E_PFINT_RATEN(vector - 1), i40e_intrl_usec_to_reg(vsi->int_rate_limit)); @@ -4142,6 +4151,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) } /* register for affinity change notifications */ + q_vector->irq_num = irq_num; q_vector->affinity_notify.notify = i40e_irq_affinity_notify; q_vector->affinity_notify.release = i40e_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 97a9efe7b..5f2555848 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -34,7 +34,7 @@ enum i40e_ptp_pin { GPIO_4 }; -enum i40e_can_set_pins_t { +enum i40e_can_set_pins { CANT_DO_PINS = -1, CAN_SET_PINS, CAN_DO_PINS @@ -192,7 +192,7 @@ static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw) * return CAN_DO_PINS if pins can be manipulated within a NIC or * return CANT_DO_PINS otherwise. **/ -static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf) +static enum i40e_can_set_pins i40e_can_set_pins(struct i40e_pf *pf) { if (!i40e_is_ptp_pin_dev(&pf->hw)) { dev_warn(&pf->pdev->dev, @@ -1081,7 +1081,7 @@ static void i40e_ptp_set_pins_hw(struct i40e_pf *pf) static int i40e_ptp_set_pins(struct i40e_pf *pf, struct i40e_ptp_pins_settings *pins) { - enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf); + enum i40e_can_set_pins pin_caps = i40e_can_set_pins(pf); int i = 0; if (pin_caps == CANT_DO_PINS) diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 7339003aa..694cb3e45 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -328,8 +328,11 @@ #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 #define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) #define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 #define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 94cf82668..3d83fccf7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2571,7 +2571,22 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) return failure ? 
budget : (int)total_rx_packets; } -static inline u32 i40e_buildreg_itr(const int type, u16 itr) +/** + * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register + * @itr_idx: interrupt throttling index + * @interval: interrupt throttling interval value in usecs + * @force_swint: force software interrupt + * + * The function builds a value for I40E_PFINT_DYN_CTLN register that + * is used to update interrupt throttling interval for specified ITR index + * and optionally enforces a software interrupt. If the @itr_idx is equal + * to I40E_ITR_NONE then no interval change is applied and only @force_swint + * parameter is taken into account. If the interval change and enforced + * software interrupt are not requested then the built value just enables + * appropriate vector interrupt. + **/ +static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval, + bool force_swint) { u32 val; @@ -2585,23 +2600,33 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr) * an event in the PBA anyway so we need to rely on the automask * to hold pending events for us until the interrupt is re-enabled * - * The itr value is reported in microseconds, and the register - * value is recorded in 2 microsecond units. For this reason we - * only need to shift by the interval shift - 1 instead of the - * full value. + * We have to shift the given value as it is reported in microseconds + * and the register value is recorded in 2 microsecond units. */ - itr &= I40E_ITR_MASK; + interval >>= 1; + /* 1. Enable vector interrupt + * 2. Update the interval for the specified ITR index + * (I40E_ITR_NONE in the register is used to indicate that + * no interval update is requested) + */ val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1)); + FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) | + FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval); + + /* 3. Enforce software interrupt trigger if requested + * (These software interrupts rate is limited by ITR2 that is + * set to 20K interrupts per second) + */ + if (force_swint) + val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | + FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK, + I40E_SW_ITR); return val; } -/* a small macro to shorten up some long lines */ -#define INTREG I40E_PFINT_DYN_CTLN - /* The act of updating the ITR will cause it to immediately trigger. In order * to prevent this from throwing off adaptive update statistics we defer the * update so that it can only happen so often. 
So after either Tx or Rx are @@ -2620,8 +2645,10 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr) static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { + enum i40e_dyn_idx itr_idx = I40E_ITR_NONE; struct i40e_hw *hw = &vsi->back->hw; - u32 intval; + u16 interval = 0; + u32 itr_val; /* If we don't have MSIX, then we only need to re-enable icr0 */ if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { @@ -2643,8 +2670,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ if (q_vector->rx.target_itr < q_vector->rx.current_itr) { /* Rx ITR needs to be reduced, this is highest priority */ - intval = i40e_buildreg_itr(I40E_RX_ITR, - q_vector->rx.target_itr); + itr_idx = I40E_RX_ITR; + interval = q_vector->rx.target_itr; q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || @@ -2653,25 +2680,36 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, /* Tx ITR needs to be reduced, this is second priority * Tx ITR needs to be increased more than Rx, fourth priority */ - intval = i40e_buildreg_itr(I40E_TX_ITR, - q_vector->tx.target_itr); + itr_idx = I40E_TX_ITR; + interval = q_vector->tx.target_itr; q_vector->tx.current_itr = q_vector->tx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { /* Rx ITR needs to be increased, third priority */ - intval = i40e_buildreg_itr(I40E_RX_ITR, - q_vector->rx.target_itr); + itr_idx = I40E_RX_ITR; + interval = q_vector->rx.target_itr; q_vector->rx.current_itr = q_vector->rx.target_itr; q_vector->itr_countdown = ITR_COUNTDOWN_START; } else { /* No ITR update, lowest priority */ - intval = i40e_buildreg_itr(I40E_ITR_NONE, 0); if (q_vector->itr_countdown) q_vector->itr_countdown--; } - if (!test_bit(__I40E_VSI_DOWN, vsi->state)) - wr32(hw, INTREG(q_vector->reg_idx), intval); + /* Do not update interrupt control register if VSI is down */ + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + return; + + /* Update ITR interval if necessary and enforce software interrupt + * if we are exiting busy poll. + */ + if (q_vector->in_busy_poll) { + itr_val = i40e_buildreg_itr(itr_idx, interval, true); + q_vector->in_busy_poll = false; + } else { + itr_val = i40e_buildreg_itr(itr_idx, interval, false); + } + wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val); } /** @@ -2778,6 +2816,8 @@ tx_only: */ if (likely(napi_complete_done(napi, work_done))) i40e_update_enable_itr(vsi, q_vector); + else + q_vector->in_busy_poll = true; return min(work_done, budget - 1); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 768290dc6..6e567d343 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -57,7 +57,7 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl) * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any * register but instead is a special value meaning "don't update" ITR0/1/2. 
*/ -enum i40e_dyn_idx_t { +enum i40e_dyn_idx { I40E_IDX_ITR0 = 0, I40E_IDX_ITR1 = 1, I40E_IDX_ITR2 = 2, @@ -67,6 +67,7 @@ enum i40e_dyn_idx_t { /* these are indexes into ITRN registers */ #define I40E_RX_ITR I40E_IDX_ITR0 #define I40E_TX_ITR I40E_IDX_ITR1 +#define I40E_SW_ITR I40E_IDX_ITR2 /* Supported RSS offloads */ #define I40E_DEFAULT_RSS_HENA ( \ @@ -304,7 +305,7 @@ struct i40e_rx_queue_stats { u64 page_busy_count; }; -enum i40e_ring_state_t { +enum i40e_ring_state { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, __I40E_RING_STATE_NBITS /* must be last */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index ed4be80fe..a5f0c95cb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1626,8 +1626,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) { struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; - int i, v; u32 reg; + int i; /* If we don't have any VFs, then there is nothing to reset */ if (!pf->num_alloc_vfs) @@ -1638,11 +1638,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) return false; /* Begin reset on all VFs at once */ - for (v = 0; v < pf->num_alloc_vfs; v++) { - vf = &pf->vf[v]; + for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { /* If VF is being reset no need to trigger reset again */ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) - i40e_trigger_vf_reset(&pf->vf[v], flr); + i40e_trigger_vf_reset(vf, flr); } /* HW requires some time to make sure it can flush the FIFO for a VF @@ -1651,14 +1650,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) * the VFs using a simple iterator that increments once that VF has * finished resetting. */ - for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { + for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) { usleep_range(10000, 20000); /* Check each VF in sequence, beginning with the VF to fail * the previous check. */ - while (v < pf->num_alloc_vfs) { - vf = &pf->vf[v]; + while (vf < &pf->vf[pf->num_alloc_vfs]) { if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) @@ -1668,7 +1666,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) /* If the current VF has finished resetting, move on * to the next VF in sequence. */ - v++; + ++vf; } } @@ -1678,39 +1676,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) /* Display a warning if at least one VF didn't manage to reset in * time, but continue on with the operation. */ - if (v < pf->num_alloc_vfs) + if (vf < &pf->vf[pf->num_alloc_vfs]) dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", - pf->vf[v].vf_id); + vf->vf_id); usleep_range(10000, 20000); /* Begin disabling all the rings associated with VFs, but do not wait * between each VF. */ - for (v = 0; v < pf->num_alloc_vfs; v++) { + for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { /* On initial reset, we don't have any queues to disable */ - if (pf->vf[v].lan_vsi_idx == 0) + if (vf->lan_vsi_idx == 0) continue; /* If VF is reset in another thread just continue */ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) continue; - i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]); + i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]); } /* Now that we've notified HW to disable all of the VF rings, wait * until they finish. 
*/ - for (v = 0; v < pf->num_alloc_vfs; v++) { + for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { /* On initial reset, we don't have any queues to disable */ - if (pf->vf[v].lan_vsi_idx == 0) + if (vf->lan_vsi_idx == 0) continue; /* If VF is reset in another thread just continue */ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) continue; - i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]); + i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]); } /* Hw may need up to 50ms to finish disabling the RX queues. We @@ -1719,12 +1717,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) mdelay(50); /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) { + for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { /* If VF is reset in another thread just continue */ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) continue; - i40e_cleanup_reset_vf(&pf->vf[v]); + i40e_cleanup_reset_vf(vf); } i40e_flush(hw); @@ -3078,11 +3076,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) /* Allow to delete VF primary MAC only if it was not set * administratively by PF or if VF is trusted. */ - if (ether_addr_equal(addr, vf->default_lan_addr.addr) && - i40e_can_vf_change_mac(vf)) - was_unimac_deleted = true; - else - continue; + if (ether_addr_equal(addr, vf->default_lan_addr.addr)) { + if (i40e_can_vf_change_mac(vf)) + was_unimac_deleted = true; + else + continue; + } if (i40e_del_mac_filter(vsi, al->list[i].addr)) { ret = I40E_ERR_INVALID_MAC_ADDR; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 45ce4ed16..81d9a5338 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6926,44 +6926,31 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) static void igb_tsync_interrupt(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 ack = 0, tsicr = rd32(E1000_TSICR); + u32 tsicr = rd32(E1000_TSICR); struct ptp_clock_event event; if (tsicr & TSINTR_SYS_WRAP) { event.type = PTP_CLOCK_PPS; if (adapter->ptp_caps.pps) ptp_clock_event(adapter->ptp_clock, &event); - ack |= TSINTR_SYS_WRAP; } if (tsicr & E1000_TSICR_TXTS) { /* retrieve hardware timestamp */ schedule_work(&adapter->ptp_tx_work); - ack |= E1000_TSICR_TXTS; } - if (tsicr & TSINTR_TT0) { + if (tsicr & TSINTR_TT0) igb_perout(adapter, 0); - ack |= TSINTR_TT0; - } - if (tsicr & TSINTR_TT1) { + if (tsicr & TSINTR_TT1) igb_perout(adapter, 1); - ack |= TSINTR_TT1; - } - if (tsicr & TSINTR_AUTT0) { + if (tsicr & TSINTR_AUTT0) igb_extts(adapter, 0); - ack |= TSINTR_AUTT0; - } - if (tsicr & TSINTR_AUTT1) { + if (tsicr & TSINTR_AUTT1) igb_extts(adapter, 1); - ack |= TSINTR_AUTT1; - } - - /* acknowledge the interrupts */ - wr32(E1000_TSICR, ack); } static irqreturn_t igb_msix_other(int irq, void *data) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 774de63dd..15fc2acff 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -908,7 +908,13 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) goto err_out; } - xs = kzalloc(sizeof(*xs), GFP_KERNEL); + algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1); + if (unlikely(!algo)) { + err = -ENOENT; + goto err_out; + } + + xs = kzalloc(sizeof(*xs), GFP_ATOMIC); if (unlikely(!xs)) { err = -ENOMEM; goto err_out; @@ -924,14 +930,8 
@@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4)); xs->xso.dev = adapter->netdev; - algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1); - if (unlikely(!algo)) { - err = -ENOENT; - goto err_xs; - } - aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8; - xs->aead = kzalloc(aead_len, GFP_KERNEL); + xs->aead = kzalloc(aead_len, GFP_ATOMIC); if (unlikely(!xs->aead)) { err = -ENOMEM; goto err_xs; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 90be87dc1..254cad45a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -814,6 +814,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id, if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); + cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; + cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0; @@ -1346,7 +1351,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data) /* Release thread waiting for completion */ lmac->cmd_pend = false; - wake_up_interruptible(&lmac->wq_cmd_cmplt); + wake_up(&lmac->wq_cmd_cmplt); break; case CGX_EVT_ASYNC: if (cgx_event_is_linkevent(event)) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index 9690ac01f..7d741e3ba 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid) } EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp); -void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) +static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; void *hw_mbase = mdev->hwbase; + u64 intr_val; tx_hdr = hw_mbase + mbox->tx_start; rx_hdr = hw_mbase + mbox->rx_start; @@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) spin_unlock(&mdev->mbox_lock); + /* Check if interrupt pending */ + intr_val = readq((void __iomem *)mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift))); + + intr_val |= data; /* The interrupt should be fired after num_msgs is written * to the shared memory */ - writeq(1, (void __iomem *)mbox->reg_base + + writeq(intr_val, (void __iomem *)mbox->reg_base + (mbox->trigger | (devid << mbox->tr_shift))); } + +void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) +{ + otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG); +} EXPORT_SYMBOL(otx2_mbox_msg_send); +void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid) +{ + otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG); +} +EXPORT_SYMBOL(otx2_mbox_msg_send_up); + +bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid) +{ + u64 data; + + data = readq((void __iomem *)mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift))); + + /* If data is non-zero wait for ~1ms and return to caller + * whether data has changed to zero or not after the wait. 
+ */ + if (!data) + return true; + + usleep_range(950, 1000); + + data = readq((void __iomem *)mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift))); + + return data == 0; +} +EXPORT_SYMBOL(otx2_mbox_wait_for_zero); + struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size, int size_rsp) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 03ebabd61..be70269e9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -16,6 +16,9 @@ #define MBOX_SIZE SZ_64K +#define MBOX_DOWN_MSG 1 +#define MBOX_UP_MSG 2 + /* AF/PF: PF initiated, PF/VF VF initiated */ #define MBOX_DOWN_RX_START 0 #define MBOX_DOWN_RX_SIZE (46 * SZ_1K) @@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase, struct pci_dev *pdev, void __force *reg_base, int direction, int ndevs, unsigned long *bmap); void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid); +void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid); int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid); int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid); struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, @@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox, return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0); } +bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid); + /* Mailbox message types */ #define MBOX_MSG_MASK 0xFFFF #define MBOX_MSG_INVALID 0xFFFE @@ -196,6 +202,9 @@ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \ M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \ msg_rsp) \ M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \ +M(CPT_LF_RESET, 0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp) \ +M(CPT_FLT_ENG_INFO, 0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \ + cpt_flt_eng_info_rsp) \ /* SDP mbox IDs (range 0x1000 - 0x11FF) */ \ M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \ M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \ @@ -1702,6 +1711,28 @@ struct cpt_inst_lmtst_req { u64 rsvd; }; +/* Mailbox message format to request for CPT LF reset */ +struct cpt_lf_rst_req { + struct mbox_msghdr hdr; + u32 slot; + u32 rsvd; +}; + +/* Mailbox message format to request for CPT faulted engines */ +struct cpt_flt_eng_info_req { + struct mbox_msghdr hdr; + int blkaddr; + bool reset; + u32 rsvd; +}; + +struct cpt_flt_eng_info_rsp { + struct mbox_msghdr hdr; + u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU]; + u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU]; + u64 rsvd; +}; + struct sdp_node_info { /* Node to which this PF belons to */ u8 node_id; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c index dfd23580e..d39d86e69 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c @@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event) static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) { struct mcs_intr_info *req; - int err, pf; + int pf; pf = rvu_get_pf(event->pcifunc); + mutex_lock(&rvu->mbox_lock); + req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf); - if (!req) + if (!req) { + mutex_unlock(&rvu->mbox_lock); return -ENOMEM; + } req->mcs_id = 
event->mcs_id; req->intr_mask = event->intr_mask; @@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) req->hdr.pcifunc = event->pcifunc; req->lmac_id = event->lmac_id; - otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf); - err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); - if (err) - dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf); + otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf); + + otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + + mutex_unlock(&rvu->mbox_lock); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index d88d86bf0..a7034b47e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1164,8 +1164,16 @@ cpt: goto nix_err; } + err = rvu_cpt_init(rvu); + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__); + goto mcs_err; + } + return 0; +mcs_err: + rvu_mcs_exit(rvu); nix_err: rvu_nix_freemem(rvu); npa_err: @@ -2106,7 +2114,7 @@ bad_message: } } -static void __rvu_mbox_handler(struct rvu_work *mwork, int type) +static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll) { struct rvu *rvu = mwork->rvu; int offset, err, id, devid; @@ -2173,6 +2181,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type) } mw->mbox_wrk[devid].num_msgs = 0; + if (poll) + otx2_mbox_wait_for_zero(mbox, devid); + /* Send mbox responses to VF/PF */ otx2_mbox_msg_send(mbox, devid); } @@ -2180,15 +2191,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type) static inline void rvu_afpf_mbox_handler(struct work_struct *work) { struct rvu_work *mwork = container_of(work, struct rvu_work, work); + struct rvu *rvu = mwork->rvu; - __rvu_mbox_handler(mwork, TYPE_AFPF); + mutex_lock(&rvu->mbox_lock); + __rvu_mbox_handler(mwork, TYPE_AFPF, true); + mutex_unlock(&rvu->mbox_lock); } static inline void rvu_afvf_mbox_handler(struct work_struct *work) { struct rvu_work *mwork = container_of(work, struct rvu_work, work); - __rvu_mbox_handler(mwork, TYPE_AFVF); + __rvu_mbox_handler(mwork, TYPE_AFVF, false); } static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type) @@ -2363,6 +2377,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, } } + mutex_init(&rvu->mbox_lock); + mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL); if (!mbox_regions) { err = -ENOMEM; @@ -2512,10 +2528,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first, } } -static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq) { struct rvu *rvu = (struct rvu *)rvu_irq; - int vfs = rvu->vfs; u64 intr; intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); @@ -2529,6 +2544,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr); + return IRQ_HANDLED; +} + +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + int vfs = rvu->vfs; + u64 intr; + + /* Sync with mbox memory region */ + rmb(); + /* Handle VF interrupts */ if (vfs > 64) { intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1)); @@ -2865,7 +2892,7 @@ static int rvu_register_interrupts(struct rvu *rvu) /* Register mailbox interrupt handler */ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); ret = request_irq(pci_irq_vector(rvu->pdev, 
RVU_AF_INT_VEC_MBOX), - rvu_mbox_intr_handler, 0, + rvu_mbox_pf_intr_handler, 0, &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); if (ret) { dev_err(rvu->dev, @@ -3039,9 +3066,8 @@ static int rvu_flr_init(struct rvu *rvu) cfg | BIT_ULL(22)); } - rvu->flr_wq = alloc_workqueue("rvu_afpf_flr", - WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, - 1); + rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!rvu->flr_wq) return -ENOMEM; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 0b76dfa97..a3ae21398 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -109,6 +109,8 @@ struct rvu_block { u64 lfreset_reg; unsigned char name[NAME_SIZE]; struct rvu *rvu; + u64 cpt_flt_eng_map[3]; + u64 cpt_rcvrd_eng_map[3]; }; struct nix_mcast { @@ -506,6 +508,7 @@ struct rvu { struct ptp *ptp; int mcs_blk_cnt; + int cpt_pf_num; #ifdef CONFIG_DEBUG_FS struct rvu_debugfs rvu_dbg; @@ -520,6 +523,10 @@ struct rvu { struct list_head mcs_intrq_head; /* mcs interrupt queue lock */ spinlock_t mcs_intrq_lock; + /* CPT interrupt lock */ + spinlock_t cpt_intr_lock; + + struct mutex mbox_lock; /* Serialize mbox up and down msgs */ }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -872,6 +879,7 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu); int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot); int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc); +int rvu_cpt_init(struct rvu *rvu); #define NDC_AF_BANK_MASK GENMASK_ULL(7, 0) #define NDC_AF_BANK_LINE_MASK GENMASK_ULL(31, 16) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index bcb4385d0..cc6d6c94f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) continue; lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { + if (iter >= MAX_LMAC_COUNT) + continue; lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter); rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); @@ -232,7 +234,7 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) struct cgx_link_user_info *linfo; struct cgx_link_info_msg *msg; unsigned long pfmap; - int err, pfid; + int pfid; linfo = &event->link_uinfo; pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id); @@ -250,16 +252,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) continue; } + mutex_lock(&rvu->mbox_lock); + /* Send mbox message to PF */ msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid); - if (!msg) + if (!msg) { + mutex_unlock(&rvu->mbox_lock); continue; + } + msg->link_info = *linfo; - otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid); - err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); - if (err) - dev_warn(rvu->dev, "notification to pf %d failed\n", - pfid); + + otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid); + + otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid); + + mutex_unlock(&rvu->mbox_lock); } while (pfmap); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 38bbae5d9..6fb02b93c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -37,34 +37,68 @@ (_rsp)->free_sts_##etype = free_sts; \ }) -static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr) +static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr) { struct rvu_block *block = ptr; struct rvu *rvu = block->rvu; int blkaddr = block->addr; - u64 reg0, reg1, reg2; - - reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0)); - reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1)); - if (!is_rvu_otx2(rvu)) { - reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2)); - dev_err_ratelimited(rvu->dev, - "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx", - reg0, reg1, reg2); - } else { - dev_err_ratelimited(rvu->dev, - "Received CPTAF FLT irq : 0x%llx, 0x%llx", - reg0, reg1); + u64 reg, val; + int i, eng; + u8 grp; + + reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec)); + dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg); + + i = -1; + while ((i = find_next_bit((unsigned long *)®, 64, i + 1)) < 64) { + switch (vec) { + case 0: + eng = i; + break; + case 1: + eng = i + 64; + break; + case 2: + eng = i + 128; + break; + } + grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF; + /* Disable and enable the engine which triggers fault */ + rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0); + val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng)); + rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL); + + rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp); + rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL); + + spin_lock(&rvu->cpt_intr_lock); + block->cpt_flt_eng_map[vec] |= BIT_ULL(i); + val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng)); + val = val & 0x3; + if (val == 0x1 || val == 0x2) + block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i); + spin_unlock(&rvu->cpt_intr_lock); } - - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0); - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1); - if (!is_rvu_otx2(rvu)) - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg); return IRQ_HANDLED; } +static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr) +{ + return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr); +} + +static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr) +{ + return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr); +} + +static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr) +{ + return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr); +} + static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr) { struct rvu_block *block = ptr; @@ -119,8 +153,10 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off) int i; /* Disable all CPT AF interrupts */ - for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++) - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF); + rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1); rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1); @@ -151,7 +187,7 @@ static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr) /* Disable all CPT AF interrupts */ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++) - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL); rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1); rvu_write64(rvu, blkaddr, 
CPT_AF_RAS_INT_ENA_W1C, 0x1); @@ -172,16 +208,31 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off) { struct rvu *rvu = block->rvu; int blkaddr = block->addr; + irq_handler_t flt_fn; int i, ret; for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) { sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i); + + switch (i) { + case CPT_10K_AF_INT_VEC_FLT0: + flt_fn = rvu_cpt_af_flt0_intr_handler; + break; + case CPT_10K_AF_INT_VEC_FLT1: + flt_fn = rvu_cpt_af_flt1_intr_handler; + break; + case CPT_10K_AF_INT_VEC_FLT2: + flt_fn = rvu_cpt_af_flt2_intr_handler; + break; + } ret = rvu_cpt_do_register_interrupt(block, off + i, - rvu_cpt_af_flt_intr_handler, - &rvu->irq_name[(off + i) * NAME_SIZE]); + flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]); if (ret) goto err; - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1); + if (i == CPT_10K_AF_INT_VEC_FLT2) + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF); + else + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL); } ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU, @@ -208,8 +259,8 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; + irq_handler_t flt_fn; int i, offs, ret = 0; - char irq_name[16]; if (!is_block_implemented(rvu->hw, blkaddr)) return 0; @@ -226,13 +277,20 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr) return cpt_10k_register_interrupts(block, offs); for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) { - snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i); + sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i); + switch (i) { + case CPT_AF_INT_VEC_FLT0: + flt_fn = rvu_cpt_af_flt0_intr_handler; + break; + case CPT_AF_INT_VEC_FLT1: + flt_fn = rvu_cpt_af_flt1_intr_handler; + break; + } ret = rvu_cpt_do_register_interrupt(block, offs + i, - rvu_cpt_af_flt_intr_handler, - irq_name); + flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]); if (ret) goto err; - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1); + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL); } ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU, @@ -290,7 +348,7 @@ static int get_cpt_pf_num(struct rvu *rvu) static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc) { - int cpt_pf_num = get_cpt_pf_num(rvu); + int cpt_pf_num = rvu->cpt_pf_num; if (rvu_get_pf(pcifunc) != cpt_pf_num) return false; @@ -302,7 +360,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc) static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc) { - int cpt_pf_num = get_cpt_pf_num(rvu); + int cpt_pf_num = rvu->cpt_pf_num; if (rvu_get_pf(pcifunc) != cpt_pf_num) return false; @@ -801,6 +859,64 @@ int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req, return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc); } +int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + int cptlf, blkaddr, ret; + u16 actual_slot; + u64 ctl, ctl2; + + blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc, + req->slot, &actual_slot); + if (blkaddr < 0) + return CPT_AF_ERR_LF_INVALID; + + block = &rvu->hw->block[blkaddr]; + + cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot); + if (cptlf < 0) + return CPT_AF_ERR_LF_INVALID; + ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); + + ret = 
rvu_lf_reset(rvu, block, cptlf); + if (ret) + dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", + block->addr, cptlf); + + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl); + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2); + + return 0; +} + +int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req, + struct cpt_flt_eng_info_rsp *rsp) +{ + struct rvu_block *block; + unsigned long flags; + int blkaddr, vec; + + blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); + if (blkaddr < 0) + return blkaddr; + + block = &rvu->hw->block[blkaddr]; + for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) { + spin_lock_irqsave(&rvu->cpt_intr_lock, flags); + rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec]; + rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec]; + if (req->reset) { + block->cpt_flt_eng_map[vec] = 0x0; + block->cpt_rcvrd_eng_map[vec] = 0x0; + } + spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags); + } + return 0; +} + static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr) { struct cpt_rxc_time_cfg_req req; @@ -940,7 +1056,7 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr, int nix_blkaddr) { - int cpt_pf_num = get_cpt_pf_num(rvu); + int cpt_pf_num = rvu->cpt_pf_num; struct cpt_inst_lmtst_req *req; dma_addr_t res_daddr; int timeout = 3000; @@ -1084,3 +1200,12 @@ unlock: return 0; } + +int rvu_cpt_init(struct rvu *rvu) +{ + /* Retrieve CPT PF number */ + rvu->cpt_pf_num = get_cpt_pf_num(rvu); + spin_lock_init(&rvu->cpt_intr_lock); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 55639c133..91a4ea529 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -1669,7 +1669,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz, struct npc_coalesced_kpu_prfl *img_data = NULL; int i = 0, rc = -EINVAL; void __iomem *kpu_prfl_addr; - u16 offset; + u32 offset; img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr; if (le64_to_cpu(img_data->signature) == KPU_SIGN && diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 44950c254..c15d1864a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -785,7 +785,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid) if (!otx2_mbox_nonempty(&mbox->mbox_up, devid)) return 0; - otx2_mbox_msg_send(&mbox->mbox_up, devid); + otx2_mbox_msg_send_up(&mbox->mbox_up, devid); err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid); if (err) return err; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index a2d8ac620..6b7fb324e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -272,8 +272,7 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) { int vf; - pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq", - WQ_UNBOUND | WQ_HIGHPRI, 1); + pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI); if (!pf->flr_wq) return -ENOMEM; @@ -292,8 +291,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) return 0; } -static void otx2_queue_work(struct mbox *mw, struct 
workqueue_struct *mbox_wq, - int first, int mdevs, u64 intr, int type) +static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, + int first, int mdevs, u64 intr) { struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; @@ -307,40 +306,26 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq, mbox = &mw->mbox; mdev = &mbox->dev[i]; - if (type == TYPE_PFAF) - otx2_sync_mbox_bbuf(mbox, i); hdr = mdev->mbase + mbox->rx_start; /* The hdr->num_msgs is set to zero immediately in the interrupt - * handler to ensure that it holds a correct value next time - * when the interrupt handler is called. - * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler - * pf>mbox.up_num_msgs holds the data for use in - * pfaf_mbox_up_handler. + * handler to ensure that it holds a correct value next time + * when the interrupt handler is called. pf->mw[i].num_msgs + * holds the data for use in otx2_pfvf_mbox_handler and + * pf->mw[i].up_num_msgs holds the data for use in + * otx2_pfvf_mbox_up_handler. */ if (hdr->num_msgs) { mw[i].num_msgs = hdr->num_msgs; hdr->num_msgs = 0; - if (type == TYPE_PFAF) - memset(mbox->hwbase + mbox->rx_start, 0, - ALIGN(sizeof(struct mbox_hdr), - sizeof(u64))); - queue_work(mbox_wq, &mw[i].mbox_wrk); } mbox = &mw->mbox_up; mdev = &mbox->dev[i]; - if (type == TYPE_PFAF) - otx2_sync_mbox_bbuf(mbox, i); hdr = mdev->mbase + mbox->rx_start; if (hdr->num_msgs) { mw[i].up_num_msgs = hdr->num_msgs; hdr->num_msgs = 0; - if (type == TYPE_PFAF) - memset(mbox->hwbase + mbox->rx_start, 0, - ALIGN(sizeof(struct mbox_hdr), - sizeof(u64))); - queue_work(mbox_wq, &mw[i].mbox_up_wrk); } } @@ -356,8 +341,10 @@ static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev, /* Msgs are already copied, trigger VF's mbox irq */ smp_wmb(); + otx2_mbox_wait_for_zero(pfvf_mbox, devid); + offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift); - writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset); + writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset); /* Restore VF's mbox bounce buffer region address */ src_mdev->mbase = bbuf_base; @@ -547,7 +534,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) end: offset = mbox->rx_start + msg->next_msgoff; if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1)) - __otx2_mbox_reset(mbox, 0); + __otx2_mbox_reset(mbox, vf_idx); mdev->msgs_acked++; } } @@ -564,8 +551,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) if (vfs > 64) { intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1)); otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); - otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr, - TYPE_PFVF); + otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr); if (intr) trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); vfs = 64; @@ -574,7 +560,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr); - otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF); + otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr); if (intr) trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); @@ -599,7 +585,7 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox", WQ_UNBOUND | WQ_HIGHPRI | - WQ_MEM_RECLAIM, 1); + WQ_MEM_RECLAIM, 0); if (!pf->mbox_pfvf_wq) return -ENOMEM; @@ -822,20 +808,22 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work) 
struct mbox *af_mbox; struct otx2_nic *pf; int offset, id; + u16 num_msgs; af_mbox = container_of(work, struct mbox, mbox_wrk); mbox = &af_mbox->mbox; mdev = &mbox->dev[0]; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + num_msgs = rsp_hdr->num_msgs; offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); pf = af_mbox->pfvf; - for (id = 0; id < af_mbox->num_msgs; id++) { + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2_process_pfaf_mbox_msg(pf, msg); offset = mbox->rx_start + msg->next_msgoff; - if (mdev->msgs_acked == (af_mbox->num_msgs - 1)) + if (mdev->msgs_acked == (num_msgs - 1)) __otx2_mbox_reset(mbox, 0); mdev->msgs_acked++; } @@ -946,12 +934,14 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) int offset, id, devid = 0; struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; + u16 num_msgs; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + num_msgs = rsp_hdr->num_msgs; offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); - for (id = 0; id < af_mbox->up_num_msgs; id++) { + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; @@ -960,10 +950,11 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) otx2_process_mbox_msg_up(pf, msg); offset = mbox->rx_start + msg->next_msgoff; } - if (devid) { + /* Forward to VF iff VFs are really present */ + if (devid && pci_num_vf(pf->pdev)) { otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up, MBOX_DIR_PFVF_UP, devid - 1, - af_mbox->up_num_msgs); + num_msgs); return; } @@ -973,16 +964,49 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)pf_irq; - struct mbox *mbox; + struct mbox *mw = &pf->mbox; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 mbox_data; /* Clear the IRQ */ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); - mbox = &pf->mbox; - trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0)); + mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0); + + if (mbox_data & MBOX_UP_MSG) { + mbox_data &= ~MBOX_UP_MSG; + otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data); + + mbox = &mw->mbox_up; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(pf->mbox_wq, &mw->mbox_up_wrk); + + trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF", + BIT_ULL(0)); + } + + if (mbox_data & MBOX_DOWN_MSG) { + mbox_data &= ~MBOX_DOWN_MSG; + otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data); + + mbox = &mw->mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(pf->mbox_wq, &mw->mbox_wrk); - otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF); + trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF", + BIT_ULL(0)); + } return IRQ_HANDLED; } @@ -1063,9 +1087,8 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf) int err; mbox->pfvf = pf; - pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox", - WQ_UNBOUND | WQ_HIGHPRI | - WQ_MEM_RECLAIM, 1); + pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!pf->mbox_wq) return -ENOMEM; @@ -1891,7 +1914,7 @@ int otx2_open(struct net_device *netdev) * mcam entries are enabled to receive the packets. 
Hence disable the * packet I/O. */ - if (err == EIO) + if (err == -EIO) goto err_disable_rxtx; else if (err) goto err_tx_stop_queues; @@ -3030,6 +3053,7 @@ static void otx2_vf_link_event_task(struct work_struct *work) struct otx2_vf_config *config; struct cgx_link_info_msg *req; struct mbox_msghdr *msghdr; + struct delayed_work *dwork; struct otx2_nic *pf; int vf_idx; @@ -3038,10 +3062,24 @@ static void otx2_vf_link_event_task(struct work_struct *work) vf_idx = config - config->pf->vf_configs; pf = config->pf; + if (config->intf_down) + return; + + mutex_lock(&pf->mbox.lock); + + dwork = &config->link_event_work; + + if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) { + schedule_delayed_work(dwork, msecs_to_jiffies(100)); + mutex_unlock(&pf->mbox.lock); + return; + } + msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx, sizeof(*req), sizeof(struct msg_rsp)); if (!msghdr) { dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx); + mutex_unlock(&pf->mbox.lock); return; } @@ -3050,7 +3088,11 @@ static void otx2_vf_link_event_task(struct work_struct *work) req->hdr.sig = OTX2_MBOX_REQ_SIG; memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info)); + otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx); + otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx); + + mutex_unlock(&pf->mbox.lock); } static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 404855bcc..dcb8190de 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -89,16 +89,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work) struct otx2_mbox *mbox; struct mbox *af_mbox; int offset, id; + u16 num_msgs; af_mbox = container_of(work, struct mbox, mbox_wrk); mbox = &af_mbox->mbox; mdev = &mbox->dev[0]; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - if (af_mbox->num_msgs == 0) + num_msgs = rsp_hdr->num_msgs; + + if (num_msgs == 0) return; + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); - for (id = 0; id < af_mbox->num_msgs; id++) { + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg); offset = mbox->rx_start + msg->next_msgoff; @@ -151,6 +155,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work) struct mbox *vf_mbox; struct otx2_nic *vf; int offset, id; + u16 num_msgs; vf_mbox = container_of(work, struct mbox, mbox_up_wrk); vf = vf_mbox->pfvf; @@ -158,12 +163,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work) mdev = &mbox->dev[0]; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - if (vf_mbox->up_num_msgs == 0) + num_msgs = rsp_hdr->num_msgs; + + if (num_msgs == 0) return; offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); - for (id = 0; id < vf_mbox->up_num_msgs; id++) { + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2vf_process_mbox_msg_up(vf, msg); offset = mbox->rx_start + msg->next_msgoff; @@ -178,40 +185,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq) struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; struct mbox_hdr *hdr; + u64 mbox_data; /* Clear the IRQ */ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); + mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0); + /* Read latest mbox data */ smp_rmb(); - /* Check for PF => 
VF response messages */ - mbox = &vf->mbox.mbox; - mdev = &mbox->dev[0]; - otx2_sync_mbox_bbuf(mbox, 0); + if (mbox_data & MBOX_DOWN_MSG) { + mbox_data &= ~MBOX_DOWN_MSG; + otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data); - trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0)); + /* Check for PF => VF response messages */ + mbox = &vf->mbox.mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); - hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - if (hdr->num_msgs) { - vf->mbox.num_msgs = hdr->num_msgs; - hdr->num_msgs = 0; - memset(mbox->hwbase + mbox->rx_start, 0, - ALIGN(sizeof(struct mbox_hdr), sizeof(u64))); - queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk); + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk); + + trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF", + BIT_ULL(0)); } - /* Check for PF => VF notification messages */ - mbox = &vf->mbox.mbox_up; - mdev = &mbox->dev[0]; - otx2_sync_mbox_bbuf(mbox, 0); - hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - if (hdr->num_msgs) { - vf->mbox.up_num_msgs = hdr->num_msgs; - hdr->num_msgs = 0; - memset(mbox->hwbase + mbox->rx_start, 0, - ALIGN(sizeof(struct mbox_hdr), sizeof(u64))); - queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk); + if (mbox_data & MBOX_UP_MSG) { + mbox_data &= ~MBOX_UP_MSG; + otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data); + + /* Check for PF => VF notification messages */ + mbox = &vf->mbox.mbox_up; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk); + + trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF", + BIT_ULL(0)); } return IRQ_HANDLED; @@ -293,9 +308,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf) int err; mbox->pfvf = vf; - vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox", - WQ_UNBOUND | WQ_HIGHPRI | - WQ_MEM_RECLAIM, 1); + vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox", + WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!vf->mbox_wq) return -ENOMEM; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 17e6ac444..fecf3dd22 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -561,8 +561,7 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode, mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); mcr_new = mcr_cur; mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | - MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK | - MAC_MCR_RX_FIFO_CLR_DIS; + MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS; /* Only update control register when needed! 
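Both reworked interrupt handlers above now read the mailbox doorbell register, clear only the bit they are about to service, and kick the matching work item only when the shared header actually carries messages. A condensed, self-contained sketch of that shape; the register, flag, and structure names here are made up and merely stand in for the RVU PF/VF mailbox ones:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/workqueue.h>

#define DOORBELL_DOWN_MSG	BIT_ULL(0)
#define DOORBELL_UP_MSG		BIT_ULL(1)

struct mbox_hdr {
	u16 num_msgs;			/* written by the producer side */
};

struct mbox_irq_ctx {
	void __iomem *doorbell;
	struct mbox_hdr *down_hdr, *up_hdr;
	struct workqueue_struct *wq;
	struct work_struct down_wrk, up_wrk;
};

static irqreturn_t mbox_intr_handler(int irq, void *dev_id)
{
	struct mbox_irq_ctx *ctx = dev_id;
	u64 pending = readq(ctx->doorbell);

	if (pending & DOORBELL_UP_MSG) {
		pending &= ~DOORBELL_UP_MSG;
		writeq(pending, ctx->doorbell);	/* clear only this bit */
		if (ctx->up_hdr->num_msgs)
			queue_work(ctx->wq, &ctx->up_wrk);
	}

	if (pending & DOORBELL_DOWN_MSG) {
		pending &= ~DOORBELL_DOWN_MSG;
		writeq(pending, ctx->doorbell);
		if (ctx->down_hdr->num_msgs)
			queue_work(ctx->wq, &ctx->down_wrk);
	}

	return IRQ_HANDLED;
}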
*/ if (mcr_new != mcr_cur) @@ -610,7 +609,7 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode, phylink_config); u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN); + mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK); mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } @@ -649,7 +648,7 @@ static void mtk_mac_link_up(struct phylink_config *config, if (rx_pause) mcr |= MAC_MCR_FORCE_RX_FC; - mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN; + mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK; mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index d6eed2045..c64211e22 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -811,7 +811,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe) MTK_PPE_KEEPALIVE_DISABLE) | FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) | FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE, - MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) | + MTK_PPE_SCAN_MODE_CHECK_AGE) | FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM, MTK_PPE_ENTRIES_SHIFT); if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) @@ -895,17 +895,21 @@ int mtk_ppe_stop(struct mtk_ppe *ppe) mtk_ppe_cache_enable(ppe, false); - /* disable offload engine */ - ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN); - ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0); - /* disable aging */ val = MTK_PPE_TB_CFG_AGE_NON_L4 | MTK_PPE_TB_CFG_AGE_UNBIND | MTK_PPE_TB_CFG_AGE_TCP | MTK_PPE_TB_CFG_AGE_UDP | - MTK_PPE_TB_CFG_AGE_TCP_FIN; + MTK_PPE_TB_CFG_AGE_TCP_FIN | + MTK_PPE_TB_CFG_SCAN_MODE; ppe_clear(ppe, MTK_PPE_TB_CFG, val); - return mtk_ppe_wait_busy(ppe); + if (mtk_ppe_wait_busy(ppe)) + return -ETIMEDOUT; + + /* disable offload engine */ + ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN); + ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0); + + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 83c465939..d6b4d163b 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/rtnetlink.h> #include <linux/skbuff.h> #include "mlxbf_gige.h" @@ -139,13 +140,10 @@ static int mlxbf_gige_open(struct net_device *netdev) control |= MLXBF_GIGE_CONTROL_PORT_EN; writeq(control, priv->base + MLXBF_GIGE_CONTROL); - err = mlxbf_gige_request_irqs(priv); - if (err) - return err; mlxbf_gige_cache_stats(priv); err = mlxbf_gige_clean_port(priv); if (err) - goto free_irqs; + return err; /* Clear driver's valid_polarity to match hardware, * since the above call to clean_port() resets the @@ -157,7 +155,7 @@ static int mlxbf_gige_open(struct net_device *netdev) err = mlxbf_gige_tx_init(priv); if (err) - goto free_irqs; + goto phy_deinit; err = mlxbf_gige_rx_init(priv); if (err) goto tx_deinit; @@ -166,6 +164,10 @@ static int mlxbf_gige_open(struct net_device *netdev) napi_enable(&priv->napi); netif_start_queue(netdev); + err = mlxbf_gige_request_irqs(priv); + if (err) + goto napi_deinit; + /* Set bits in INT_EN that we care about */ int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR | MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS | @@ -182,11 +184,17 @@ static int mlxbf_gige_open(struct net_device *netdev) return 0; +napi_deinit: + netif_stop_queue(netdev); + napi_disable(&priv->napi); + netif_napi_del(&priv->napi); + 
mlxbf_gige_rx_deinit(priv); + tx_deinit: mlxbf_gige_tx_deinit(priv); -free_irqs: - mlxbf_gige_free_irqs(priv); +phy_deinit: + phy_stop(phydev); return err; } @@ -410,8 +418,13 @@ static void mlxbf_gige_shutdown(struct platform_device *pdev) { struct mlxbf_gige *priv = platform_get_drvdata(pdev); - writeq(0, priv->base + MLXBF_GIGE_INT_EN); - mlxbf_gige_clean_port(priv); + rtnl_lock(); + netif_device_detach(priv->netdev); + + if (netif_running(priv->netdev)) + dev_close(priv->netdev); + + rtnl_unlock(); } static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = { diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index e804613fa..d5123e8c4 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -25,6 +25,8 @@ #define PCS_POWER_STATE_DOWN 0x6 #define PCS_POWER_STATE_UP 0x4 +#define RFE_RD_FIFO_TH_3_DWORDS 0x3 + static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter) { u32 chip_rev; @@ -3217,6 +3219,21 @@ static void lan743x_full_cleanup(struct lan743x_adapter *adapter) lan743x_pci_cleanup(adapter); } +static void pci11x1x_set_rfe_rd_fifo_threshold(struct lan743x_adapter *adapter) +{ + u16 rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_; + + if (rev == ID_REV_CHIP_REV_PCI11X1X_B0_) { + u32 misc_ctl; + + misc_ctl = lan743x_csr_read(adapter, MISC_CTL_0); + misc_ctl &= ~MISC_CTL_0_RFE_READ_FIFO_MASK_; + misc_ctl |= FIELD_PREP(MISC_CTL_0_RFE_READ_FIFO_MASK_, + RFE_RD_FIFO_TH_3_DWORDS); + lan743x_csr_write(adapter, MISC_CTL_0, misc_ctl); + } +} + static int lan743x_hardware_init(struct lan743x_adapter *adapter, struct pci_dev *pdev) { @@ -3232,6 +3249,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, pci11x1x_strap_get_status(adapter); spin_lock_init(&adapter->eth_syslock_spinlock); mutex_init(&adapter->sgmii_rw_lock); + pci11x1x_set_rfe_rd_fifo_threshold(adapter); } else { adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS; adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 67877d3b6..d304be17b 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -26,6 +26,7 @@ #define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) #define ID_REV_CHIP_REV_A0_ (0x00000000) #define ID_REV_CHIP_REV_B0_ (0x00000010) +#define ID_REV_CHIP_REV_PCI11X1X_B0_ (0x000000B0) #define FPGA_REV (0x04) #define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF) @@ -311,6 +312,9 @@ #define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8) #define SGMII_CTL_SGMII_POWER_DN_ BIT(1) +#define MISC_CTL_0 (0x920) +#define MISC_CTL_0_RFE_READ_FIFO_MASK_ GENMASK(6, 4) + /* Vendor Specific SGMII MMD details */ #define SR_VSMMD_PCS_ID1 0x0004 #define SR_VSMMD_PCS_ID2 0x0005 diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index e92860e20..c6a2c302a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -308,6 +308,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work) acti_netdevs = kmalloc_array(entry->slave_cnt, sizeof(*acti_netdevs), GFP_KERNEL); + if (!acti_netdevs) { + schedule_delayed_work(&lag->work, + NFP_FL_LAG_DELAY); + continue; + } /* Include sanity check in the loop. 
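The nfp lag_conf change above handles kmalloc_array() failure inside deferred work by rescheduling the work item instead of continuing with a NULL array. Roughly, with placeholder names for the surrounding context and an arbitrary retry delay:

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct lag_ctx {
	struct delayed_work work;
	unsigned int slave_cnt;
};

static void lag_do_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct lag_ctx *lag = container_of(dwork, struct lag_ctx, work);
	struct net_device **acti_netdevs;

	acti_netdevs = kmalloc_array(lag->slave_cnt,
				     sizeof(*acti_netdevs), GFP_KERNEL);
	if (!acti_netdevs) {
		/* Out of memory: retry shortly rather than dereference NULL. */
		schedule_delayed_work(&lag->work, msecs_to_jiffies(30));
		return;
	}

	/* ... build the active-netdev list and push it to firmware ... */

	kfree(acti_netdevs);
}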
It may be that a bond has * changed between processing the last notification and the diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 06663c11c..6e3417712 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -615,6 +615,9 @@ struct rtl8169_private { struct work_struct work; } wk; + spinlock_t config25_lock; + spinlock_t mac_ocp_lock; + unsigned supports_gmii:1; unsigned aspm_manageable:1; unsigned dash_enabled:1; @@ -678,6 +681,28 @@ static void rtl_pci_commit(struct rtl8169_private *tp) RTL_R8(tp, ChipCmd); } +static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set) +{ + unsigned long flags; + u8 val; + + spin_lock_irqsave(&tp->config25_lock, flags); + val = RTL_R8(tp, Config2); + RTL_W8(tp, Config2, (val & ~clear) | set); + spin_unlock_irqrestore(&tp->config25_lock, flags); +} + +static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set) +{ + unsigned long flags; + u8 val; + + spin_lock_irqsave(&tp->config25_lock, flags); + val = RTL_R8(tp, Config5); + RTL_W8(tp, Config5, (val & ~clear) | set); + spin_unlock_irqrestore(&tp->config25_lock, flags); +} + static bool rtl_is_8125(struct rtl8169_private *tp) { return tp->mac_version >= RTL_GIGA_MAC_VER_61; @@ -850,7 +875,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) (RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT; } -static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +static void __r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) { if (rtl_ocp_reg_failure(reg)) return; @@ -858,7 +883,16 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data); } -static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + unsigned long flags; + + spin_lock_irqsave(&tp->mac_ocp_lock, flags); + __r8168_mac_ocp_write(tp, reg, data); + spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); +} + +static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) { if (rtl_ocp_reg_failure(reg)) return 0; @@ -868,12 +902,28 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) return RTL_R32(tp, OCPDR); } +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + unsigned long flags; + u16 val; + + spin_lock_irqsave(&tp->mac_ocp_lock, flags); + val = __r8168_mac_ocp_read(tp, reg); + spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); + + return val; +} + static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, u16 set) { - u16 data = r8168_mac_ocp_read(tp, reg); + unsigned long flags; + u16 data; - r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); + spin_lock_irqsave(&tp->mac_ocp_lock, flags); + data = __r8168_mac_ocp_read(tp, reg); + __r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); + spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); } /* Work around a hw issue with RTL8168g PHY, the quirk disables @@ -1135,17 +1185,40 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01); } +static void rtl_dash_loop_wait(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long usecs, int n, bool high) +{ + if (!tp->dash_enabled) + return; + rtl_loop_wait(tp, c, usecs, n, high); +} + +static void rtl_dash_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned 
long d, int n) +{ + rtl_dash_loop_wait(tp, c, d, n, true); +} + +static void rtl_dash_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned long d, int n) +{ + rtl_dash_loop_wait(tp, c, d, n, false); +} + static void rtl8168dp_driver_start(struct rtl8169_private *tp) { r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); - rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); + rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10); } static void rtl8168ep_driver_start(struct rtl8169_private *tp) { r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); - rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30); + rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30); } static void rtl8168_driver_start(struct rtl8169_private *tp) @@ -1159,7 +1232,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp) static void rtl8168dp_driver_stop(struct rtl8169_private *tp) { r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); - rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); + rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10); } static void rtl8168ep_driver_stop(struct rtl8169_private *tp) @@ -1167,7 +1240,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp) rtl8168ep_stop_cmac(tp); r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); - rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); + rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10); } static void rtl8168_driver_stop(struct rtl8169_private *tp) @@ -1351,6 +1424,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { WAKE_MAGIC, Config3, MagicPacket } }; unsigned int i, tmp = ARRAY_SIZE(cfg); + unsigned long flags; u8 options; rtl_unlock_config_regs(tp); @@ -1369,12 +1443,14 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); } + spin_lock_irqsave(&tp->config25_lock, flags); for (i = 0; i < tmp; i++) { options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; RTL_W8(tp, cfg[i].reg, options); } + spin_unlock_irqrestore(&tp->config25_lock, flags); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: @@ -1386,10 +1462,10 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_37: case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: - options = RTL_R8(tp, Config2) & ~PME_SIGNAL; if (wolopts) - options |= PME_SIGNAL; - RTL_W8(tp, Config2, options); + rtl_mod_config2(tp, 0, PME_SIGNAL); + else + rtl_mod_config2(tp, PME_SIGNAL, 0); break; default: break; @@ -2696,10 +2772,13 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp) static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) { + if (tp->mac_version < RTL_GIGA_MAC_VER_32) + return; + /* Don't enable ASPM in the chip if OS can't control ASPM */ if (enable && tp->aspm_manageable) { - RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); - RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + rtl_mod_config5(tp, 0, ASPM_en); + rtl_mod_config2(tp, 0, ClkReqEn); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_46 ... 
RTL_GIGA_MAC_VER_48: @@ -2722,11 +2801,9 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) break; } - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_mod_config2(tp, ClkReqEn, 0); + rtl_mod_config5(tp, ASPM_en, 0); } - - udelay(10); } static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat, @@ -2884,7 +2961,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST); RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + rtl_mod_config5(tp, Spi_en, 0); } static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) @@ -2917,7 +2994,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + rtl_mod_config5(tp, Spi_en, 0); rtl_hw_aspm_clkreq_enable(tp, true); } @@ -2940,7 +3017,7 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + rtl_mod_config5(tp, Spi_en, 0); rtl8168_config_eee_mac(tp); } @@ -5032,6 +5109,15 @@ static int r8169_mdio_register(struct rtl8169_private *tp) struct mii_bus *new_bus; int ret; + /* On some boards with this chip version the BIOS is buggy and misses + * to reset the PHY page selector. This results in the PHY ID read + * accessing registers on a different page, returning a more or + * less random value. Fix this by resetting the page selector first. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_25 || + tp->mac_version == RTL_GIGA_MAC_VER_26) + r8169_mdio_write(tp, 0x1f, 0); + new_bus = devm_mdiobus_alloc(&pdev->dev); if (!new_bus) return -ENOMEM; @@ -5062,7 +5148,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp) return -EUNATCH; } - tp->phydev->mac_managed_pm = 1; + tp->phydev->mac_managed_pm = true; phy_support_asym_pause(tp->phydev); @@ -5200,6 +5286,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->eee_adv = -1; tp->ocp_base = OCP_STD_PHY_BASE; + spin_lock_init(&tp->config25_lock); + spin_lock_init(&tp->mac_ocp_lock); + dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, struct pcpu_sw_netstats); if (!dev->tstats) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index e7b700062..756ac4a07 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1290,25 +1290,16 @@ static int ravb_poll(struct napi_struct *napi, int budget) struct net_device *ndev = napi->dev; struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; - bool gptp = info->gptp || info->ccc_gac; - struct ravb_rx_desc *desc; unsigned long flags; int q = napi - priv->napi; int mask = BIT(q); int quota = budget; - unsigned int entry; + bool unmask; - if (!gptp) { - entry = priv->cur_rx[q] % priv->num_rx_ring[q]; - desc = &priv->gbeth_rx_ring[entry]; - } /* Processing RX Descriptor Ring */ /* Clear RX interrupt */ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); - if (gptp || desc->die_dt != DT_FEMPTY) { - if (ravb_rx(ndev, "a, q)) - goto out; - } + unmask = !ravb_rx(ndev, "a, q); /* Processing TX Descriptor Ring */ spin_lock_irqsave(&priv->lock, 
flags); @@ -1318,6 +1309,18 @@ static int ravb_poll(struct napi_struct *napi, int budget) netif_wake_subqueue(ndev, q); spin_unlock_irqrestore(&priv->lock, flags); + /* Receive error message handling */ + priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; + if (info->nc_queues) + priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; + if (priv->rx_over_errors != ndev->stats.rx_over_errors) + ndev->stats.rx_over_errors = priv->rx_over_errors; + if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) + ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; + + if (!unmask) + goto out; + napi_complete(napi); /* Re-enable RX/TX interrupts */ @@ -1331,14 +1334,6 @@ static int ravb_poll(struct napi_struct *napi, int budget) } spin_unlock_irqrestore(&priv->lock, flags); - /* Receive error message handling */ - priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; - if (info->nc_queues) - priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; - if (priv->rx_over_errors != ndev->stats.rx_over_errors) - ndev->stats.rx_over_errors = priv->rx_over_errors; - if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) - ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; out: return budget - quota; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 84276eb68..39112d5cb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -87,19 +87,41 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw, u32 prio, u32 queue) { void __iomem *ioaddr = hw->pcsr; - u32 base_register; - u32 value; + u32 clear_mask = 0; + u32 ctrl2, ctrl3; + int i; - base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3; - if (queue >= 4) - queue -= 4; + ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2); + ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3); - value = readl(ioaddr + base_register); + /* The software must ensure that the same priority + * is not mapped to multiple Rx queues + */ + for (i = 0; i < 4; i++) + clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) & + GMAC_RXQCTRL_PSRQX_MASK(i)); - value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue); - value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) & + ctrl2 &= ~clear_mask; + ctrl3 &= ~clear_mask; + + /* First assign new priorities to a queue, then + * clear them from others queues + */ + if (queue < 4) { + ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) & GMAC_RXQCTRL_PSRQX_MASK(queue); - writel(value, ioaddr + base_register); + + writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2); + writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3); + } else { + queue -= 4; + + ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) & + GMAC_RXQCTRL_PSRQX_MASK(queue); + + writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3); + writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2); + } } static void dwmac4_tx_queue_priority(struct mac_device_info *hw, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index ec1616ffb..dd73f38ec 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -97,17 +97,41 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio, u32 queue) { void __iomem *ioaddr = hw->pcsr; - u32 value, reg; + u32 clear_mask = 0; + u32 ctrl2, ctrl3; + int i; - reg = (queue < 4) ? 
XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3; - if (queue >= 4) + ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2); + ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3); + + /* The software must ensure that the same priority + * is not mapped to multiple Rx queues + */ + for (i = 0; i < 4; i++) + clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) & + XGMAC_PSRQ(i)); + + ctrl2 &= ~clear_mask; + ctrl3 &= ~clear_mask; + + /* First assign new priorities to a queue, then + * clear them from others queues + */ + if (queue < 4) { + ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) & + XGMAC_PSRQ(queue); + + writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2); + writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3); + } else { queue -= 4; - value = readl(ioaddr + reg); - value &= ~XGMAC_PSRQ(queue); - value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue); + ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) & + XGMAC_PSRQ(queue); - writel(value, ioaddr + reg); + writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3); + writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2); + } } static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio, diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 6bf5e341c..08c45756b 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1445,7 +1445,7 @@ static int temac_probe(struct platform_device *pdev) } /* map device registers */ - lp->regs = devm_platform_ioremap_resource_byname(pdev, 0); + lp->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(lp->regs)) { dev_err(&pdev->dev, "could not map TEMAC registers\n"); return -ENOMEM; diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 267e6fd3d..57411ee1d 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -380,7 +380,7 @@ static int dp83822_config_init(struct phy_device *phydev) { struct dp83822_private *dp83822 = phydev->priv; struct device *dev = &phydev->mdio.dev; - int rgmii_delay; + int rgmii_delay = 0; s32 rx_int_delay; s32 tx_int_delay; int err = 0; @@ -390,30 +390,33 @@ static int dp83822_config_init(struct phy_device *phydev) rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0, true); - if (rx_int_delay <= 0) - rgmii_delay = 0; - else - rgmii_delay = DP83822_RX_CLK_SHIFT; + /* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */ + if (rx_int_delay > 0) + rgmii_delay |= DP83822_RX_CLK_SHIFT; tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0, false); + + /* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */ if (tx_int_delay <= 0) - rgmii_delay &= ~DP83822_TX_CLK_SHIFT; - else rgmii_delay |= DP83822_TX_CLK_SHIFT; - if (rgmii_delay) { - err = phy_set_bits_mmd(phydev, DP83822_DEVADDR, - MII_DP83822_RCSR, rgmii_delay); - if (err) - return err; - } + err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR, + DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay); + if (err) + return err; + + err = phy_set_bits_mmd(phydev, DP83822_DEVADDR, + MII_DP83822_RCSR, DP83822_RGMII_MODE_EN); - phy_set_bits_mmd(phydev, DP83822_DEVADDR, - MII_DP83822_RCSR, DP83822_RGMII_MODE_EN); + if (err) + return err; } else { - phy_clear_bits_mmd(phydev, DP83822_DEVADDR, - MII_DP83822_RCSR, DP83822_RGMII_MODE_EN); + err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, + MII_DP83822_RCSR, DP83822_RGMII_MODE_EN); + + if (err) + return err; } if (dp83822->fx_enabled) { diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 9481f1728..2cbb1d183 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c 
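The two stmmac hunks above apply the same idea to GMAC4 and XGMAC: a priority value must map to exactly one RX queue, so the code clears that priority from both mapping registers before setting it for the target queue, and writes the register that gains the mapping before the one that loses it. A compact sketch of the register math with simplified names; the real field definitions are GMAC_RXQCTRL_PSRQX_* and XGMAC_PSRQ*, and the offsets here are illustrative:

#include <linux/bits.h>
#include <linux/io.h>

#define RXQ_CTRL2	0xa8				/* queues 0-3 */
#define RXQ_CTRL3	0xac				/* queues 4-7 */
#define PSRQ_MASK(q)	GENMASK(((q) % 4) * 8 + 7, ((q) % 4) * 8)

static void rx_queue_set_prio(void __iomem *ioaddr, u32 prio, u32 queue)
{
	u32 ctrl2 = readl(ioaddr + RXQ_CTRL2);
	u32 ctrl3 = readl(ioaddr + RXQ_CTRL3);
	u32 clear = 0;
	int i;

	/* A priority may be routed to only one queue: drop it everywhere. */
	for (i = 0; i < 4; i++)
		clear |= (prio << (i * 8)) & PSRQ_MASK(i);
	ctrl2 &= ~clear;
	ctrl3 &= ~clear;

	if (queue < 4) {
		ctrl2 |= (prio << (queue * 8)) & PSRQ_MASK(queue);
		writel(ctrl2, ioaddr + RXQ_CTRL2);	/* gaining register first */
		writel(ctrl3, ioaddr + RXQ_CTRL3);	/* then the losing one */
	} else {
		ctrl3 |= (prio << ((queue - 4) * 8)) & PSRQ_MASK(queue);
		writel(ctrl3, ioaddr + RXQ_CTRL3);
		writel(ctrl2, ioaddr + RXQ_CTRL2);
	}
}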
@@ -2188,6 +2188,7 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) struct hwtstamp_config config; int txcfg = 0, rxcfg = 0; int pkt_ts_enable; + int tx_mod; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; @@ -2237,9 +2238,14 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable); lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable); - if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) + tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD); + if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) { lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD, - PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_); + tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_); + } else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) { + lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD, + tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_); + } if (config.rx_filter != HWTSTAMP_FILTER_NONE) lan8814_config_ts_intr(ptp_priv->phydev, true); @@ -2297,7 +2303,7 @@ static void lan8814_txtstamp(struct mii_timestamper *mii_ts, } } -static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig) +static bool lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig) { struct ptp_header *ptp_header; u32 type; @@ -2307,7 +2313,11 @@ static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig) ptp_header = ptp_parse_header(skb, type); skb_pull_inline(skb, ETH_HLEN); + if (!ptp_header) + return false; + *sig = (__force u16)(ntohs(ptp_header->sequence_id)); + return true; } static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv, @@ -2319,7 +2329,8 @@ static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv, bool ret = false; u16 skb_sig; - lan8814_get_sig_rx(skb, &skb_sig); + if (!lan8814_get_sig_rx(skb, &skb_sig)) + return ret; /* Iterate over all RX timestamps and match it with the received skbs */ spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags); @@ -2599,7 +2610,7 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm) return 0; } -static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig) +static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig) { struct ptp_header *ptp_header; u32 type; @@ -2607,7 +2618,11 @@ static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig) type = ptp_classify_raw(skb); ptp_header = ptp_parse_header(skb, type); + if (!ptp_header) + return false; + *sig = (__force u16)(ntohs(ptp_header->sequence_id)); + return true; } static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv) @@ -2625,7 +2640,8 @@ static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv) spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags); skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) { - lan8814_get_sig_tx(skb, &skb_sig); + if (!lan8814_get_sig_tx(skb, &skb_sig)) + continue; if (memcmp(&skb_sig, &seq_id, sizeof(seq_id))) continue; @@ -2669,7 +2685,8 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv, spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags); skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) { - lan8814_get_sig_rx(skb, &skb_sig); + if (!lan8814_get_sig_rx(skb, &skb_sig)) + continue; if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id))) continue; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 944f76e6f..f25b0d338 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -2640,8 +2640,8 
@@ EXPORT_SYMBOL(genphy_resume); int genphy_loopback(struct phy_device *phydev, bool enable) { if (enable) { - u16 val, ctl = BMCR_LOOPBACK; - int ret; + u16 ctl = BMCR_LOOPBACK; + int ret, val; ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex); @@ -2893,7 +2893,7 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, if (delay < 0) return delay; - if (delay && size == 0) + if (size == 0) return delay; if (delay < delay_values[0] || delay > delay_values[size - 1]) { diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 6eacbf17f..34cd568b2 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -714,7 +714,7 @@ static int ax88772_init_phy(struct usbnet *dev) } phy_suspend(priv->phydev); - priv->phydev->mac_managed_pm = 1; + priv->phydev->mac_managed_pm = true; phy_attached_info(priv->phydev); @@ -734,7 +734,7 @@ static int ax88772_init_phy(struct usbnet *dev) return -ENODEV; } - priv->phydev_int->mac_managed_pm = 1; + priv->phydev_int->mac_managed_pm = true; phy_suspend(priv->phydev_int); return 0; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index d837c1887..e0e9b4c53 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1273,6 +1273,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev) if (is_valid_ether_addr(mac)) { eth_hw_addr_set(dev->net, mac); + if (!is_local_ether_addr(mac)) + dev->net->addr_assign_type = NET_ADDR_PERM; } else { netdev_info(dev->net, "invalid MAC address, using random\n"); eth_hw_addr_random(dev->net); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index a530f20ee..2fa46baa5 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -2105,6 +2105,11 @@ static const struct usb_device_id products[] = { .driver_info = (unsigned long) &smsc95xx_info, }, { + /* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */ + USB_DEVICE(0x0878, 0x1400), + .driver_info = (unsigned long)&smsc95xx_info, + }, + { /* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */ USB_DEVICE(0x184F, 0x0051), .driver_info = (unsigned long)&smsc95xx_info, diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index f5e19f3ef..4de514482 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -737,7 +737,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf) data->eeprom_len = SR9800_EEPROM_LEN; - usbnet_get_endpoints(dev, intf); + ret = usbnet_get_endpoints(dev, intf); + if (ret) + goto out; /* LED Setting Rule : * AABB:CCDD diff --git a/drivers/net/veth.c b/drivers/net/veth.c index dd9f5f146..8dcd3b6e1 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1444,8 +1444,6 @@ static netdev_features_t veth_fix_features(struct net_device *dev, if (peer_priv->_xdp_prog) features &= ~NETIF_F_GSO_SOFTWARE; } - if (priv->_xdp_prog) - features |= NETIF_F_GRO; return features; } @@ -1542,14 +1540,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, } if (!old_prog) { - if (!veth_gro_requested(dev)) { - /* user-space did not require GRO, but adding - * XDP is supposed to get GRO working - */ - dev->features |= NETIF_F_GRO; - netdev_features_change(dev); - } - peer->hw_features &= ~NETIF_F_GSO_SOFTWARE; peer->max_mtu = max_mtu; } @@ -1560,14 +1550,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, if (dev->flags & IFF_UP) veth_disable_xdp(dev); - /* if user-space did not require GRO, since adding XDP - * enabled it, 
clear it now - */ - if (!veth_gro_requested(dev)) { - dev->features &= ~NETIF_F_GRO; - netdev_features_change(dev); - } - if (peer) { peer->hw_features |= NETIF_F_GSO_SOFTWARE; peer->max_mtu = ETH_MAX_MTU; diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index 6d1bd9f52..81b716e66 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -164,8 +164,8 @@ get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx) if (!allowedips_node) goto no_allowedips; if (!ctx->allowedips_seq) - ctx->allowedips_seq = peer->device->peer_allowedips.seq; - else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq) + ctx->allowedips_seq = ctx->wg->peer_allowedips.seq; + else if (ctx->allowedips_seq != ctx->wg->peer_allowedips.seq) goto no_allowedips; allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS); @@ -255,17 +255,17 @@ static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb) if (!peers_nest) goto out; ret = 0; - /* If the last cursor was removed via list_del_init in peer_remove, then + lockdep_assert_held(&wg->device_update_lock); + /* If the last cursor was removed in peer_remove or peer_remove_all, then * we just treat this the same as there being no more peers left. The * reason is that seq_nr should indicate to userspace that this isn't a * coherent dump anyway, so they'll try again. */ if (list_empty(&wg->peer_list) || - (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) { + (ctx->next_peer && ctx->next_peer->is_dead)) { nla_nest_cancel(skb, peers_nest); goto out; } - lockdep_assert_held(&wg->device_update_lock); peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list); list_for_each_entry_continue(peer, &wg->peer_list, peer_list) { if (get_peer(peer, skb, ctx)) { diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index a176653c8..db01ec03b 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -251,7 +251,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || - keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) { + READ_ONCE(keypair->receiving_counter.counter) >= REJECT_AFTER_MESSAGES)) { WRITE_ONCE(keypair->receiving.is_valid, false); return false; } @@ -318,7 +318,7 @@ static bool counter_validate(struct noise_replay_counter *counter, u64 their_cou for (i = 1; i <= top; ++i) counter->backtrack[(i + index_current) & ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; - counter->counter = their_counter; + WRITE_ONCE(counter->counter, their_counter); } index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; @@ -463,7 +463,7 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget) net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", peer->device->dev->name, PACKET_CB(skb)->nonce, - keypair->receiving_counter.counter); + READ_ONCE(keypair->receiving_counter.counter)); goto next; } diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 876410a47..4d5009604 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -844,6 +844,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb, } ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } arg->desc_id = ev->desc_id; 
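The wireguard receive-path change above pairs the locked writer of the replay counter with its lockless readers via WRITE_ONCE()/READ_ONCE(), which annotates the intentional data race and stops the compiler from re-reading, caching, or fusing accesses to the shared 64-bit value. A stripped-down sketch of the pairing; the struct is illustrative, not the noise_replay_counter layout:

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct replay_counter {
	u64 counter;		/* updated under lock, read locklessly */
	spinlock_t lock;
};

static void counter_update(struct replay_counter *c, u64 their_counter)
{
	spin_lock_bh(&c->lock);
	/* Pairs with the READ_ONCE() in counter_expired(). */
	WRITE_ONCE(c->counter, their_counter);
	spin_unlock_bh(&c->lock);
}

static bool counter_expired(struct replay_counter *c, u64 reject_after)
{
	return READ_ONCE(c->counter) >= reject_after;
}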
arg->status = ev->status; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 21c6b36dc..51fc77e93 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -2112,6 +2112,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); + /* Initialize rx_mcs_160 to 9 which is an invalid value */ + rx_mcs_160 = 9; if (support_160) { for (i = 7; i >= 0; i--) { u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3; @@ -2123,6 +2125,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, } } + /* Initialize rx_mcs_80 to 9 which is an invalid value */ + rx_mcs_80 = 9; for (i = 7; i >= 0; i--) { u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3; diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 237f4ec2c..6c33e898b 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -306,7 +306,6 @@ struct ath9k_htc_tx { DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM); struct timer_list cleanup_timer; spinlock_t tx_lock; - bool initialized; }; struct ath9k_htc_tx_ctl { @@ -515,6 +514,7 @@ struct ath9k_htc_priv { unsigned long ps_usecount; bool ps_enabled; bool ps_idle; + bool initialized; #ifdef CONFIG_MAC80211_LEDS enum led_brightness brightness; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 96a3185a9..b01418537 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -966,6 +966,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, htc_handle->drv_priv = priv; + /* Allow ath9k_wmi_event_tasklet() to operate. */ + smp_wmb(); + priv->initialized = true; + return 0; err_init: diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index d6a3f001d..2fdd27885 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -815,10 +815,6 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv) skb_queue_head_init(&priv->tx.data_vo_queue); skb_queue_head_init(&priv->tx.tx_failed); - /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */ - smp_wmb(); - priv->tx.initialized = true; - return 0; } diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index 1476b42b5..805ad31ed 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -155,6 +155,12 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t) } spin_unlock_irqrestore(&wmi->wmi_lock, flags); + /* Check if ath9k_htc_probe_device() completed. */ + if (!data_race(priv->initialized)) { + kfree_skb(skb); + continue; + } + hdr = (struct wmi_cmd_hdr *) skb->data; cmd_id = be16_to_cpu(hdr->command_id); wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr)); @@ -169,10 +175,6 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t) &wmi->drv_priv->fatal_work); break; case WMI_TXSTATUS_EVENTID: - /* Check if ath9k_tx_init() completed. 
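The ath9k change above moves the initialized flag to the top-level priv and sets it only after probe has wired up htc_handle->drv_priv, with an smp_wmb() so the WMI tasklet, which checks the flag with data_race(), cannot observe the flag before the stores that precede it. A small sketch of the publish/consume shape, assuming a hypothetical priv struct:

#include <linux/compiler.h>
#include <linux/skbuff.h>
#include <asm/barrier.h>

struct htc_priv {
	bool initialized;
	/* ... state populated during probe ... */
};

static void probe_finished(struct htc_priv *priv)
{
	/* Make every store done during probe visible before the flag. */
	smp_wmb();
	priv->initialized = true;
}

static void handle_event(struct htc_priv *priv, struct sk_buff *skb)
{
	/* Events may arrive before probe completes: drop them. */
	if (!data_race(priv->initialized)) {
		kfree_skb(skb);
		return;
	}
	/* ... dispatch the WMI event ... */
}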
*/ - if (!data_race(priv->tx.initialized)) - break; - spin_lock_bh(&priv->tx.tx_lock); if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) { spin_unlock_bh(&priv->tx.tx_lock); diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h index 67b4bac04..c0d8fc0b2 100644 --- a/drivers/net/wireless/broadcom/b43/b43.h +++ b/drivers/net/wireless/broadcom/b43/b43.h @@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev) return dev->__using_pio_transfers; } +static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio) +{ + if (dev->qos_enabled) + ieee80211_wake_queue(dev->wl->hw, queue_prio); + else + ieee80211_wake_queue(dev->wl->hw, 0); +} + +static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio) +{ + if (dev->qos_enabled) + ieee80211_stop_queue(dev->wl->hw, queue_prio); + else + ieee80211_stop_queue(dev->wl->hw, 0); +} + /* Message printing */ __printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...); __printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...); diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index 9a7c62bd5..cfaf2f9d6 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -1399,7 +1399,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) should_inject_overflow(ring)) { /* This TX ring is full. */ unsigned int skb_mapping = skb_get_queue_mapping(skb); - ieee80211_stop_queue(dev->wl->hw, skb_mapping); + b43_stop_queue(dev, skb_mapping); dev->wl->tx_queue_stopped[skb_mapping] = true; ring->stopped = true; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { @@ -1570,7 +1570,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, } else { /* If the driver queue is running wake the corresponding * mac80211 queue. */ - ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); + b43_wake_queue(dev, ring->queue_prio); if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); } diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index b2539a916..bdfa68cc7 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -2587,7 +2587,8 @@ static void b43_request_firmware(struct work_struct *work) start_ieee80211: wl->hw->queues = B43_QOS_QUEUE_NUM; - if (!modparam_qos || dev->fw.opensource) + if (!modparam_qos || dev->fw.opensource || + dev->dev->chip_id == BCMA_CHIP_ID_BCM4331) wl->hw->queues = 1; err = ieee80211_register_hw(wl->hw); @@ -3603,7 +3604,7 @@ static void b43_tx_work(struct work_struct *work) err = b43_dma_tx(dev, skb); if (err == -ENOSPC) { wl->tx_queue_stopped[queue_num] = true; - ieee80211_stop_queue(wl->hw, queue_num); + b43_stop_queue(dev, queue_num); skb_queue_head(&wl->tx_queue[queue_num], skb); break; } @@ -3627,6 +3628,7 @@ static void b43_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct b43_wl *wl = hw_to_b43_wl(hw); + u16 skb_queue_mapping; if (unlikely(skb->len < 2 + 2 + 6)) { /* Too short, this can't be a valid frame. 
*/ @@ -3635,12 +3637,12 @@ static void b43_op_tx(struct ieee80211_hw *hw, } B43_WARN_ON(skb_shinfo(skb)->nr_frags); - skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb); - if (!wl->tx_queue_stopped[skb->queue_mapping]) { + skb_queue_mapping = skb_get_queue_mapping(skb); + skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb); + if (!wl->tx_queue_stopped[skb_queue_mapping]) ieee80211_queue_work(wl->hw, &wl->tx_work); - } else { - ieee80211_stop_queue(wl->hw, skb->queue_mapping); - } + else + b43_stop_queue(wl->current_dev, skb_queue_mapping); } static void b43_qos_params_upload(struct b43_wldev *dev, diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c index 8c28a9250..cc19b589f 100644 --- a/drivers/net/wireless/broadcom/b43/pio.c +++ b/drivers/net/wireless/broadcom/b43/pio.c @@ -525,7 +525,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) if (total_len > (q->buffer_size - q->buffer_used)) { /* Not enough memory on the queue. */ err = -EBUSY; - ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); + b43_stop_queue(dev, skb_get_queue_mapping(skb)); q->stopped = true; goto out; } @@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) || (q->free_packet_slots == 0)) { /* The queue is full. */ - ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); + b43_stop_queue(dev, skb_get_queue_mapping(skb)); q->stopped = true; } @@ -587,7 +587,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev, list_add(&pack->list, &q->packets_list); if (q->stopped) { - ieee80211_wake_queue(dev->wl->hw, q->queue_prio); + b43_wake_queue(dev, q->queue_prio); q->stopped = false; } } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index ad5a8d61d..24a3d5a59 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -791,8 +791,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, scan_request = cfg->scan_request; cfg->scan_request = NULL; - if (timer_pending(&cfg->escan_timeout)) - del_timer_sync(&cfg->escan_timeout); + timer_delete_sync(&cfg->escan_timeout); if (fw_abort) { /* Do a scan abort to stop the driver's scan engine */ @@ -7805,6 +7804,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) brcmf_btcoex_detach(cfg); wiphy_unregister(cfg->wiphy); wl_deinit_priv(cfg); + cancel_work_sync(&cfg->escan_timeout_work); brcmf_free_wiphy(cfg->wiphy); kfree(cfg); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c index ccc621b8e..4a1fe982a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c @@ -383,8 +383,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp) return sh; } -static void wlc_phy_timercb_phycal(struct brcms_phy *pi) +static void wlc_phy_timercb_phycal(void *ptr) { + struct brcms_phy *pi = ptr; uint delay = 5; if (PHY_PERICAL_MPHASE_PENDING(pi)) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c index a0de5db0c..b72381791 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c @@ 
-57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim) } struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim, - void (*fn)(struct brcms_phy *pi), + void (*fn)(void *pi), void *arg, const char *name) { return (struct wlapi_timer *) - brcms_init_timer(physhim->wl, (void (*)(void *))fn, - arg, name); + brcms_init_timer(physhim->wl, fn, arg, name); } void wlapi_free_timer(struct wlapi_timer *t) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h index dd8774717..27d0934e6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h @@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim); /* PHY to WL utility functions */ struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim, - void (*fn)(struct brcms_phy *pi), + void (*fn)(void *pi), void *arg, const char *name); void wlapi_free_timer(struct wlapi_timer *t); void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index f5fcc547d..235963e1d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -725,7 +725,7 @@ read_table: * from index 1, so the maximum value allowed here is * ACPI_SAR_PROFILES_NUM - 1. */ - if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) { + if (n_profiles >= ACPI_SAR_PROFILE_NUM) { ret = -EINVAL; goto out_free; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 3ef0b776b..3b0ed1cdf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -2903,8 +2903,6 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) struct iwl_fw_dbg_params params = {0}; struct iwl_fwrt_dump_data *dump_data = &fwrt->dump.wks[wk_idx].dump_data; - u32 policy; - u32 time_point; if (!test_bit(wk_idx, &fwrt->dump.active_wks)) return; @@ -2935,13 +2933,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, false); - policy = le32_to_cpu(dump_data->trig->apply_policy); - time_point = le32_to_cpu(dump_data->trig->time_point); + if (iwl_trans_dbg_ini_valid(fwrt->trans)) { + u32 policy = le32_to_cpu(dump_data->trig->apply_policy); + u32 time_point = le32_to_cpu(dump_data->trig->time_point); - if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) { - IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n"); - iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0); + if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) { + IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n"); + iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0); + } } + if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) iwl_force_nmi(fwrt->trans); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 5979d904b..677c9e0b4 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -103,6 +103,12 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans, if (le32_to_cpu(tlv->length) != sizeof(*debug_info)) return -EINVAL; + /* we use this as a string, ensure input was NUL terminated */ + if 
(strnlen(debug_info->debug_cfg_name, + sizeof(debug_info->debug_cfg_name)) == + sizeof(debug_info->debug_cfg_name)) + return -EINVAL; + IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n", debug_info->debug_cfg_name); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 2748459d1..88f4f429d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -461,12 +461,10 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm, struct wowlan_key_rsc_v5_data data = {}; int i; - data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL); + data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL); if (!data.rsc) return -ENOMEM; - memset(data.rsc, 0xff, sizeof(*data.rsc)); - for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++) data.rsc->mcast_key_id_map[i] = IWL_MCAST_KEY_MAP_INVALID; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c index bb77bc9aa..fb2408c05 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c @@ -122,14 +122,18 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm) if (ret) return ERR_PTR(ret); - if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size)) + if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != + resp_size)) { + iwl_free_resp(&cmd); return ERR_PTR(-EIO); + } resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL); + iwl_free_resp(&cmd); + if (!resp) return ERR_PTR(-ENOMEM); - iwl_free_resp(&cmd); return resp; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index f268a31ce..105f283b7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -299,6 +299,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, u32 status, struct ieee80211_rx_status *stats) { + struct wireless_dev *wdev; struct iwl_mvm_sta *mvmsta; struct iwl_mvm_vif *mvmvif; u8 keyid; @@ -320,9 +321,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, if (!ieee80211_is_beacon(hdr->frame_control)) return 0; + if (!sta) + return -1; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + /* key mismatch - will also report !MIC_OK but we shouldn't count it */ if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID)) - return -1; + goto report; /* good cases */ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK && @@ -331,13 +338,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, return 0; } - if (!sta) - return -1; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - - mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - /* * both keys will have the same cipher and MIC length, use * whichever one is available @@ -346,11 +346,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, if (!key) { key = rcu_dereference(mvmvif->bcn_prot.keys[1]); if (!key) - return -1; + goto report; } if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2) - return -1; + goto report; /* get the real key ID */ keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2]; @@ -364,7 +364,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, return -1; key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]); if (!key) - return -1; + goto report; } /* Report status to mac80211 */ @@ -372,6 +372,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, ieee80211_key_mic_failure(key); else 
if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR) ieee80211_key_replay(key); +report: + wdev = ieee80211_vif_to_wdev(mvmsta->vif); + if (wdev->netdev) + cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len); return -1; } diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c index 104d2b6dc..5a525da43 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.c +++ b/drivers/net/wireless/marvell/libertas/cmd.c @@ -1132,7 +1132,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv) if (!cmdarray[i].cmdbuf) { lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n"); ret = -1; - goto done; + goto free_cmd_array; } } @@ -1140,8 +1140,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv) init_waitqueue_head(&cmdarray[i].cmdwait_q); lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]); } - ret = 0; + return 0; +free_cmd_array: + for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { + if (cmdarray[i].cmdbuf) { + kfree(cmdarray[i].cmdbuf); + cmdarray[i].cmdbuf = NULL; + } + } + kfree(priv->cmd_array); + priv->cmd_array = NULL; done: return ret; } diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index 63f232c72..55ca5b287 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -964,9 +964,6 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name, mwifiex_dfs_dir); - if (!priv->dfs_dev_dir) - return; - MWIFIEX_DFS_ADD_FILE(info); MWIFIEX_DFS_ADD_FILE(debug); MWIFIEX_DFS_ADD_FILE(getlog); diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index b545d93c6..6f3245a43 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -1615,7 +1615,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) cfg80211_unregister_netdevice(vif->ndev); vif->monitor_flag = 0; - wilc_set_operation_mode(vif, 0, 0, 0); mutex_lock(&wl->vif_mutex); list_del_rcu(&vif->list); wl->vif_num--; @@ -1810,15 +1809,24 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, INIT_LIST_HEAD(&wl->rxq_head.list); INIT_LIST_HEAD(&wl->vif_list); + wl->hif_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, + wiphy_name(wl->wiphy)); + if (!wl->hif_workqueue) { + ret = -ENOMEM; + goto free_cfg; + } vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE, NL80211_IFTYPE_STATION, false); if (IS_ERR(vif)) { ret = PTR_ERR(vif); - goto free_cfg; + goto free_hq; } return 0; +free_hq: + destroy_workqueue(wl->hif_workqueue); + free_cfg: wilc_wlan_cfg_deinit(wl); diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index a1b75feec..5eb02902e 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -374,38 +374,49 @@ out: void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, struct cfg80211_crypto_settings *crypto) { - struct wilc_join_bss_param *param; - struct ieee80211_p2p_noa_attr noa_attr; - u8 rates_len = 0; - const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie; + const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie; const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie; + struct ieee80211_p2p_noa_attr noa_attr; + const struct cfg80211_bss_ies *ies; + struct wilc_join_bss_param *param; + u8 rates_len = 0, ies_len; int ret; - 
const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies); param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) return NULL; + rcu_read_lock(); + ies = rcu_dereference(bss->ies); + ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC); + if (!ies_data) { + rcu_read_unlock(); + kfree(param); + return NULL; + } + ies_len = ies->len; + rcu_read_unlock(); + param->beacon_period = cpu_to_le16(bss->beacon_interval); param->cap_info = cpu_to_le16(bss->capability); param->bss_type = WILC_FW_BSS_TYPE_INFRA; param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq); ether_addr_copy(param->bssid, bss->bssid); - ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len); + ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len); if (ssid_elm) { if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN) memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]); } - tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len); + tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len); if (tim_elm && tim_elm[1] >= 2) param->dtim_period = tim_elm[3]; memset(param->p_suites, 0xFF, 3); memset(param->akm_suites, 0xFF, 3); - rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len); + rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len); if (rates_ie) { rates_len = rates_ie[1]; if (rates_len > WILC_MAX_RATES_SUPPORTED) @@ -416,7 +427,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, if (rates_len < WILC_MAX_RATES_SUPPORTED) { supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, - ies->data, ies->len); + ies_data, ies_len); if (supp_rates_ie) { u8 ext_rates = supp_rates_ie[1]; @@ -431,11 +442,11 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, } } - ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len); + ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len); if (ht_ie) param->ht_capable = true; - ret = cfg80211_get_p2p_attr(ies->data, ies->len, + ret = cfg80211_get_p2p_attr(ies_data, ies_len, IEEE80211_P2P_ATTR_ABSENCE_NOTICE, (u8 *)&noa_attr, sizeof(noa_attr)); if (ret > 0) { @@ -459,7 +470,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, } wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, - ies->data, ies->len); + ies_data, ies_len); if (wmm_ie) { struct ieee80211_wmm_param_ie *ie; @@ -474,13 +485,13 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, - ies->data, ies->len); + ies_data, ies_len); if (wpa_ie) { param->mode_802_11i = 1; param->rsn_found = true; } - rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len); + rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len); if (rsn_ie) { int rsn_ie_len = sizeof(struct element) + rsn_ie[1]; int offset = 8; @@ -514,6 +525,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, param->akm_suites[i] = crypto->akm_suites[i] & 0xFF; } + kfree(ies_data); return (void *)param; } diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index e9f59de31..b714da48e 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -878,8 +878,7 @@ static const struct net_device_ops wilc_netdev_ops = { void wilc_netdev_cleanup(struct wilc *wilc) { - struct wilc_vif *vif; - int srcu_idx, ifc_cnt = 0; + struct wilc_vif *vif, *vif_tmp; if (!wilc) return; @@ -889,32 +888,19 @@ void wilc_netdev_cleanup(struct wilc *wilc) 
wilc->firmware = NULL; } - srcu_idx = srcu_read_lock(&wilc->srcu); - list_for_each_entry_rcu(vif, &wilc->vif_list, list) { + list_for_each_entry_safe(vif, vif_tmp, &wilc->vif_list, list) { + mutex_lock(&wilc->vif_mutex); + list_del_rcu(&vif->list); + wilc->vif_num--; + mutex_unlock(&wilc->vif_mutex); + synchronize_srcu(&wilc->srcu); if (vif->ndev) unregister_netdev(vif->ndev); } - srcu_read_unlock(&wilc->srcu, srcu_idx); wilc_wfi_deinit_mon_interface(wilc, false); destroy_workqueue(wilc->hif_workqueue); - while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) { - mutex_lock(&wilc->vif_mutex); - if (wilc->vif_num <= 0) { - mutex_unlock(&wilc->vif_mutex); - break; - } - vif = wilc_get_wl_to_vif(wilc); - if (!IS_ERR(vif)) - list_del_rcu(&vif->list); - - wilc->vif_num--; - mutex_unlock(&wilc->vif_mutex); - synchronize_srcu(&wilc->srcu); - ifc_cnt++; - } - wilc_wlan_cfg_deinit(wilc); wlan_deinit_locks(wilc); wiphy_unregister(wilc->wiphy); @@ -977,13 +963,6 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, goto error; } - wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM, - ndev->name); - if (!wl->hif_workqueue) { - ret = -ENOMEM; - goto unregister_netdev; - } - ndev->needs_free_netdev = true; vif->iftype = vif_type; vif->idx = wilc_get_available_idx(wl); @@ -996,12 +975,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, return vif; -unregister_netdev: +error: if (rtnl_locked) cfg80211_unregister_netdevice(ndev); else unregister_netdev(ndev); - error: free_netdev(ndev); return ERR_PTR(ret); } diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index b0fc5e68f..5877e2c1f 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -191,11 +191,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on) /* assert ENABLE: */ gpiod_set_value(gpios->enable, 1); mdelay(5); - /* assert RESET: */ - gpiod_set_value(gpios->reset, 1); - } else { /* deassert RESET: */ gpiod_set_value(gpios->reset, 0); + } else { + /* assert RESET: */ + gpiod_set_value(gpios->reset, 1); /* deassert ENABLE: */ gpiod_set_value(gpios->enable, 0); } diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 6dd5ec1e4..ccac47dd7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -6542,6 +6542,7 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw) if (priv->usb_interrupts) rtl8xxxu_write32(priv, REG_USB_HIMR, 0); + cancel_work_sync(&priv->c2hcmd_work); cancel_delayed_work_sync(&priv->ra_watchdog); rtl8xxxu_free_rx_resources(priv); diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 4c8164db4..81f311292 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -1989,8 +1989,6 @@ static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev) rtw_phy_setup_phy_cond(rtwdev, 0); rtw_phy_init_tx_power(rtwdev); - if (rfe_def->agc_btg_tbl) - rtw_load_table(rtwdev, rfe_def->agc_btg_tbl); rtw_load_table(rtwdev, rfe_def->phy_pg_tbl); rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl); rtw_phy_tx_power_by_rate_config(hal); diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index bd7d05e08..fde7b532b 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ 
b/drivers/net/wireless/realtek/rtw88/phy.c @@ -1761,12 +1761,15 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev) void rtw_phy_load_tables(struct rtw_dev *rtwdev) { + const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev); const struct rtw_chip_info *chip = rtwdev->chip; u8 rf_path; rtw_load_table(rtwdev, chip->mac_tbl); rtw_load_table(rtwdev, chip->bb_tbl); rtw_load_table(rtwdev, chip->agc_tbl); + if (rfe_def->agc_btg_tbl) + rtw_load_table(rtwdev, rfe_def->agc_btg_tbl); rtw_load_rfk_table(rtwdev); for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) { diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 609a2b863..50e3e46f7 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -674,9 +674,9 @@ static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev) dm_info->cck_fa_cnt = cck_fa_cnt; dm_info->ofdm_fa_cnt = ofdm_fa_cnt; + dm_info->total_fa_cnt = ofdm_fa_cnt; if (cck_enable) dm_info->total_fa_cnt += cck_fa_cnt; - dm_info->total_fa_cnt = ofdm_fa_cnt; crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK); dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt); diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c index 073e870b2..871667650 100644 --- a/drivers/net/wireless/silabs/wfx/sta.c +++ b/drivers/net/wireless/silabs/wfx/sta.c @@ -362,6 +362,7 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif) const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16); const int pairwise_cipher_suite_size = 4 / sizeof(u16); const int akm_suite_size = 4 / sizeof(u16); + int ret = -EINVAL; const u16 *ptr; if (unlikely(!skb)) @@ -370,22 +371,26 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif) ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset, skb->len - ieoffset); if (unlikely(!ptr)) - return -EINVAL; + goto free_skb; ptr += pairwise_cipher_suite_count_offset; if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) - return -EINVAL; + goto free_skb; ptr += 1 + pairwise_cipher_suite_size * *ptr; if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) - return -EINVAL; + goto free_skb; ptr += 1 + akm_suite_size * *ptr; if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) - return -EINVAL; + goto free_skb; wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6)); - return 0; + ret = 0; + +free_skb: + dev_kfree_skb(skb); + return ret; } int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c index 9f43f256d..f0a4783ba 100644 --- a/drivers/net/wwan/t7xx/t7xx_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_cldma.c @@ -106,7 +106,7 @@ bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno) { u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE; - return ioread64(hw_info->ap_pdn_base + offset); + return ioread64_lo_hi(hw_info->ap_pdn_base + offset); } void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address, @@ -117,7 +117,7 @@ void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qn reg = tx_rx == MTK_RX ? 
hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 : hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0; - iowrite64(address, reg + offset); + iowrite64_lo_hi(address, reg + offset); } void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index 6ff30cb8e..5d6032ceb 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -139,8 +139,9 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool return -ENODEV; } - gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 + - queue->index * sizeof(u64)); + gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base + + REG_CLDMA_DL_CURRENT_ADDRL_0 + + queue->index * sizeof(u64)); if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100) return 0; @@ -318,8 +319,8 @@ static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue) struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; /* Check current processing TGPD, 64-bit address is in a table by Q index */ - ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 + - queue->index * sizeof(u64)); + ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 + + queue->index * sizeof(u64)); if (req->gpd_addr != ul_curr_addr) { spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n", diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c index 76da4c15e..f071ec7ff 100644 --- a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c +++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c @@ -75,7 +75,7 @@ static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_ for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) { offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i; reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset; - iowrite64(0, reg); + iowrite64_lo_hi(0, reg); } } @@ -112,17 +112,17 @@ static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_ reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset; value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT; - iowrite64(value, reg); + iowrite64_lo_hi(value, reg); reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset; iowrite32(cfg->trsl_id, reg); reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset; value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0); - iowrite64(value, reg); + iowrite64_lo_hi(value, reg); /* Ensure ATR is set */ - ioread64(reg); + ioread64_lo_hi(reg); return 0; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index dc404e059..95b5ab4b9 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -285,6 +285,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) return NULL; } skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); + skb_mark_for_recycle(skb); /* Align ip header to a 16 bytes boundary */ skb_reserve(skb, NET_IP_ALIGN); diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c index 27dd93def..d702bee78 100644 --- a/drivers/ntb/core.c +++ b/drivers/ntb/core.c @@ -100,6 +100,8 @@ EXPORT_SYMBOL(ntb_unregister_client); int ntb_register_device(struct ntb_dev *ntb) { + int ret; + if (!ntb) return -EINVAL; if (!ntb->pdev) @@ -120,7 +122,11 @@ int ntb_register_device(struct ntb_dev *ntb) ntb->ctx_ops = NULL; spin_lock_init(&ntb->ctx_lock); - return device_register(&ntb->dev); + ret = device_register(&ntb->dev); + if 
(ret) + put_device(&ntb->dev); + + return ret; } EXPORT_SYMBOL(ntb_register_device); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 0c088db94..20160683e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1151,7 +1151,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return effects; } -void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, +void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, struct nvme_command *cmd, int status) { if (effects & NVME_CMD_EFFECTS_CSE_MASK) { @@ -1167,6 +1167,8 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, nvme_queue_scan(ctrl); flush_work(&ctrl->scan_work); } + if (ns) + return; switch (cmd->common.opcode) { case nvme_admin_set_features: @@ -4971,7 +4973,8 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, set->ops = ops; set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; if (ctrl->ops->flags & NVME_F_FABRICS) - set->reserved_tags = NVMF_RESERVED_TAGS; + /* Reserved for fabric connect and keep alive */ + set->reserved_tags = 2; set->numa_node = ctrl->numa_node; set->flags = BLK_MQ_F_NO_SCHED; if (ctrl->ops->flags & NVME_F_BLOCKING) @@ -5029,7 +5032,15 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, memset(set, 0, sizeof(*set)); set->ops = ops; set->queue_depth = ctrl->sqsize + 1; - set->reserved_tags = NVMF_RESERVED_TAGS; + /* + * Some Apple controllers requires tags to be unique across admin and + * the (only) I/O queue, so reserve the first 32 tags of the I/O queue. + */ + if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS) + set->reserved_tags = NVME_AQ_DEPTH; + else if (ctrl->ops->flags & NVME_F_FABRICS) + /* Reserved for fabric connect */ + set->reserved_tags = 1; set->numa_node = ctrl->numa_node; set->flags = BLK_MQ_F_SHOULD_MERGE; if (ctrl->ops->flags & NVME_F_BLOCKING) diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index dcac3df8a..60c238caf 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -19,13 +19,6 @@ #define NVMF_DEF_FAIL_FAST_TMO -1 /* - * Reserved one command for internal usage. This command is used for sending - * the connect command, as well as for the keep alive command on the admin - * queue once live. - */ -#define NVMF_RESERVED_TAGS 1 - -/* * Define a host as seen by the target. We allocate one at boot, but also * allow the override it when creating controllers. 
This is both to provide * persistence of the Host NQN over multiple boots, and to allow using diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 91e6d0347..b3e322e4a 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -147,6 +147,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, u32 meta_seed, u64 *result, unsigned timeout, bool vec) { + struct nvme_ns *ns = q->queuedata; struct nvme_ctrl *ctrl; struct request *req; void *meta = NULL; @@ -181,7 +182,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, blk_mq_free_request(req); if (effects) - nvme_passthru_end(ctrl, effects, cmd, ret); + nvme_passthru_end(ctrl, ns, effects, cmd, ret); return ret; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index a892d679e..8e28d2de4 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -1063,7 +1063,7 @@ static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}; u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode); int nvme_execute_passthru_rq(struct request *rq, u32 *effects); -void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, +void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, struct nvme_command *cmd, int status); struct nvme_ctrl *nvme_ctrl_from_file(struct file *file); struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid); diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index adc095875..a0a292d49 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -216,6 +216,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w) struct nvmet_req *req = container_of(w, struct nvmet_req, p.work); struct request *rq = req->p.rq; struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl; + struct nvme_ns *ns = rq->q->queuedata; u32 effects; int status; @@ -242,7 +243,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w) blk_mq_free_request(rq); if (effects) - nvme_passthru_end(ctrl, effects, req->cmd, status); + nvme_passthru_end(ctrl, ns, effects, req->cmd, status); } static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq, diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c index d6b533497..ba2714bef 100644 --- a/drivers/nvmem/meson-efuse.c +++ b/drivers/nvmem/meson-efuse.c @@ -47,7 +47,6 @@ static int meson_efuse_probe(struct platform_device *pdev) struct nvmem_config *econfig; struct clk *clk; unsigned int size; - int ret; sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0); if (!sm_np) { @@ -60,27 +59,9 @@ static int meson_efuse_probe(struct platform_device *pdev) if (!fw) return -EPROBE_DEFER; - clk = devm_clk_get(dev, NULL); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get efuse gate"); - return ret; - } - - ret = clk_prepare_enable(clk); - if (ret) { - dev_err(dev, "failed to enable gate"); - return ret; - } - - ret = devm_add_action_or_reset(dev, - (void(*)(void *))clk_disable_unprepare, - clk); - if (ret) { - dev_err(dev, "failed to add disable callback"); - return ret; - } + clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "failed to get efuse gate"); if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) { dev_err(dev, "failed to get max user"); diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 
9bb9fe0fa..e2a965101 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -9,6 +9,7 @@ #define pr_fmt(fmt) "OF: " fmt +#include <linux/device.h> #include <linux/of.h> #include <linux/spinlock.h> #include <linux/slab.h> @@ -679,6 +680,17 @@ void of_changeset_destroy(struct of_changeset *ocs) { struct of_changeset_entry *ce, *cen; + /* + * When a device is deleted, the device links to/from it are also queued + * for deletion. Until these device links are freed, the devices + * themselves aren't freed. If the device being deleted is due to an + * overlay change, this device might be holding a reference to a device + * node that will be freed. So, wait until all already pending device + * links are deleted before freeing a device node. This ensures we don't + * free any device node that has a non-zero reference count. + */ + device_link_wait_removal(); + list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node) __of_changeset_entry_destroy(ce); } diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index 2c7fb6834..de81bbf4b 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -37,10 +37,12 @@ static ssize_t bw_name_read(struct file *fp, char __user *userbuf, size_t count, loff_t *ppos) { struct icc_path *path = fp->private_data; + const char *name = icc_get_name(path); char buf[64]; - int i; + int i = 0; - i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path)); + if (name) + i = scnprintf(buf, sizeof(buf), "%.62s\n", name); return simple_read_from_buffer(userbuf, count, ppos, buf, i); } diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 4086a7818..506d6d061 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -669,8 +669,13 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; + /* + * PCIe r6.0, sec 7.8.6.2 require us to support at least one + * size in the range from 1 MB to 512 GB. Advertise support + * for 1 MB BAR size only. 
+ */ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) - dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); + dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4)); } dw_pcie_setup(pci); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 0ccd92faf..0bad23ec5 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -50,6 +50,7 @@ #define PARF_SLV_ADDR_SPACE_SIZE 0x358 #define PARF_DEVICE_TYPE 0x1000 #define PARF_BDF_TO_SID_TABLE_N 0x2000 +#define PARF_BDF_TO_SID_CFG 0x2c00 /* ELBI registers */ #define ELBI_SYS_CTRL 0x04 @@ -102,6 +103,9 @@ /* PARF_DEVICE_TYPE register fields */ #define DEVICE_TYPE_RC 0x4 +/* PARF_BDF_TO_SID_CFG fields */ +#define BDF_TO_SID_BYPASS BIT(0) + /* ELBI_SYS_CTRL register fields */ #define ELBI_SYS_CTRL_LT_ENABLE BIT(0) @@ -1312,6 +1316,82 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } +static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie) +{ + /* iommu map structure */ + struct { + u32 bdf; + u32 phandle; + u32 smmu_sid; + u32 smmu_sid_len; + } *map; + void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N; + struct device *dev = pcie->pci->dev; + u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; + int i, nr_map, size = 0; + u32 smmu_sid_base; + u32 val; + + of_get_property(dev->of_node, "iommu-map", &size); + if (!size) + return 0; + + /* Enable BDF to SID translation by disabling bypass mode (default) */ + val = readl(pcie->parf + PARF_BDF_TO_SID_CFG); + val &= ~BDF_TO_SID_BYPASS; + writel(val, pcie->parf + PARF_BDF_TO_SID_CFG); + + map = kzalloc(size, GFP_KERNEL); + if (!map) + return -ENOMEM; + + of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map, + size / sizeof(u32)); + + nr_map = size / (sizeof(*map)); + + crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); + + /* Registers need to be zero out first */ + memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); + + /* Extract the SMMU SID base from the first entry of iommu-map */ + smmu_sid_base = map[0].smmu_sid; + + /* Look for an available entry to hold the mapping */ + for (i = 0; i < nr_map; i++) { + __be16 bdf_be = cpu_to_be16(map[i].bdf); + u32 val; + u8 hash; + + hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0); + + val = readl(bdf_to_sid_base + hash * sizeof(u32)); + + /* If the register is already populated, look for next available entry */ + while (val) { + u8 current_hash = hash++; + u8 next_mask = 0xff; + + /* If NEXT field is NULL then update it with next hash */ + if (!(val & next_mask)) { + val |= (u32)hash; + writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); + } + + val = readl(bdf_to_sid_base + hash * sizeof(u32)); + } + + /* BDF [31:16] | SID [15:8] | NEXT [7:0] */ + val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; + writel(val, bdf_to_sid_base + hash * sizeof(u32)); + } + + kfree(map); + + return 0; +} + static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; @@ -1429,77 +1509,6 @@ static int qcom_pcie_link_up(struct dw_pcie *pci) return !!(val & PCI_EXP_LNKSTA_DLLLA); } -static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie) -{ - /* iommu map structure */ - struct { - u32 bdf; - u32 phandle; - u32 smmu_sid; - u32 smmu_sid_len; - } *map; - void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N; - struct device *dev = 
pcie->pci->dev; - u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; - int i, nr_map, size = 0; - u32 smmu_sid_base; - - of_get_property(dev->of_node, "iommu-map", &size); - if (!size) - return 0; - - map = kzalloc(size, GFP_KERNEL); - if (!map) - return -ENOMEM; - - of_property_read_u32_array(dev->of_node, - "iommu-map", (u32 *)map, size / sizeof(u32)); - - nr_map = size / (sizeof(*map)); - - crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); - - /* Registers need to be zero out first */ - memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); - - /* Extract the SMMU SID base from the first entry of iommu-map */ - smmu_sid_base = map[0].smmu_sid; - - /* Look for an available entry to hold the mapping */ - for (i = 0; i < nr_map; i++) { - __be16 bdf_be = cpu_to_be16(map[i].bdf); - u32 val; - u8 hash; - - hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), - 0); - - val = readl(bdf_to_sid_base + hash * sizeof(u32)); - - /* If the register is already populated, look for next available entry */ - while (val) { - u8 current_hash = hash++; - u8 next_mask = 0xff; - - /* If NEXT field is NULL then update it with next hash */ - if (!(val & next_mask)) { - val |= (u32)hash; - writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); - } - - val = readl(bdf_to_sid_base + hash * sizeof(u32)); - } - - /* BDF [31:16] | SID [15:8] | NEXT [7:0] */ - val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; - writel(val, bdf_to_sid_base + hash * sizeof(u32)); - } - - kfree(map); - - return 0; -} - static int qcom_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); @@ -1616,7 +1625,7 @@ static const struct qcom_pcie_ops ops_1_9_0 = { .init = qcom_pcie_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, - .config_sid = qcom_pcie_config_sid_sm8250, + .config_sid = qcom_pcie_config_sid_1_9_0, }; /* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */ diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 9693bab59..b36cbc913 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -49,6 +49,7 @@ #include <linux/refcount.h> #include <linux/irqdomain.h> #include <linux/acpi.h> +#include <linux/sizes.h> #include <asm/mshyperv.h> /* @@ -465,7 +466,7 @@ struct pci_eject_response { u32 status; } __packed; -static int pci_ring_size = (4 * PAGE_SIZE); +static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K); /* * Driver specific state. 
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c index 8c6931210..b4c1a4f60 100644 --- a/drivers/pci/endpoint/functions/pci-epf-vntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c @@ -1281,14 +1281,11 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = ntb_register_device(&ndev->ntb); if (ret) { dev_err(dev, "Failed to register NTB device\n"); - goto err_register_dev; + return ret; } dev_dbg(dev, "PCI Virtual NTB driver loaded\n"); return 0; - -err_register_dev: - return -EINVAL; } static struct pci_device_id pci_vntb_table[] = { diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index f47a3b10b..8dda3b205 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -473,6 +473,13 @@ static void pci_device_remove(struct device *dev) if (drv->remove) { pm_runtime_get_sync(dev); + /* + * If the driver provides a .runtime_idle() callback and it has + * started to run already, it may continue to run in parallel + * with the code below, so wait until all of the runtime PM + * activity has completed. + */ + pm_runtime_barrier(dev); drv->remove(pci_dev); pm_runtime_put_noidle(dev); } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index e1d02b7c6..9950deeb0 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -357,11 +357,6 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) return 0; } -static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) -{ - return dev->error_state == pci_channel_io_perm_failure; -} - /* pci_dev priv_flags */ #define PCI_DEV_ADDED 0 #define PCI_DPC_RECOVERED 1 diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index a5d7c69b7..088002828 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -231,7 +231,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev) for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) { pci_read_config_dword(pdev, - cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix); + cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix); pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix); } clear_status: diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 59c90d04a..705893b5f 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -13,6 +13,7 @@ #define dev_fmt(fmt) "AER: " fmt #include <linux/pci.h> +#include <linux/pm_runtime.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> @@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev, return 0; } +static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data) +{ + pm_runtime_get_sync(&pdev->dev); + return 0; +} + +static int pci_pm_runtime_put(struct pci_dev *pdev, void *data) +{ + pm_runtime_put(&pdev->dev); + return 0; +} + static int report_frozen_detected(struct pci_dev *dev, void *data) { return report_error_detected(dev, pci_channel_io_frozen, data); @@ -207,6 +220,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, else bridge = pci_upstream_bridge(dev); + pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL); + pci_dbg(bridge, "broadcast error_detected message\n"); if (state == pci_channel_io_frozen) { pci_walk_bridge(bridge, report_frozen_detected, &status); @@ -251,10 +266,15 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, pcie_clear_device_status(dev); pci_aer_clear_nonfatal_status(dev); } + + pci_walk_bridge(bridge, pci_pm_runtime_put, NULL); + pci_info(bridge, "device recovery successful\n"); 
return status; failed: + pci_walk_bridge(bridge, pci_pm_runtime_put, NULL); + pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT); /* TODO: Should kernel panic here? */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 51d634fbd..289ba6902 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5415,6 +5415,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev) pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL); } +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags); @@ -6077,6 +6078,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size); #endif /* diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 3f3320d0a..d05a48263 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -1674,7 +1674,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev, rc = switchtec_init_isr(stdev); if (rc) { dev_err(&stdev->dev, "failed to init isr.\n"); - goto err_put; + goto err_exit_pci; } iowrite32(SWITCHTEC_EVENT_CLEAR | @@ -1695,6 +1695,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev, err_devadd: stdev_kill(stdev); +err_exit_pci: + switchtec_exit_pci(stdev); err_put: ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt)); put_device(&stdev->dev); diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 47e7c3206..899e4ed49 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -2178,6 +2178,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) dev_dbg(cmn->dev, "ignoring external node %llx\n", reg); continue; } + /* + * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus + * child count larger than the number of valid child pointers. + * A child offset of 0 can only occur on CMN-600; otherwise it + * would imply the root node being its own grandchild, which + * we can safely dismiss in general. 
+ */ + if (reg == 0 && cmn->part != PART_CMN600) { + dev_dbg(cmn->dev, "bogus child pointer?\n"); + continue; + } arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn); diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c index 56897d4d4..2d5cf135e 100644 --- a/drivers/perf/riscv_pmu.c +++ b/drivers/perf/riscv_pmu.c @@ -246,6 +246,10 @@ static int riscv_pmu_event_init(struct perf_event *event) u64 event_config = 0; uint64_t cmask; + /* driver does not support branch stack sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + hwc->flags = 0; mapped_event = rvpmu->event_map(event, &event_config); if (mapped_event < 0) { diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 4d5b4071d..dc22b1dd2 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -1518,6 +1518,19 @@ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, } EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion); +int tegra_xusb_padctl_get_port_number(struct phy *phy) +{ + struct tegra_xusb_lane *lane; + + if (!phy) + return -ENODEV; + + lane = phy_get_drvdata(phy); + + return lane->index; +} +EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_port_number); + MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>"); MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8186.c b/drivers/pinctrl/mediatek/pinctrl-mt8186.c index a02f7c326..09edcf47e 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8186.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8186.c @@ -1198,7 +1198,6 @@ static const struct mtk_pin_reg_calc mt8186_reg_cals[PINCTRL_PIN_REG_MAX] = { [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8186_pin_dir_range), [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8186_pin_di_range), [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8186_pin_do_range), - [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8186_pin_dir_range), [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8186_pin_smt_range), [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8186_pin_ies_range), [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8186_pin_pu_range), diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c index 9695f4ec6..f120268c0 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c @@ -1379,7 +1379,6 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = { [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8192_pin_dir_range), [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8192_pin_di_range), [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8192_pin_do_range), - [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8192_pin_dir_range), [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8192_pin_smt_range), [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8192_pin_ies_range), [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8192_pin_pu_range), diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index f7d02513d..e79037dc8 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -1571,8 +1571,10 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function, * Then mask the pins that need to be sleeping now when we're * switching to the ALT C function. 
*/ - for (i = 0; i < g->grp.npins; i++) - slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->grp.pins[i]); + for (i = 0; i < g->grp.npins; i++) { + unsigned int bit = g->grp.pins[i] % NMK_GPIO_PER_CHIP; + slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(bit); + } nmk_gpio_glitch_slpm_init(slpm); } diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c index 43a63a21a..acf7664ea 100644 --- a/drivers/pinctrl/renesas/pfc-r8a779g0.c +++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c @@ -2360,6 +2360,30 @@ static const unsigned int scif_clk_mux[] = { SCIF_CLK_MARK, }; +static const unsigned int scif_clk2_pins[] = { + /* SCIF_CLK2 */ + RCAR_GP_PIN(8, 11), +}; +static const unsigned int scif_clk2_mux[] = { + SCIF_CLK2_MARK, +}; + +/* - SSI ------------------------------------------------- */ +static const unsigned int ssi_data_pins[] = { + /* SSI_SD */ + RCAR_GP_PIN(1, 20), +}; +static const unsigned int ssi_data_mux[] = { + SSI_SD_MARK, +}; +static const unsigned int ssi_ctrl_pins[] = { + /* SSI_SCK, SSI_WS */ + RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 19), +}; +static const unsigned int ssi_ctrl_mux[] = { + SSI_SCK_MARK, SSI_WS_MARK, +}; + /* - TPU ------------------------------------------------------------------- */ static const unsigned int tpu_to0_pins[] = { /* TPU0TO0 */ @@ -2651,6 +2675,10 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(scif4_clk), SH_PFC_PIN_GROUP(scif4_ctrl), SH_PFC_PIN_GROUP(scif_clk), + SH_PFC_PIN_GROUP(scif_clk2), + + SH_PFC_PIN_GROUP(ssi_data), + SH_PFC_PIN_GROUP(ssi_ctrl), SH_PFC_PIN_GROUP(tpu_to0), /* suffix might be updated */ SH_PFC_PIN_GROUP(tpu_to0_a), /* suffix might be updated */ @@ -2964,6 +2992,15 @@ static const char * const scif_clk_groups[] = { "scif_clk", }; +static const char * const scif_clk2_groups[] = { + "scif_clk2", +}; + +static const char * const ssi_groups[] = { + "ssi_data", + "ssi_ctrl", +}; + static const char * const tpu_groups[] = { /* suffix might be updated */ "tpu_to0", @@ -3044,6 +3081,9 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(scif3), SH_PFC_FUNCTION(scif4), SH_PFC_FUNCTION(scif_clk), + SH_PFC_FUNCTION(scif_clk2), + + SH_PFC_FUNCTION(ssi), SH_PFC_FUNCTION(tpu), diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c index 17cc4b45e..a64f56ddd 100644 --- a/drivers/platform/x86/p2sb.c +++ b/drivers/platform/x86/p2sb.c @@ -20,9 +20,11 @@ #define P2SBC_HIDE BIT(8) #define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1) +#define P2SB_DEVFN_GOLDMONT PCI_DEVFN(13, 0) +#define SPI_DEVFN_GOLDMONT PCI_DEVFN(13, 2) static const struct x86_cpu_id p2sb_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT), {} }; @@ -98,21 +100,12 @@ static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn) static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn) { - unsigned int slot, fn; - - if (PCI_FUNC(devfn) == 0) { - /* - * When function number of the P2SB device is zero, scan it and - * other function numbers, and if devices are available, cache - * their BAR0s. 
- */ - slot = PCI_SLOT(devfn); - for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++) - p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn)); - } else { - /* Scan the P2SB device and cache its BAR0 */ - p2sb_scan_and_cache_devfn(bus, devfn); - } + /* Scan the P2SB device and cache its BAR0 */ + p2sb_scan_and_cache_devfn(bus, devfn); + + /* On Goldmont p2sb_bar() also gets called for the SPI controller */ + if (devfn == P2SB_DEVFN_GOLDMONT) + p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT); if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res)) return -ENOENT; diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c index 9193c3b8e..ae7ee6119 100644 --- a/drivers/powercap/dtpm_cpu.c +++ b/drivers/powercap/dtpm_cpu.c @@ -219,7 +219,7 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent) ret = freq_qos_add_request(&policy->constraints, &dtpm_cpu->qos_req, FREQ_QOS_MAX, pd->table[pd->nr_perf_states - 1].frequency); - if (ret) + if (ret < 0) goto out_dtpm_unregister; cpufreq_cpu_put(policy); diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c index a43b2babc..3e9c94a8d 100644 --- a/drivers/pwm/pwm-atmel-hlcdc.c +++ b/drivers/pwm/pwm-atmel-hlcdc.c @@ -38,11 +38,11 @@ static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip) return container_of(chip, struct atmel_hlcdc_pwm, chip); } -static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm, +static int atmel_hlcdc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { - struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c); - struct atmel_hlcdc *hlcdc = chip->hlcdc; + struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip); + struct atmel_hlcdc *hlcdc = atmel->hlcdc; unsigned int status; int ret; @@ -54,7 +54,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm, u32 pwmcfg; int pres; - if (!chip->errata || !chip->errata->slow_clk_erratum) { + if (!atmel->errata || !atmel->errata->slow_clk_erratum) { clk_freq = clk_get_rate(new_clk); if (!clk_freq) return -EINVAL; @@ -64,7 +64,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm, } /* Errata: cannot use slow clk on some IP revisions */ - if ((chip->errata && chip->errata->slow_clk_erratum) || + if ((atmel->errata && atmel->errata->slow_clk_erratum) || clk_period_ns > state->period) { new_clk = hlcdc->sys_clk; clk_freq = clk_get_rate(new_clk); @@ -77,8 +77,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm, for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) { /* Errata: cannot divide by 1 on some IP revisions */ - if (!pres && chip->errata && - chip->errata->div1_clk_erratum) + if (!pres && atmel->errata && + atmel->errata->div1_clk_erratum) continue; if ((clk_period_ns << pres) >= state->period) @@ -90,7 +90,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm, pwmcfg = ATMEL_HLCDC_PWMPS(pres); - if (new_clk != chip->cur_clk) { + if (new_clk != atmel->cur_clk) { u32 gencfg = 0; int ret; @@ -98,8 +98,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm, if (ret) return ret; - clk_disable_unprepare(chip->cur_clk); - chip->cur_clk = new_clk; + clk_disable_unprepare(atmel->cur_clk); + atmel->cur_clk = new_clk; if (new_clk == hlcdc->sys_clk) gencfg = ATMEL_HLCDC_CLKPWMSEL; @@ -160,8 +160,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm, if (ret) return ret; - clk_disable_unprepare(chip->cur_clk); - 
chip->cur_clk = NULL; + clk_disable_unprepare(atmel->cur_clk); + atmel->cur_clk = NULL; } return 0; @@ -183,31 +183,32 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = { #ifdef CONFIG_PM_SLEEP static int atmel_hlcdc_pwm_suspend(struct device *dev) { - struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev); + struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev); /* Keep the periph clock enabled if the PWM is still running. */ - if (pwm_is_enabled(&chip->chip.pwms[0])) - clk_disable_unprepare(chip->hlcdc->periph_clk); + if (!pwm_is_enabled(&atmel->chip.pwms[0])) + clk_disable_unprepare(atmel->hlcdc->periph_clk); return 0; } static int atmel_hlcdc_pwm_resume(struct device *dev) { - struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev); + struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev); struct pwm_state state; int ret; - pwm_get_state(&chip->chip.pwms[0], &state); + pwm_get_state(&atmel->chip.pwms[0], &state); /* Re-enable the periph clock it was stopped during suspend. */ if (!state.enabled) { - ret = clk_prepare_enable(chip->hlcdc->periph_clk); + ret = clk_prepare_enable(atmel->hlcdc->periph_clk); if (ret) return ret; } - return atmel_hlcdc_pwm_apply(&chip->chip, &chip->chip.pwms[0], &state); + return atmel_hlcdc_pwm_apply(&atmel->chip, &atmel->chip.pwms[0], + &state); } #endif @@ -244,14 +245,14 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev) { const struct of_device_id *match; struct device *dev = &pdev->dev; - struct atmel_hlcdc_pwm *chip; + struct atmel_hlcdc_pwm *atmel; struct atmel_hlcdc *hlcdc; int ret; hlcdc = dev_get_drvdata(dev->parent); - chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); - if (!chip) + atmel = devm_kzalloc(dev, sizeof(*atmel), GFP_KERNEL); + if (!atmel) return -ENOMEM; ret = clk_prepare_enable(hlcdc->periph_clk); @@ -260,33 +261,31 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev) match = of_match_node(atmel_hlcdc_dt_ids, dev->parent->of_node); if (match) - chip->errata = match->data; + atmel->errata = match->data; - chip->hlcdc = hlcdc; - chip->chip.ops = &atmel_hlcdc_pwm_ops; - chip->chip.dev = dev; - chip->chip.npwm = 1; + atmel->hlcdc = hlcdc; + atmel->chip.ops = &atmel_hlcdc_pwm_ops; + atmel->chip.dev = dev; + atmel->chip.npwm = 1; - ret = pwmchip_add(&chip->chip); + ret = pwmchip_add(&atmel->chip); if (ret) { clk_disable_unprepare(hlcdc->periph_clk); return ret; } - platform_set_drvdata(pdev, chip); + platform_set_drvdata(pdev, atmel); return 0; } -static int atmel_hlcdc_pwm_remove(struct platform_device *pdev) +static void atmel_hlcdc_pwm_remove(struct platform_device *pdev) { - struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev); + struct atmel_hlcdc_pwm *atmel = platform_get_drvdata(pdev); - pwmchip_remove(&chip->chip); + pwmchip_remove(&atmel->chip); - clk_disable_unprepare(chip->hlcdc->periph_clk); - - return 0; + clk_disable_unprepare(atmel->hlcdc->periph_clk); } static const struct of_device_id atmel_hlcdc_pwm_dt_ids[] = { @@ -301,7 +300,7 @@ static struct platform_driver atmel_hlcdc_pwm_driver = { .pm = &atmel_hlcdc_pwm_pm_ops, }, .probe = atmel_hlcdc_pwm_probe, - .remove = atmel_hlcdc_pwm_remove, + .remove_new = atmel_hlcdc_pwm_remove, }; module_platform_driver(atmel_hlcdc_pwm_driver); diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c index 0fccf061a..8ce6c453a 100644 --- a/drivers/pwm/pwm-img.c +++ b/drivers/pwm/pwm-img.c @@ -289,9 +289,9 @@ static int img_pwm_probe(struct platform_device *pdev) return PTR_ERR(imgchip->sys_clk); } - imgchip->pwm_clk = 
devm_clk_get(&pdev->dev, "imgchip"); + imgchip->pwm_clk = devm_clk_get(&pdev->dev, "pwm"); if (IS_ERR(imgchip->pwm_clk)) { - dev_err(&pdev->dev, "failed to get imgchip clock\n"); + dev_err(&pdev->dev, "failed to get pwm clock\n"); return PTR_ERR(imgchip->pwm_clk); } diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index 652fdb8dc..0a7920cbd 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -395,8 +395,17 @@ out: static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { + struct sti_pwm_chip *pc = to_sti_pwmchip(chip); + struct sti_pwm_compat_data *cdata = pc->cdata; + struct device *dev = pc->dev; int err; + if (pwm->hwpwm >= cdata->pwm_num_devs) { + dev_err(dev, "device %u is not valid for pwm mode\n", + pwm->hwpwm); + return -EINVAL; + } + if (state->polarity != PWM_POLARITY_NORMAL) return -EINVAL; @@ -647,7 +656,7 @@ static int sti_pwm_probe(struct platform_device *pdev) pc->chip.dev = dev; pc->chip.ops = &sti_pwm_ops; - pc->chip.npwm = pc->cdata->pwm_num_devs; + pc->chip.npwm = max(cdata->pwm_num_devs, cdata->cpt_num_devs); for (i = 0; i < cdata->cpt_num_devs; i++) { struct sti_cpt_ddata *ddata = &cdata->ddata[i]; diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 166019786..d93113b6f 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -313,7 +313,7 @@ config ST_SLIM_REMOTEPROC config STM32_RPROC tristate "STM32 remoteproc support" - depends on ARCH_STM32 + depends on ARCH_STM32 || COMPILE_TEST depends on REMOTEPROC select MAILBOX help diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 0e95525c1..ab5e4f02a 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -351,6 +351,9 @@ static void rproc_virtio_dev_release(struct device *dev) kfree(vdev); + of_reserved_mem_device_release(&rvdev->pdev->dev); + dma_release_coherent_memory(&rvdev->pdev->dev); + put_device(&rvdev->pdev->dev); } @@ -584,9 +587,6 @@ static int rproc_virtio_remove(struct platform_device *pdev) rproc_remove_subdev(rproc, &rvdev->subdev); rproc_remove_rvdev(rvdev); - of_reserved_mem_device_release(&pdev->dev); - dma_release_coherent_memory(&pdev->dev); - put_device(&rproc->dev); return 0; diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c index 8746cbb1f..74da03931 100644 --- a/drivers/remoteproc/stm32_rproc.c +++ b/drivers/remoteproc/stm32_rproc.c @@ -118,10 +118,10 @@ static int stm32_rproc_mem_alloc(struct rproc *rproc, struct device *dev = rproc->dev.parent; void *va; - dev_dbg(dev, "map memory: %pa+%x\n", &mem->dma, mem->len); - va = ioremap_wc(mem->dma, mem->len); + dev_dbg(dev, "map memory: %pad+%zx\n", &mem->dma, mem->len); + va = (__force void *)ioremap_wc(mem->dma, mem->len); if (IS_ERR_OR_NULL(va)) { - dev_err(dev, "Unable to map memory region: %pa+%x\n", + dev_err(dev, "Unable to map memory region: %pad+0x%zx\n", &mem->dma, mem->len); return -ENOMEM; } @@ -136,7 +136,7 @@ static int stm32_rproc_mem_release(struct rproc *rproc, struct rproc_mem_entry *mem) { dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma); - iounmap(mem->va); + iounmap((__force __iomem void *)mem->va); return 0; } @@ -627,7 +627,7 @@ stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz) ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE); if (IS_ERR_OR_NULL(ddata->rsc_va)) { - dev_err(dev, "Unable to map memory region: %pa+%zx\n", + dev_err(dev, "Unable to map memory 
region: %pa+%x\n", &rsc_pa, RSC_TBL_SIZE); ddata->rsc_va = NULL; return ERR_PTR(-ENOMEM); @@ -641,7 +641,7 @@ done: * entire area by overwriting it with the initial values stored in rproc->clean_table. */ *table_sz = RSC_TBL_SIZE; - return (struct resource_table *)ddata->rsc_va; + return (__force struct resource_table *)ddata->rsc_va; } static const struct rproc_ops st_rproc_ops = { @@ -889,7 +889,7 @@ static int stm32_rproc_remove(struct platform_device *pdev) return 0; } -static int __maybe_unused stm32_rproc_suspend(struct device *dev) +static int stm32_rproc_suspend(struct device *dev) { struct rproc *rproc = dev_get_drvdata(dev); struct stm32_rproc *ddata = rproc->priv; @@ -900,7 +900,7 @@ static int __maybe_unused stm32_rproc_suspend(struct device *dev) return 0; } -static int __maybe_unused stm32_rproc_resume(struct device *dev) +static int stm32_rproc_resume(struct device *dev) { struct rproc *rproc = dev_get_drvdata(dev); struct stm32_rproc *ddata = rproc->priv; @@ -911,16 +911,16 @@ static int __maybe_unused stm32_rproc_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops, - stm32_rproc_suspend, stm32_rproc_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops, + stm32_rproc_suspend, stm32_rproc_resume); static struct platform_driver stm32_rproc_driver = { .probe = stm32_rproc_probe, .remove = stm32_rproc_remove, .driver = { .name = "stm32-rproc", - .pm = &stm32_rproc_pm_ops, - .of_match_table = of_match_ptr(stm32_rproc_match), + .pm = pm_ptr(&stm32_rproc_pm_ops), + .of_match_table = stm32_rproc_match, }, }; module_platform_driver(stm32_rproc_driver); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index bb63edb50..87dc050ca 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1843,7 +1843,8 @@ config RTC_DRV_MT2712 config RTC_DRV_MT6397 tristate "MediaTek PMIC based RTC" - depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN) + depends on MFD_MT6397 || COMPILE_TEST + select IRQ_DOMAIN help This selects the MediaTek(R) RTC driver. RTC is part of MediaTek MT6397 PMIC. You should enable MT6397 PMIC MFD before select diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c index d5caf36c5..225c859d6 100644 --- a/drivers/rtc/lib_test.c +++ b/drivers/rtc/lib_test.c @@ -54,7 +54,7 @@ static void rtc_time64_to_tm_test_date_range(struct kunit *test) days = div_s64(secs, 86400); - #define FAIL_MSG "%d/%02d/%02d (%2d) : %ld", \ + #define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \ year, month, mday, yday, days KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index f207de4a8..341d65acd 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -8,9 +8,6 @@ * Copyright IBM Corp. 1999, 2009 */ -#define KMSG_COMPONENT "dasd" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - #include <linux/kmod.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -73,7 +70,8 @@ static void dasd_profile_init(struct dasd_profile *, struct dentry *); static void dasd_profile_exit(struct dasd_profile *); static void dasd_hosts_init(struct dentry *, struct dasd_device *); static void dasd_hosts_exit(struct dasd_device *); - +static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *, + unsigned int); /* * SECTION: Operations on the device structure. 
*/ @@ -2327,7 +2325,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) /* Non-temporary stop condition will trigger fail fast */ if (device->stopped & ~DASD_STOPPED_PENDING && test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && - (!dasd_eer_enabled(device))) { + !dasd_eer_enabled(device) && device->aq_mask == 0) { cqr->status = DASD_CQR_FAILED; cqr->intrc = -ENOLINK; continue; @@ -2803,20 +2801,18 @@ restart: dasd_log_sense(cqr, &cqr->irb); } - /* First of all call extended error reporting. */ - if (dasd_eer_enabled(base) && - cqr->status == DASD_CQR_FAILED) { - dasd_eer_write(base, cqr, DASD_EER_FATALERROR); - - /* restart request */ + /* + * First call extended error reporting and check for autoquiesce + */ + spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); + if (cqr->status == DASD_CQR_FAILED && + dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) { cqr->status = DASD_CQR_FILLED; cqr->retries = 255; - spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); - dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); - spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), - flags); + spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); goto restart; } + spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); /* Process finished ERP request. */ if (cqr->refers) { @@ -2858,7 +2854,7 @@ static void __dasd_block_start_head(struct dasd_block *block) /* Non-temporary stop condition will trigger fail fast */ if (block->base->stopped & ~DASD_STOPPED_PENDING && test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && - (!dasd_eer_enabled(block->base))) { + !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) { cqr->status = DASD_CQR_FAILED; cqr->intrc = -ENOLINK; dasd_schedule_block_bh(block); @@ -3391,8 +3387,7 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie) ret = ccw_device_set_online(cdev); if (ret) - pr_warn("%s: Setting the DASD online failed with rc=%d\n", - dev_name(&cdev->dev), ret); + dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret); } /* @@ -3479,8 +3474,11 @@ int dasd_generic_set_online(struct ccw_device *cdev, { struct dasd_discipline *discipline; struct dasd_device *device; + struct device *dev; int rc; + dev = &cdev->dev; + /* first online clears initial online feature flag */ dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); device = dasd_create_device(cdev); @@ -3493,11 +3491,10 @@ int dasd_generic_set_online(struct ccw_device *cdev, /* Try to load the required module. */ rc = request_module(DASD_DIAG_MOD); if (rc) { - pr_warn("%s Setting the DASD online failed " - "because the required module %s " - "could not be loaded (rc=%d)\n", - dev_name(&cdev->dev), DASD_DIAG_MOD, - rc); + dev_warn(dev, "Setting the DASD online failed " + "because the required module %s " + "could not be loaded (rc=%d)\n", + DASD_DIAG_MOD, rc); dasd_delete_device(device); return -ENODEV; } @@ -3505,8 +3502,7 @@ int dasd_generic_set_online(struct ccw_device *cdev, /* Module init could have failed, so check again here after * request_module(). 
*/ if (!dasd_diag_discipline_pointer) { - pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", - dev_name(&cdev->dev)); + dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n"); dasd_delete_device(device); return -ENODEV; } @@ -3516,37 +3512,33 @@ int dasd_generic_set_online(struct ccw_device *cdev, dasd_delete_device(device); return -EINVAL; } + device->base_discipline = base_discipline; if (!try_module_get(discipline->owner)) { - module_put(base_discipline->owner); dasd_delete_device(device); return -EINVAL; } - device->base_discipline = base_discipline; device->discipline = discipline; /* check_device will allocate block device if necessary */ rc = discipline->check_device(device); if (rc) { - pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", - dev_name(&cdev->dev), discipline->name, rc); - module_put(discipline->owner); - module_put(base_discipline->owner); + dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n", + discipline->name, rc); dasd_delete_device(device); return rc; } dasd_set_target_state(device, DASD_STATE_ONLINE); if (device->state <= DASD_STATE_KNOWN) { - pr_warn("%s Setting the DASD online failed because of a missing discipline\n", - dev_name(&cdev->dev)); + dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n"); rc = -ENODEV; dasd_set_target_state(device, DASD_STATE_NEW); if (device->block) dasd_free_block(device->block); dasd_delete_device(device); - } else - pr_debug("dasd_generic device %s found\n", - dev_name(&cdev->dev)); + } else { + dev_dbg(dev, "dasd_generic device found\n"); + } wait_event(dasd_init_waitq, _wait_for_device(device)); @@ -3557,10 +3549,13 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online); int dasd_generic_set_offline(struct ccw_device *cdev) { + int max_count, open_count, rc; struct dasd_device *device; struct dasd_block *block; - int max_count, open_count, rc; unsigned long flags; + struct device *dev; + + dev = &cdev->dev; rc = 0; spin_lock_irqsave(get_ccwdev_lock(cdev), flags); @@ -3581,11 +3576,10 @@ int dasd_generic_set_offline(struct ccw_device *cdev) open_count = atomic_read(&device->block->open_count); if (open_count > max_count) { if (open_count > 0) - pr_warn("%s: The DASD cannot be set offline with open count %i\n", - dev_name(&cdev->dev), open_count); + dev_warn(dev, "The DASD cannot be set offline with open count %i\n", + open_count); else - pr_warn("%s: The DASD cannot be set offline while it is in use\n", - dev_name(&cdev->dev)); + dev_warn(dev, "The DASD cannot be set offline while it is in use\n"); rc = -EBUSY; goto out_err; } @@ -3682,8 +3676,8 @@ int dasd_generic_last_path_gone(struct dasd_device *device) dev_warn(&device->cdev->dev, "No operational channel path is left " "for the device\n"); DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); - /* First of all call extended error reporting. */ - dasd_eer_write(device, NULL, DASD_EER_NOPATH); + /* First call extended error reporting and check for autoquiesce. */ + dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); if (device->state < DASD_STATE_BASIC) return 0; @@ -3815,7 +3809,8 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) "No verified channel paths remain for the device\n"); DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last verified path gone"); - dasd_eer_write(device, NULL, DASD_EER_NOPATH); + /* First call extended error reporting and check for autoquiesce. 
*/ + dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); } @@ -3837,7 +3832,8 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path); void dasd_generic_space_exhaust(struct dasd_device *device, struct dasd_ccw_req *cqr) { - dasd_eer_write(device, NULL, DASD_EER_NOSPC); + /* First call extended error reporting and check for autoquiesce. */ + dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC); if (device->state < DASD_STATE_BASIC) return; @@ -3931,6 +3927,31 @@ void dasd_schedule_requeue(struct dasd_device *device) } EXPORT_SYMBOL(dasd_schedule_requeue); +static int dasd_handle_autoquiesce(struct dasd_device *device, + struct dasd_ccw_req *cqr, + unsigned int reason) +{ + /* in any case write eer message with reason */ + if (dasd_eer_enabled(device)) + dasd_eer_write(device, cqr, reason); + + if (!test_bit(reason, &device->aq_mask)) + return 0; + + /* notify eer about autoquiesce */ + if (dasd_eer_enabled(device)) + dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE); + + dev_info(&device->cdev->dev, + "The DASD has been put in the quiesce state\n"); + dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE); + + if (device->features & DASD_FEATURE_REQUEUEQUIESCE) + dasd_schedule_requeue(device); + + return 1; +} + static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, int rdc_buffer_size, int magic) diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index d4d31cd11..d16c699b9 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -387,6 +387,7 @@ void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr, break; case DASD_EER_NOPATH: case DASD_EER_NOSPC: + case DASD_EER_AUTOQUIESCE: dasd_eer_write_standard_trigger(device, NULL, id); break; case DASD_EER_STATECHANGE: diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index f50932518..00bcd1772 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -464,6 +464,7 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer; #define DASD_EER_STATECHANGE 3 #define DASD_EER_PPRCSUSPEND 4 #define DASD_EER_NOSPC 5 +#define DASD_EER_AUTOQUIESCE 31 /* DASD path handling */ @@ -641,6 +642,7 @@ struct dasd_device { struct dasd_format_entry format_entry; struct kset *paths_info; struct dasd_copy_relation *copy; + unsigned long aq_mask; }; struct dasd_block { diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 28e34d155..6f44963d3 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -617,6 +617,7 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, { if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner)) return NULL; + zcrypt_card_get(zc); zcrypt_queue_get(zq); get_device(&zq->queue->ap_dev.device); atomic_add(weight, &zc->load); @@ -636,6 +637,7 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc, atomic_sub(weight, &zq->load); put_device(&zq->queue->ap_dev.device); zcrypt_queue_put(zq); + zcrypt_card_put(zc); module_put(mod); } diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ae4b6d24b..1e6340e2c 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1179,6 +1179,20 @@ static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev, } } +/** + * qeth_irq() - qeth interrupt handler + * @cdev: ccw device + * @intparm: expect pointer to iob + * @irb: 
Interruption Response Block + * + * In the good path: + * corresponding qeth channel is locked with last used iob as active_cmd. + * But this function is also called for error interrupts. + * + * Caller ensures that: + * Interrupts are disabled; ccw device lock is held; + * + */ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { @@ -1220,11 +1234,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, iob = (struct qeth_cmd_buffer *) (addr_t)intparm; } - qeth_unlock_channel(card, channel); - rc = qeth_check_irb_error(card, cdev, irb); if (rc) { /* IO was terminated, free its resources. */ + qeth_unlock_channel(card, channel); if (iob) qeth_cancel_cmd(iob, rc); return; @@ -1268,6 +1281,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, rc = qeth_get_problem(card, cdev, irb); if (rc) { card->read_or_write_problem = 1; + qeth_unlock_channel(card, channel); if (iob) qeth_cancel_cmd(iob, rc); qeth_clear_ipacmd_list(card); @@ -1276,6 +1290,26 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, } } + if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) { + /* channel command hasn't started: retry. + * active_cmd is still set to last iob + */ + QETH_CARD_TEXT(card, 2, "irqcc1"); + rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob), + (addr_t)iob, 0, 0, iob->timeout); + if (rc) { + QETH_DBF_MESSAGE(2, + "ccw retry on %x failed, rc = %i\n", + CARD_DEVID(card), rc); + QETH_CARD_TEXT_(card, 2, " err%d", rc); + qeth_unlock_channel(card, channel); + qeth_cancel_cmd(iob, rc); + } + return; + } + + qeth_unlock_channel(card, channel); + if (iob) { /* sanity check: */ if (irb->scsw.cmd.count > iob->length) { diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index 7bd2ba1ad..f30fe324e 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -20,7 +20,6 @@ struct bfa_s; typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); -typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status); /* * Interrupt message handlers @@ -437,4 +436,12 @@ struct bfa_cb_pending_q_s { (__qe)->data = (__data); \ } while (0) +#define bfa_pending_q_init_status(__qe, __cbfn, __cbarg, __data) do { \ + bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \ + (__qe)->hcb_qe.cbfn_status = (__cbfn); \ + (__qe)->hcb_qe.cbarg = (__cbarg); \ + (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \ + (__qe)->data = (__data); \ +} while (0) + #endif /* __BFA_H__ */ diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 6846ca8f7..3438d0b8b 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -1907,15 +1907,13 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) struct list_head *qe; struct list_head *qen; struct bfa_cb_qe_s *hcb_qe; - bfa_cb_cbfn_status_t cbfn; list_for_each_safe(qe, qen, comp_q) { hcb_qe = (struct bfa_cb_qe_s *) qe; if (hcb_qe->pre_rmv) { /* qe is invalid after return, dequeue before cbfn() */ list_del(qe); - cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn); - cbfn(hcb_qe->cbarg, hcb_qe->fw_status); + hcb_qe->cbfn_status(hcb_qe->cbarg, hcb_qe->fw_status); } else hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); } diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 933a1c389..5e568d6d7 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -361,14 +361,18 @@ struct bfa_reqq_wait_s { void *cbarg; }; -typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); +typedef void (*bfa_cb_cbfn_t) (void 
*cbarg, bfa_boolean_t complete); +typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status); /* * Generic BFA callback element. */ struct bfa_cb_qe_s { struct list_head qe; - bfa_cb_cbfn_t cbfn; + union { + bfa_cb_cbfn_status_t cbfn_status; + bfa_cb_cbfn_t cbfn; + }; bfa_boolean_t once; bfa_boolean_t pre_rmv; /* set for stack based qe(s) */ bfa_status_t fw_status; /* to access fw status in comp proc */ diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index be8dfbe13..524e4e697 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -2135,8 +2135,7 @@ bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd) struct bfa_cb_pending_q_s cb_qe; init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, - &fcomp, &iocmd->stats); + bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); spin_unlock_irqrestore(&bfad->bfad_lock, flags); @@ -2159,7 +2158,7 @@ bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd) struct bfa_cb_pending_q_s cb_qe; init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); + bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); @@ -2443,8 +2442,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd) struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, - &fcomp, &iocmd->stats); + bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats); spin_lock_irqsave(&bfad->bfad_lock, flags); WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); @@ -2474,8 +2472,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd) struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, - &fcomp, NULL); + bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL); spin_lock_irqsave(&bfad->bfad_lock, flags); WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h index c38017b4a..e50e93e7f 100644 --- a/drivers/scsi/csiostor/csio_defs.h +++ b/drivers/scsi/csiostor/csio_defs.h @@ -73,7 +73,21 @@ csio_list_deleted(struct list_head *list) #define csio_list_prev(elem) (((struct list_head *)(elem))->prev) /* State machine */ -typedef void (*csio_sm_state_t)(void *, uint32_t); +struct csio_lnode; + +/* State machine evets */ +enum csio_ln_ev { + CSIO_LNE_NONE = (uint32_t)0, + CSIO_LNE_LINKUP, + CSIO_LNE_FAB_INIT_DONE, + CSIO_LNE_LINK_DOWN, + CSIO_LNE_DOWN_LINK, + CSIO_LNE_LOGO, + CSIO_LNE_CLOSE, + CSIO_LNE_MAX_EVENT, +}; + +typedef void (*csio_sm_state_t)(struct csio_lnode *ln, enum csio_ln_ev evt); struct csio_sm { struct list_head sm_list; @@ -83,7 +97,7 @@ struct csio_sm { static inline void csio_set_state(void *smp, void *state) { - ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state; + ((struct csio_sm *)smp)->sm_state = state; } static inline void diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index d5ac93897..5b3ffefae 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -1095,7 +1095,7 @@ csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, int 
csio_is_lnode_ready(struct csio_lnode *ln) { - return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)); + return (csio_get_state(ln) == csio_lns_ready); } /*****************************************************************************/ @@ -1366,15 +1366,15 @@ csio_free_fcfinfo(struct kref *kref) void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str) { - if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) { + if (csio_get_state(ln) == csio_lns_uninit) { strcpy(str, "UNINIT"); return; } - if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) { + if (csio_get_state(ln) == csio_lns_ready) { strcpy(str, "READY"); return; } - if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) { + if (csio_get_state(ln) == csio_lns_offline) { strcpy(str, "OFFLINE"); return; } diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h index 372a67d12..607698a0f 100644 --- a/drivers/scsi/csiostor/csio_lnode.h +++ b/drivers/scsi/csiostor/csio_lnode.h @@ -53,19 +53,6 @@ extern int csio_fcoe_rnodes; extern int csio_fdmi_enable; -/* State machine evets */ -enum csio_ln_ev { - CSIO_LNE_NONE = (uint32_t)0, - CSIO_LNE_LINKUP, - CSIO_LNE_FAB_INIT_DONE, - CSIO_LNE_LINK_DOWN, - CSIO_LNE_DOWN_LINK, - CSIO_LNE_LOGO, - CSIO_LNE_CLOSE, - CSIO_LNE_MAX_EVENT, -}; - - struct csio_fcf_info { struct list_head list; uint8_t priority; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 8b825364b..c785493b1 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -353,12 +353,13 @@ static void scsi_host_dev_release(struct device *dev) if (shost->shost_state == SHOST_CREATED) { /* - * Free the shost_dev device name here if scsi_host_alloc() - * and scsi_host_put() have been called but neither + * Free the shost_dev device name and remove the proc host dir + * here if scsi_host_{alloc,put}() have been called but neither * scsi_host_add() nor scsi_host_remove() has been called. * This avoids that the memory allocated for the shost_dev - * name is leaked. + * name as well as the proc dir structure are leaked. 
*/ + scsi_proc_hostdir_rm(shost->hostt); kfree(dev_name(&shost->shost_dev)); } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 63a23251f..4b5ceba68 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -1651,6 +1651,16 @@ out_err: /* ---------- Domain revalidation ---------- */ +static void sas_get_sas_addr_and_dev_type(struct smp_disc_resp *disc_resp, + u8 *sas_addr, + enum sas_device_type *type) +{ + memcpy(sas_addr, disc_resp->disc.attached_sas_addr, SAS_ADDR_SIZE); + *type = to_dev_type(&disc_resp->disc); + if (*type == SAS_PHY_UNUSED) + memset(sas_addr, 0, SAS_ADDR_SIZE); +} + static int sas_get_phy_discover(struct domain_device *dev, int phy_id, struct smp_disc_resp *disc_resp) { @@ -1704,13 +1714,8 @@ int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, return -ENOMEM; res = sas_get_phy_discover(dev, phy_id, disc_resp); - if (res == 0) { - memcpy(sas_addr, disc_resp->disc.attached_sas_addr, - SAS_ADDR_SIZE); - *type = to_dev_type(&disc_resp->disc); - if (*type == 0) - memset(sas_addr, 0, SAS_ADDR_SIZE); - } + if (res == 0) + sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, type); kfree(disc_resp); return res; } @@ -1972,6 +1977,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; enum sas_device_type type = SAS_PHY_UNUSED; + struct smp_disc_resp *disc_resp; u8 sas_addr[SAS_ADDR_SIZE]; char msg[80] = ""; int res; @@ -1983,33 +1989,41 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, SAS_ADDR(dev->sas_addr), phy_id, msg); memset(sas_addr, 0, SAS_ADDR_SIZE); - res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type); + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); + if (!disc_resp) + return -ENOMEM; + + res = sas_get_phy_discover(dev, phy_id, disc_resp); switch (res) { case SMP_RESP_NO_PHY: phy->phy_state = PHY_NOT_PRESENT; sas_unregister_devs_sas_addr(dev, phy_id, last); - return res; + goto out_free_resp; case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; sas_unregister_devs_sas_addr(dev, phy_id, last); - return res; + goto out_free_resp; case SMP_RESP_FUNC_ACC: break; case -ECOMM: break; default: - return res; + goto out_free_resp; } + if (res == 0) + sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, &type); + if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) { phy->phy_state = PHY_EMPTY; sas_unregister_devs_sas_addr(dev, phy_id, last); /* - * Even though the PHY is empty, for convenience we discover - * the PHY to update the PHY info, like negotiated linkrate. + * Even though the PHY is empty, for convenience we update + * the PHY info, like negotiated linkrate. 
*/ - sas_ex_phy_discover(dev, phy_id); - return res; + if (res == 0) + sas_set_ex_phy(dev, phy_id, disc_resp); + goto out_free_resp; } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) && dev_type_flutter(type, phy->attached_dev_type)) { struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id); @@ -2021,7 +2035,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, action = ", needs recovery"; pr_debug("ex %016llx phy%02d broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); - return res; + goto out_free_resp; } /* we always have to delete the old device when we went here */ @@ -2030,7 +2044,10 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, SAS_ADDR(phy->attached_sas_addr)); sas_unregister_devs_sas_addr(dev, phy_id, last); - return sas_discover_new(dev, phy_id); + res = sas_discover_new(dev, phy_id); +out_free_resp: + kfree(disc_resp); + return res; } /** diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index b54fafb48..2373dad01 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -3169,10 +3169,10 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) } cmdwqe = &cmdiocbq->wqe; - memset(cmdwqe, 0, sizeof(union lpfc_wqe)); + memset(cmdwqe, 0, sizeof(*cmdwqe)); if (phba->sli_rev < LPFC_SLI_REV4) { rspwqe = &rspiocbq->wqe; - memset(rspwqe, 0, sizeof(union lpfc_wqe)); + memset(rspwqe, 0, sizeof(*rspwqe)); } INIT_LIST_HEAD(&head); diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index f7cfac0da..1c64da3b2 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1586,7 +1586,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) wqe = &nvmewqe->wqe; /* Initialize WQE */ - memset(wqe, 0, sizeof(union lpfc_wqe)); + memset(wqe, 0, sizeof(*wqe)); ctx_buf->iocbq->cmd_dmabuf = NULL; spin_lock(&phba->sli4_hba.sgl_list_lock); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 809be43f4..8e6ac08e5 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -7398,7 +7398,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) return -EFAULT; } - issue_diag_reset: + return 0; + +issue_diag_reset: rc = _base_diag_reset(ioc); return rc; } diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index e885c1dbf..e2f1b186e 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -1775,9 +1775,9 @@ static ssize_t raid_state_show(struct device *dev, name = myrb_devstate_name(ldev_info->state); if (name) - ret = snprintf(buf, 32, "%s\n", name); + ret = snprintf(buf, 64, "%s\n", name); else - ret = snprintf(buf, 32, "Invalid (%02X)\n", + ret = snprintf(buf, 64, "Invalid (%02X)\n", ldev_info->state); } else { struct myrb_pdev_state *pdev_info = sdev->hostdata; @@ -1796,9 +1796,9 @@ static ssize_t raid_state_show(struct device *dev, else name = myrb_devstate_name(pdev_info->state); if (name) - ret = snprintf(buf, 32, "%s\n", name); + ret = snprintf(buf, 64, "%s\n", name); else - ret = snprintf(buf, 32, "Invalid (%02X)\n", + ret = snprintf(buf, 64, "Invalid (%02X)\n", pdev_info->state); } return ret; @@ -1886,11 +1886,11 @@ static ssize_t raid_level_show(struct device *dev, name = myrb_raidlevel_name(ldev_info->raid_level); if (!name) - return snprintf(buf, 32, "Invalid (%02X)\n", + return snprintf(buf, 64, "Invalid (%02X)\n", ldev_info->state); - return snprintf(buf, 32, "%s\n", name); + return snprintf(buf, 64, "%s\n", name); } - return 
snprintf(buf, 32, "Physical Drive\n"); + return snprintf(buf, 64, "Physical Drive\n"); } static DEVICE_ATTR_RO(raid_level); @@ -1903,15 +1903,15 @@ static ssize_t rebuild_show(struct device *dev, unsigned char status; if (sdev->channel < myrb_logical_channel(sdev->host)) - return snprintf(buf, 32, "physical device - not rebuilding\n"); + return snprintf(buf, 64, "physical device - not rebuilding\n"); status = myrb_get_rbld_progress(cb, &rbld_buf); if (rbld_buf.ldev_num != sdev->id || status != MYRB_STATUS_SUCCESS) - return snprintf(buf, 32, "not rebuilding\n"); + return snprintf(buf, 64, "not rebuilding\n"); - return snprintf(buf, 32, "rebuilding block %u of %u\n", + return snprintf(buf, 64, "rebuilding block %u of %u\n", rbld_buf.ldev_size - rbld_buf.blocks_left, rbld_buf.ldev_size); } diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index 7eb8c39da..95e7c00cb 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -947,9 +947,9 @@ static ssize_t raid_state_show(struct device *dev, name = myrs_devstate_name(ldev_info->dev_state); if (name) - ret = snprintf(buf, 32, "%s\n", name); + ret = snprintf(buf, 64, "%s\n", name); else - ret = snprintf(buf, 32, "Invalid (%02X)\n", + ret = snprintf(buf, 64, "Invalid (%02X)\n", ldev_info->dev_state); } else { struct myrs_pdev_info *pdev_info; @@ -958,9 +958,9 @@ static ssize_t raid_state_show(struct device *dev, pdev_info = sdev->hostdata; name = myrs_devstate_name(pdev_info->dev_state); if (name) - ret = snprintf(buf, 32, "%s\n", name); + ret = snprintf(buf, 64, "%s\n", name); else - ret = snprintf(buf, 32, "Invalid (%02X)\n", + ret = snprintf(buf, 64, "Invalid (%02X)\n", pdev_info->dev_state); } return ret; @@ -1066,13 +1066,13 @@ static ssize_t raid_level_show(struct device *dev, ldev_info = sdev->hostdata; name = myrs_raid_level_name(ldev_info->raid_level); if (!name) - return snprintf(buf, 32, "Invalid (%02X)\n", + return snprintf(buf, 64, "Invalid (%02X)\n", ldev_info->dev_state); } else name = myrs_raid_level_name(MYRS_RAID_PHYSICAL); - return snprintf(buf, 32, "%s\n", name); + return snprintf(buf, 64, "%s\n", name); } static DEVICE_ATTR_RO(raid_level); @@ -1086,7 +1086,7 @@ static ssize_t rebuild_show(struct device *dev, unsigned char status; if (sdev->channel < cs->ctlr_info->physchan_present) - return snprintf(buf, 32, "physical device - not rebuilding\n"); + return snprintf(buf, 64, "physical device - not rebuilding\n"); ldev_info = sdev->hostdata; ldev_num = ldev_info->ldev_num; @@ -1098,11 +1098,11 @@ static ssize_t rebuild_show(struct device *dev, return -EIO; } if (ldev_info->rbld_active) { - return snprintf(buf, 32, "rebuilding block %zu of %zu\n", + return snprintf(buf, 64, "rebuilding block %zu of %zu\n", (size_t)ldev_info->rbld_lba, (size_t)ldev_info->cfg_devsize); } else - return snprintf(buf, 32, "not rebuilding\n"); + return snprintf(buf, 64, "not rebuilding\n"); } static ssize_t rebuild_store(struct device *dev, @@ -1190,7 +1190,7 @@ static ssize_t consistency_check_show(struct device *dev, unsigned short ldev_num; if (sdev->channel < cs->ctlr_info->physchan_present) - return snprintf(buf, 32, "physical device - not checking\n"); + return snprintf(buf, 64, "physical device - not checking\n"); ldev_info = sdev->hostdata; if (!ldev_info) @@ -1198,11 +1198,11 @@ static ssize_t consistency_check_show(struct device *dev, ldev_num = ldev_info->ldev_num; myrs_get_ldev_info(cs, ldev_num, ldev_info); if (ldev_info->cc_active) - return snprintf(buf, 32, "checking block %zu of %zu\n", + return snprintf(buf, 64, "checking block 
%zu of %zu\n", (size_t)ldev_info->cc_lba, (size_t)ldev_info->cfg_devsize); else - return snprintf(buf, 32, "not checking\n"); + return snprintf(buf, 64, "not checking\n"); } static ssize_t consistency_check_store(struct device *dev, diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 07fbaa452..0d414c1aa 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2741,7 +2741,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) return; if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { - qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); + /* Will wait for wind down of adapter */ + ql_dbg(ql_dbg_aer, fcport->vha, 0x900c, + "%s pci offline detected (id %06x)\n", __func__, + fcport->d_id.b24); + qla_pci_set_eeh_busy(fcport->vha); + qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, + 0, WAIT_TARGET); return; } } @@ -2763,7 +2769,11 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) vha = fcport->vha; if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { - qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); + /* Will wait for wind down of adapter */ + ql_dbg(ql_dbg_aer, fcport->vha, 0x900b, + "%s pci offline detected (id %06x)\n", __func__, + fcport->d_id.b24); + qla_pci_set_eeh_busy(vha); qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET); return; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 1713588f6..31c451dae 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -83,7 +83,7 @@ typedef union { #include "qla_nvme.h" #define QLA2XXX_DRIVER_NAME "qla2xxx" #define QLA2XXX_APIDEV "ql2xapidev" -#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc." +#define QLA2XXX_MANUFACTURER "Marvell" /* * We have MAILBOX_REGISTER_COUNT sized arrays in a few places, diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 2e4537f9e..73cd869ca 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -44,7 +44,7 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t); -extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool); +extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *); extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 884ed7725..c64e44964 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1187,8 +1187,12 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) return rval; done_free_sp: - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); + /* + * use qla24xx_async_gnl_sp_done to purge all pending gnl request. + * kref_put is call behind the scene. 
+ */ + sp->u.iocb_cmd.u.mbx.in_mb[0] = MBS_COMMAND_ERROR; + qla24xx_async_gnl_sp_done(sp, QLA_COMMAND_ERROR); fcport->flags &= ~(FCF_ASYNC_SENT); done: fcport->flags &= ~(FCF_ASYNC_ACTIVE); @@ -2666,6 +2670,40 @@ exit: return rval; } +static void qla_enable_fce_trace(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + + if (ha->fce) { + ha->flags.fce_enabled = 1; + memset(ha->fce, 0, fce_calc_size(ha->fce_bufs)); + rval = qla2x00_enable_fce_trace(vha, + ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs); + + if (rval) { + ql_log(ql_log_warn, vha, 0x8033, + "Unable to reinitialize FCE (%d).\n", rval); + ha->flags.fce_enabled = 0; + } + } +} + +static void qla_enable_eft_trace(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + + if (ha->eft) { + memset(ha->eft, 0, EFT_SIZE); + rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS); + + if (rval) { + ql_log(ql_log_warn, vha, 0x8034, + "Unable to reinitialize EFT (%d).\n", rval); + } + } +} /* * qla2x00_initialize_adapter * Initialize board. @@ -3669,9 +3707,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) } static void -qla2x00_init_fce_trace(scsi_qla_host_t *vha) +qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) { - int rval; dma_addr_t tc_dma; void *tc; struct qla_hw_data *ha = vha->hw; @@ -3700,27 +3737,17 @@ qla2x00_init_fce_trace(scsi_qla_host_t *vha) return; } - rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, - ha->fce_mb, &ha->fce_bufs); - if (rval) { - ql_log(ql_log_warn, vha, 0x00bf, - "Unable to initialize FCE (%d).\n", rval); - dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma); - return; - } - ql_dbg(ql_dbg_init, vha, 0x00c0, "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024); - ha->flags.fce_enabled = 1; ha->fce_dma = tc_dma; ha->fce = tc; + ha->fce_bufs = FCE_NUM_BUFFERS; } static void -qla2x00_init_eft_trace(scsi_qla_host_t *vha) +qla2x00_alloc_eft_trace(scsi_qla_host_t *vha) { - int rval; dma_addr_t tc_dma; void *tc; struct qla_hw_data *ha = vha->hw; @@ -3745,14 +3772,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha) return; } - rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); - if (rval) { - ql_log(ql_log_warn, vha, 0x00c2, - "Unable to initialize EFT (%d).\n", rval); - dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma); - return; - } - ql_dbg(ql_dbg_init, vha, 0x00c3, "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); @@ -3760,13 +3779,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha) ha->eft = tc; } -static void -qla2x00_alloc_offload_mem(scsi_qla_host_t *vha) -{ - qla2x00_init_fce_trace(vha); - qla2x00_init_eft_trace(vha); -} - void qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) { @@ -3821,10 +3833,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) if (ha->tgt.atio_ring) mq_size += ha->tgt.atio_q_length * sizeof(request_t); - qla2x00_init_fce_trace(vha); + qla2x00_alloc_fce_trace(vha); if (ha->fce) fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; - qla2x00_init_eft_trace(vha); + qla2x00_alloc_eft_trace(vha); if (ha->eft) eft_size = EFT_SIZE; } @@ -4254,7 +4266,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; unsigned long flags; - uint16_t fw_major_version; int done_once = 0; if (IS_P3P_TYPE(ha)) { @@ -4321,7 +4332,6 @@ execute_fw_with_lr: goto failed; enable_82xx_npiv: - fw_major_version = ha->fw_major_version; if (IS_P3P_TYPE(ha)) qla82xx_check_md_needed(vha); else @@ -4350,12 +4360,11 @@ enable_82xx_npiv: if (rval != QLA_SUCCESS) goto failed; 
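A minimal sketch of the allocate-once / re-enable split that the qla2xxx hunks above apply to the FCE/EFT trace buffers: allocation now happens a single time alongside the firmware-dump setup, while enabling is repeated on every chip initialization or ISP abort so the existing DMA buffers are simply re-armed. The names and types below are illustrative assumptions, not the actual qla2xxx interfaces.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>

struct trace_buf {
	void *cpu;		/* CPU mapping of the trace area */
	dma_addr_t dma;		/* bus address handed to the firmware */
	size_t size;
};

/* Allocate the DMA buffer once; later resets reuse it. */
static int trace_alloc_once(struct device *dev, struct trace_buf *tb, size_t size)
{
	if (tb->cpu)
		return 0;
	tb->cpu = dma_alloc_coherent(dev, size, &tb->dma, GFP_KERNEL);
	if (!tb->cpu)
		return -ENOMEM;
	tb->size = size;
	return 0;
}

/* Re-arm tracing on every (re)initialization; no reallocation needed. */
static void trace_enable_each_init(struct trace_buf *tb)
{
	if (!tb->cpu)
		return;
	memset(tb->cpu, 0, tb->size);
	/* a firmware enable call (e.g. a mailbox command) would follow here */
}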
- if (!fw_major_version && !(IS_P3P_TYPE(ha))) - qla2x00_alloc_offload_mem(vha); - if (ql2xallocfwdump && !(IS_P3P_TYPE(ha))) qla2x00_alloc_fw_dump(vha); + qla_enable_fce_trace(vha); + qla_enable_eft_trace(vha); } else { goto failed; } @@ -7540,12 +7549,12 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) int qla2x00_abort_isp(scsi_qla_host_t *vha) { - int rval; uint8_t status = 0; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp, *tvp; struct req_que *req = ha->req_q_map[0]; unsigned long flags; + fc_port_t *fcport; if (vha->flags.online) { qla2x00_abort_isp_cleanup(vha); @@ -7614,6 +7623,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n"); return status; } + + /* User may have updated [fcp|nvme] prefer in flash */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (NVME_PRIORITY(ha, fcport)) + fcport->do_prli_nvme = 1; + else + fcport->do_prli_nvme = 0; + } + if (!qla2x00_restart_isp(vha)) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); @@ -7634,31 +7652,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) if (IS_QLA81XX(ha) || IS_QLA8031(ha)) qla2x00_get_fw_version(vha); - if (ha->fce) { - ha->flags.fce_enabled = 1; - memset(ha->fce, 0, - fce_calc_size(ha->fce_bufs)); - rval = qla2x00_enable_fce_trace(vha, - ha->fce_dma, ha->fce_bufs, ha->fce_mb, - &ha->fce_bufs); - if (rval) { - ql_log(ql_log_warn, vha, 0x8033, - "Unable to reinitialize FCE " - "(%d).\n", rval); - ha->flags.fce_enabled = 0; - } - } - if (ha->eft) { - memset(ha->eft, 0, EFT_SIZE); - rval = qla2x00_enable_eft_trace(vha, - ha->eft_dma, EFT_NUM_BUFFERS); - if (rval) { - ql_log(ql_log_warn, vha, 0x8034, - "Unable to reinitialize EFT " - "(%d).\n", rval); - } - } } else { /* failed the ISP abort */ vha->flags.online = 1; if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { @@ -7708,6 +7702,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); + /* User may have updated [fcp|nvme] prefer in flash */ + list_for_each_entry(fcport, &vp->vp_fcports, list) { + if (NVME_PRIORITY(ha, fcport)) + fcport->do_prli_nvme = 1; + else + fcport->do_prli_nvme = 0; + } + qla2x00_vp_abort_isp(vp); spin_lock_irqsave(&ha->vport_slock, flags); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 9e524d52d..7bccd525e 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2588,6 +2588,33 @@ void qla2x00_sp_release(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); + struct scsi_qla_host *vha = sp->vha; + + switch (sp->type) { + case SRB_CT_PTHRU_CMD: + /* GPSC & GFPNID use fcport->ct_desc.ct_sns for both req & rsp */ + if (sp->u.iocb_cmd.u.ctarg.req && + (!sp->fcport || + sp->u.iocb_cmd.u.ctarg.req != sp->fcport->ct_desc.ct_sns)) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + if (sp->u.iocb_cmd.u.ctarg.rsp && + (!sp->fcport || + sp->u.iocb_cmd.u.ctarg.rsp != sp->fcport->ct_desc.ct_sns)) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + break; + default: + break; + } sp->free(sp); } @@ -2611,7 +2638,8 @@ static void qla2x00_els_dcmd_sp_free(srb_t *sp) { struct srb_iocb *elsio = &sp->u.iocb_cmd; - kfree(sp->fcport); + if 
(sp->fcport) + qla2x00_free_fcport(sp->fcport); if (elsio->u.els_logo.els_logo_pyld) dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, @@ -2693,7 +2721,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { - kfree(fcport); + qla2x00_free_fcport(fcport); ql_log(ql_log_info, vha, 0x70e6, "SRB allocation failed\n"); return -ENOMEM; @@ -2724,6 +2752,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, if (!elsio->u.els_logo.els_logo_pyld) { /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); + qla2x00_free_fcport(fcport); return QLA_FUNCTION_FAILED; } @@ -2748,6 +2777,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, if (rval != QLA_SUCCESS) { /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); + qla2x00_free_fcport(fcport); return QLA_FUNCTION_FAILED; } @@ -3013,7 +3043,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, - fc_port_t *fcport, bool wait) + fc_port_t *fcport) { srb_t *sp; struct srb_iocb *elsio = NULL; @@ -3028,8 +3058,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, if (!sp) { ql_log(ql_log_info, vha, 0x70e6, "SRB allocation failed\n"); - fcport->flags &= ~FCF_ASYNC_ACTIVE; - return -ENOMEM; + goto done; } fcport->flags |= FCF_ASYNC_SENT; @@ -3038,9 +3067,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, ql_dbg(ql_dbg_io, vha, 0x3073, "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24); - if (wait) - sp->flags = SRB_WAKEUP_ON_COMP; - sp->type = SRB_ELS_DCMD; sp->name = "ELS_DCMD"; sp->fcport = fcport; @@ -3056,7 +3082,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, if (!elsio->u.els_plogi.els_plogi_pyld) { rval = QLA_FUNCTION_FAILED; - goto out; + goto done_free_sp; } resp_ptr = elsio->u.els_plogi.els_resp_pyld = @@ -3065,7 +3091,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, if (!elsio->u.els_plogi.els_resp_pyld) { rval = QLA_FUNCTION_FAILED; - goto out; + goto done_free_sp; } ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr); @@ -3080,7 +3106,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) { struct fc_els_flogi *p = ptr; - p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC); } @@ -3089,10 +3114,11 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, sizeof(*elsio->u.els_plogi.els_plogi_pyld)); - init_completion(&elsio->u.els_plogi.comp); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - rval = QLA_FUNCTION_FAILED; + fcport->flags |= FCF_LOGIN_NEEDED; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + goto done_free_sp; } else { ql_dbg(ql_dbg_disc, vha, 0x3074, "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n", @@ -3100,21 +3126,15 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, fcport->d_id.b24, vha->d_id.b24); } - if (wait) { - wait_for_completion(&elsio->u.els_plogi.comp); - - if (elsio->u.els_plogi.comp_status != CS_COMPLETE) - rval = QLA_FUNCTION_FAILED; - } else { - goto done; - } + return rval; -out: - fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); +done_free_sp: qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); /* ref: INIT */ kref_put(&sp->cmd_kref, qla2x00_sp_release); done: + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); return rval; } @@ 
-3916,7 +3936,7 @@ qla2x00_start_sp(srb_t *sp) return -EAGAIN; } - pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); + pkt = qla2x00_alloc_iocbs_ready(sp->qpair, sp); if (!pkt) { rval = -EAGAIN; ql_log(ql_log_warn, vha, 0x700c, diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index f794f4363..1fd948598 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -194,7 +194,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if (ha->flags.purge_mbox || chip_reset != ha->chip_reset || ha->flags.eeh_busy) { ql_log(ql_log_warn, vha, 0xd035, - "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n", + "Purge mbox: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n", ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]); rval = QLA_ABORTED; goto premature_exit; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 25ca0544b..25d0c2bfd 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -5562,7 +5562,7 @@ qla2x00_do_work(struct scsi_qla_host *vha) break; case QLA_EVT_ELS_PLOGI: qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, - e->u.fcport.fcport, false); + e->u.fcport.fcport); break; case QLA_EVT_SA_REPLACE: rc = qla24xx_issue_sa_replace_iocb(vha, e); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 5a5beb417..043cfa10c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1079,6 +1079,16 @@ void qlt_free_session_done(struct work_struct *work) "%s: sess %p logout completed\n", __func__, sess); } + /* check for any straggling io left behind */ + if (!(sess->flags & FCF_FCP2_DEVICE) && + qla2x00_eh_wait_for_pending_commands(sess->vha, sess->d_id.b24, 0, WAIT_TARGET)) { + ql_log(ql_log_warn, vha, 0x3027, + "IO not return. Resetting.\n"); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_chip_reset(vha); + } + if (sess->logo_ack_needed) { sess->logo_ack_needed = 0; qla24xx_async_notify_ack(vha, sess, diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index bab00b65b..852d509b1 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1611,6 +1611,40 @@ int scsi_add_device(struct Scsi_Host *host, uint channel, } EXPORT_SYMBOL(scsi_add_device); +int scsi_resume_device(struct scsi_device *sdev) +{ + struct device *dev = &sdev->sdev_gendev; + int ret = 0; + + device_lock(dev); + + /* + * Bail out if the device or its queue are not running. Otherwise, + * the rescan may block waiting for commands to be executed, with us + * holding the device lock. This can result in a potential deadlock + * in the power management core code when system resume is on-going. 
+ */ + if (sdev->sdev_state != SDEV_RUNNING || + blk_queue_pm_only(sdev->request_queue)) { + ret = -EWOULDBLOCK; + goto unlock; + } + + if (dev->driver && try_module_get(dev->driver->owner)) { + struct scsi_driver *drv = to_scsi_driver(dev->driver); + + if (drv->resume) + ret = drv->resume(dev); + module_put(dev->driver->owner); + } + +unlock: + device_unlock(dev); + + return ret; +} +EXPORT_SYMBOL(scsi_resume_device); + int scsi_rescan_device(struct scsi_device *sdev) { struct device *dev = &sdev->sdev_gendev; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 4433b02c8..f32236c3f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -110,6 +110,7 @@ static int sd_suspend_system(struct device *); static int sd_suspend_runtime(struct device *); static int sd_resume_system(struct device *); static int sd_resume_runtime(struct device *); +static int sd_resume(struct device *); static void sd_rescan(struct device *); static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt); static void sd_uninit_command(struct scsi_cmnd *SCpnt); @@ -691,6 +692,7 @@ static struct scsi_driver sd_template = { .pm = &sd_pm_ops, }, .rescan = sd_rescan, + .resume = sd_resume, .init_command = sd_init_command, .uninit_command = sd_uninit_command, .done = sd_done, @@ -3634,7 +3636,7 @@ static int sd_probe(struct device *dev) error = device_add_disk(dev, gd, NULL); if (error) { - put_device(&sdkp->disk_dev); + device_unregister(&sdkp->disk_dev); put_disk(gd); goto out; } @@ -3830,7 +3832,22 @@ static int sd_suspend_runtime(struct device *dev) return sd_suspend_common(dev, true); } -static int sd_resume(struct device *dev, bool runtime) +static int sd_resume(struct device *dev) +{ + struct scsi_disk *sdkp = dev_get_drvdata(dev); + + if (sdkp->device->no_start_on_resume) + sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); + + if (opal_unlock_from_suspend(sdkp->opal_dev)) { + sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n"); + return -EIO; + } + + return 0; +} + +static int sd_resume_common(struct device *dev, bool runtime) { struct scsi_disk *sdkp = dev_get_drvdata(dev); int ret = 0; @@ -3849,7 +3866,7 @@ static int sd_resume(struct device *dev, bool runtime) } if (!ret) { - opal_unlock_from_suspend(sdkp->opal_dev); + sd_resume(dev); sdkp->suspended = false; } @@ -3868,7 +3885,7 @@ static int sd_resume_system(struct device *dev) return 0; } - return sd_resume(dev, false); + return sd_resume_common(dev, false); } static int sd_resume_runtime(struct device *dev) @@ -3892,7 +3909,7 @@ static int sd_resume_runtime(struct device *dev) "Failed to clear sense data\n"); } - return sd_resume(dev, true); + return sd_resume_common(dev, true); } /** diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c index 219483b79..37fd65599 100644 --- a/drivers/slimbus/core.c +++ b/drivers/slimbus/core.c @@ -436,8 +436,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev, if (ret < 0) goto err; } else if (report_present) { - ret = ida_simple_get(&ctrl->laddr_ida, - 0, SLIM_LA_MANAGER - 1, GFP_KERNEL); + ret = ida_alloc_max(&ctrl->laddr_ida, + SLIM_LA_MANAGER - 1, GFP_KERNEL); if (ret < 0) goto err; diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c index 1d2b27e3e..b811446e0 100644 --- a/drivers/soc/fsl/dpio/dpio-service.c +++ b/drivers/soc/fsl/dpio/dpio-service.c @@ -523,7 +523,7 @@ int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d, struct qbman_eq_desc *ed; int i, ret; - ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL); + ed = kcalloc(32, 
sizeof(struct qbman_eq_desc), GFP_KERNEL); if (!ed) return -ENOMEM; diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 739e4eee6..7e9074519 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -991,7 +991,7 @@ struct qman_portal { /* linked-list of CSCN handlers. */ struct list_head cgr_cbs; /* list lock */ - spinlock_t cgr_lock; + raw_spinlock_t cgr_lock; struct work_struct congestion_work; struct work_struct mr_work; char irqname[MAX_IRQNAME]; @@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal, /* if the given mask is NULL, assume all CGRs can be seen */ qman_cgrs_fill(&portal->cgrs[0]); INIT_LIST_HEAD(&portal->cgr_cbs); - spin_lock_init(&portal->cgr_lock); + raw_spin_lock_init(&portal->cgr_lock); INIT_WORK(&portal->congestion_work, qm_congestion_task); INIT_WORK(&portal->mr_work, qm_mr_process_task); portal->bits = 0; @@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work) union qm_mc_result *mcr; struct qman_cgr *cgr; - spin_lock(&p->cgr_lock); + /* + * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock! + */ + raw_spin_lock_irq(&p->cgr_lock); qm_mc_start(&p->p); qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); if (!qm_mc_result_timeout(&p->p, &mcr)) { - spin_unlock(&p->cgr_lock); + raw_spin_unlock_irq(&p->cgr_lock); dev_crit(p->config->dev, "QUERYCONGESTION timeout\n"); qman_p_irqsource_add(p, QM_PIRQ_CSCI); return; @@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work) list_for_each_entry(cgr, &p->cgr_cbs, node) if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid)); - spin_unlock(&p->cgr_lock); + raw_spin_unlock_irq(&p->cgr_lock); qman_p_irqsource_add(p, QM_PIRQ_CSCI); } @@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, preempt_enable(); cgr->chan = p->config->channel; - spin_lock(&p->cgr_lock); + raw_spin_lock_irq(&p->cgr_lock); if (opts) { struct qm_mcc_initcgr local_opts = *opts; @@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, qman_cgrs_get(&p->cgrs[1], cgr->cgrid)) cgr->cb(p, cgr, 1); out: - spin_unlock(&p->cgr_lock); + raw_spin_unlock_irq(&p->cgr_lock); put_affine_portal(); return ret; } @@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr) return -EINVAL; memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); - spin_lock_irqsave(&p->cgr_lock, irqflags); + raw_spin_lock_irqsave(&p->cgr_lock, irqflags); list_del(&cgr->node); /* * If there are no other CGR objects for this CGRID in the list, @@ -2537,7 +2540,7 @@ int qman_delete_cgr(struct qman_cgr *cgr) /* add back to the list */ list_add(&cgr->node, &p->cgr_cbs); release_lock: - spin_unlock_irqrestore(&p->cgr_lock, irqflags); + raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags); put_affine_portal(); return ret; } @@ -2577,9 +2580,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts) if (!p) return -EINVAL; - spin_lock_irqsave(&p->cgr_lock, irqflags); + raw_spin_lock_irqsave(&p->cgr_lock, irqflags); ret = qm_modify_cgr(cgr, 0, opts); - spin_unlock_irqrestore(&p->cgr_lock, irqflags); + raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags); put_affine_portal(); return ret; } diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig index eb656b331..f19e74d34 100644 --- a/drivers/soc/microchip/Kconfig +++ b/drivers/soc/microchip/Kconfig @@ -1,5 +1,5 @@ config POLARFIRE_SOC_SYS_CTRL - tristate "POLARFIRE_SOC_SYS_CTRL" + tristate "Microchip 
PolarFire SoC (MPFS) system controller support" depends on POLARFIRE_SOC_MAILBOX help This driver adds support for the PolarFire SoC (MPFS) system controller. diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c index 9a90f241b..6efe36aeb 100644 --- a/drivers/soc/qcom/rpmhpd.c +++ b/drivers/soc/qcom/rpmhpd.c @@ -195,7 +195,6 @@ static struct rpmhpd *sa8540p_rpmhpds[] = { [SC8280XP_CX] = &cx, [SC8280XP_CX_AO] = &cx_ao, [SC8280XP_EBI] = &ebi, - [SC8280XP_GFX] = &gfx, [SC8280XP_LCX] = &lcx, [SC8280XP_LMX] = &lmx, [SC8280XP_MMCX] = &mmcx, diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 6e95efb50..f9ec87429 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -787,17 +787,19 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len); mtk_spi_setup_packet(master); - cnt = mdata->xfer_len / 4; - iowrite32_rep(mdata->base + SPI_TX_DATA_REG, - trans->tx_buf + mdata->num_xfered, cnt); + if (trans->tx_buf) { + cnt = mdata->xfer_len / 4; + iowrite32_rep(mdata->base + SPI_TX_DATA_REG, + trans->tx_buf + mdata->num_xfered, cnt); - remainder = mdata->xfer_len % 4; - if (remainder > 0) { - reg_val = 0; - memcpy(®_val, - trans->tx_buf + (cnt * 4) + mdata->num_xfered, - remainder); - writel(reg_val, mdata->base + SPI_TX_DATA_REG); + remainder = mdata->xfer_len % 4; + if (remainder > 0) { + reg_val = 0; + memcpy(®_val, + trans->tx_buf + (cnt * 4) + mdata->num_xfered, + remainder); + writel(reg_val, mdata->base + SPI_TX_DATA_REG); + } } mtk_spi_enable_transfer(master); diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index 87d36948c..c6bd86a53 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -100,15 +100,15 @@ static struct led_classdev *get_channel_cdev(struct gb_channel *channel) static struct gb_channel *get_channel_from_mode(struct gb_light *light, u32 mode) { - struct gb_channel *channel = NULL; + struct gb_channel *channel; int i; for (i = 0; i < light->channels_count; i++) { channel = &light->channels[i]; - if (channel && channel->mode == mode) - break; + if (channel->mode == mode) + return channel; } - return channel; + return NULL; } static int __gb_lights_flash_intensity_set(struct gb_channel *channel, diff --git a/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c b/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c index 0c61a2dec..81fc48356 100644 --- a/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c +++ b/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c @@ -1462,7 +1462,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio budget_av->has_saa7113 = 1; err = saa7146_vv_init(dev, &vv_data); if (err != 0) { - /* fixme: proper cleanup here */ + ttpci_budget_deinit(&budget_av->budget); + kfree(budget_av); ERR("cannot init vv subsystem\n"); return err; } @@ -1471,9 +1472,10 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio vv_data.vid_ops.vidioc_s_input = vidioc_s_input; if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) { - /* fixme: proper cleanup here */ - ERR("cannot register capture v4l2 device\n"); saa7146_vv_release(dev); + ttpci_budget_deinit(&budget_av->budget); + kfree(budget_av); + ERR("cannot register capture v4l2 device\n"); return err; } diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c index 1fd39a2fc..95cca281e 100644 
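The greybus light hunk above swaps a break-based scan for an early return, so a lookup that finds nothing reports NULL instead of whatever channel the loop happened to stop on. A stand-alone illustration of that lookup pattern, using hypothetical names and a simplified type:

#include <stddef.h>

struct chan {
	unsigned int mode;
};

/* Return the first matching entry, or NULL when no entry matches. */
static struct chan *find_chan(struct chan *chans, unsigned int count, unsigned int mode)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (chans[i].mode == mode)
			return &chans[i];
	}

	return NULL;
}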
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c +++ b/drivers/staging/media/imx/imx-media-csc-scaler.c @@ -803,6 +803,7 @@ static int ipu_csc_scaler_release(struct file *file) dev_dbg(priv->dev, "Releasing instance %p\n", ctx); + v4l2_ctrl_handler_free(&ctx->ctrl_hdlr); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c index e530767e8..55cc44a40 100644 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c @@ -1069,6 +1069,11 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu, struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe]; /* Initialize subdev media entity */ + imgu_sd->subdev.entity.ops = &imgu_media_ops; + for (i = 0; i < IMGU_NODE_NUM; i++) { + imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ? + MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE; + } r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM, imgu_sd->subdev_pads); if (r) { @@ -1076,11 +1081,6 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu, "failed initialize subdev media entity (%d)\n", r); return r; } - imgu_sd->subdev.entity.ops = &imgu_media_ops; - for (i = 0; i < IMGU_NODE_NUM; i++) { - imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ? - MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE; - } /* Initialize subdev */ v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops); @@ -1177,15 +1177,15 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe, } /* Initialize media entities */ + node->vdev_pad.flags = node->output ? + MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK; + vdev->entity.ops = NULL; r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad); if (r) { dev_err(dev, "failed initialize media entity (%d)\n", r); mutex_destroy(&node->lock); return r; } - node->vdev_pad.flags = node->output ? 
- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK; - vdev->entity.ops = NULL; /* Initialize vbq */ vbq->type = node->vdev_fmt.type; diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h index 93a219600..cb99610f3 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.h +++ b/drivers/staging/media/sunxi/cedrus/cedrus.h @@ -109,6 +109,11 @@ struct cedrus_buffer { unsigned int position; enum cedrus_h264_pic_type pic_type; } h264; + struct { + void *mv_col_buf; + dma_addr_t mv_col_buf_dma; + ssize_t mv_col_buf_size; + } h265; } codec; }; @@ -142,10 +147,6 @@ struct cedrus_ctx { ssize_t intra_pred_buf_size; } h264; struct { - void *mv_col_buf; - dma_addr_t mv_col_buf_addr; - ssize_t mv_col_buf_size; - ssize_t mv_col_buf_unit_size; void *neighbor_info_buf; dma_addr_t neighbor_info_buf_addr; void *entry_points_buf; diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c index 625f77a8c..9f13c942a 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c @@ -90,12 +90,13 @@ static void cedrus_h265_sram_write_data(struct cedrus_dev *dev, void *data, } static inline dma_addr_t -cedrus_h265_frame_info_mv_col_buf_addr(struct cedrus_ctx *ctx, - unsigned int index, unsigned int field) +cedrus_h265_frame_info_mv_col_buf_addr(struct vb2_buffer *buf, + unsigned int field) { - return ctx->codec.h265.mv_col_buf_addr + index * - ctx->codec.h265.mv_col_buf_unit_size + - field * ctx->codec.h265.mv_col_buf_unit_size / 2; + struct cedrus_buffer *cedrus_buf = vb2_to_cedrus_buffer(buf); + + return cedrus_buf->codec.h265.mv_col_buf_dma + + field * cedrus_buf->codec.h265.mv_col_buf_size / 2; } static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx, @@ -108,9 +109,8 @@ static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx, dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buf, 0); dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buf, 1); dma_addr_t mv_col_buf_addr[2] = { - cedrus_h265_frame_info_mv_col_buf_addr(ctx, buf->index, 0), - cedrus_h265_frame_info_mv_col_buf_addr(ctx, buf->index, - field_pic ? 1 : 0) + cedrus_h265_frame_info_mv_col_buf_addr(buf, 0), + cedrus_h265_frame_info_mv_col_buf_addr(buf, field_pic ? 1 : 0) }; u32 offset = VE_DEC_H265_SRAM_OFFSET_FRAME_INFO + VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT * index; @@ -412,12 +412,13 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) unsigned int width_in_ctb_luma, ctb_size_luma; unsigned int log2_max_luma_coding_block_size; unsigned int ctb_addr_x, ctb_addr_y; + struct cedrus_buffer *cedrus_buf; dma_addr_t src_buf_addr; - dma_addr_t src_buf_end_addr; u32 chroma_log2_weight_denom; u32 num_entry_point_offsets; u32 output_pic_list_index; u32 pic_order_cnt[2]; + size_t slice_bytes; u8 padding; int count; u32 reg; @@ -428,6 +429,8 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) decode_params = run->h265.decode_params; pred_weight_table = &slice_params->pred_weight_table; num_entry_point_offsets = slice_params->num_entry_point_offsets; + cedrus_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf); + slice_bytes = vb2_get_plane_payload(&run->src->vb2_buf, 0); /* * If entry points offsets are present, we should get them @@ -445,31 +448,25 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) DIV_ROUND_UP(sps->pic_width_in_luma_samples, ctb_size_luma); /* MV column buffer size and allocation. 
*/ - if (!ctx->codec.h265.mv_col_buf_size) { - unsigned int num_buffers = - run->dst->vb2_buf.vb2_queue->num_buffers; - + if (!cedrus_buf->codec.h265.mv_col_buf_size) { /* * Each CTB requires a MV col buffer with a specific unit size. * Since the address is given with missing lsb bits, 1 KiB is * added to each buffer to ensure proper alignment. */ - ctx->codec.h265.mv_col_buf_unit_size = + cedrus_buf->codec.h265.mv_col_buf_size = DIV_ROUND_UP(ctx->src_fmt.width, ctb_size_luma) * DIV_ROUND_UP(ctx->src_fmt.height, ctb_size_luma) * CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE + SZ_1K; - ctx->codec.h265.mv_col_buf_size = num_buffers * - ctx->codec.h265.mv_col_buf_unit_size; - /* Buffer is never accessed by CPU, so we can skip kernel mapping. */ - ctx->codec.h265.mv_col_buf = + cedrus_buf->codec.h265.mv_col_buf = dma_alloc_attrs(dev->dev, - ctx->codec.h265.mv_col_buf_size, - &ctx->codec.h265.mv_col_buf_addr, + cedrus_buf->codec.h265.mv_col_buf_size, + &cedrus_buf->codec.h265.mv_col_buf_dma, GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING); - if (!ctx->codec.h265.mv_col_buf) { - ctx->codec.h265.mv_col_buf_size = 0; + if (!cedrus_buf->codec.h265.mv_col_buf) { + cedrus_buf->codec.h265.mv_col_buf_size = 0; return -ENOMEM; } } @@ -481,7 +478,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, 0); - reg = slice_params->bit_size; + reg = slice_bytes * 8; cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg); /* Source beginning and end addresses. */ @@ -495,10 +492,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run) cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg); - src_buf_end_addr = src_buf_addr + - DIV_ROUND_UP(slice_params->bit_size, 8); - - reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr); + reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_addr + slice_bytes); cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg); /* Coding tree block address */ @@ -816,9 +810,6 @@ static int cedrus_h265_start(struct cedrus_ctx *ctx) { struct cedrus_dev *dev = ctx->dev; - /* The buffer size is calculated at setup time. */ - ctx->codec.h265.mv_col_buf_size = 0; - /* Buffer is never accessed by CPU, so we can skip kernel mapping. 
*/ ctx->codec.h265.neighbor_info_buf = dma_alloc_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE, @@ -845,14 +836,24 @@ static int cedrus_h265_start(struct cedrus_ctx *ctx) static void cedrus_h265_stop(struct cedrus_ctx *ctx) { struct cedrus_dev *dev = ctx->dev; + struct cedrus_buffer *buf; + struct vb2_queue *vq; + unsigned int i; - if (ctx->codec.h265.mv_col_buf_size > 0) { - dma_free_attrs(dev->dev, ctx->codec.h265.mv_col_buf_size, - ctx->codec.h265.mv_col_buf, - ctx->codec.h265.mv_col_buf_addr, - DMA_ATTR_NO_KERNEL_MAPPING); + vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + + for (i = 0; i < vq->num_buffers; i++) { + buf = vb2_to_cedrus_buffer(vb2_get_buffer(vq, i)); - ctx->codec.h265.mv_col_buf_size = 0; + if (buf->codec.h265.mv_col_buf_size > 0) { + dma_free_attrs(dev->dev, + buf->codec.h265.mv_col_buf_size, + buf->codec.h265.mv_col_buf, + buf->codec.h265.mv_col_buf_dma, + DMA_ATTR_NO_KERNEL_MAPPING); + + buf->codec.h265.mv_col_buf_size = 0; + } } dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE, diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c index cb921c949..90eb4c593 100644 --- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c +++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c @@ -938,8 +938,9 @@ static int create_component(struct vchiq_mmal_instance *instance, /* build component create message */ m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE; m.u.component_create.client_component = component->client_component; - strncpy(m.u.component_create.name, name, - sizeof(m.u.component_create.name)); + strscpy_pad(m.u.component_create.name, name, + sizeof(m.u.component_create.name)); + m.u.component_create.pid = 0; ret = send_synchronous_mmal_msg(instance, &m, sizeof(m.u.component_create), diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c index 4b1092127..1892e49a8 100644 --- a/drivers/tee/optee/device.c +++ b/drivers/tee/optee/device.c @@ -90,13 +90,14 @@ static int optee_register_device(const uuid_t *device_uuid, u32 func) if (rc) { pr_err("device registration failed, err: %d\n", rc); put_device(&optee_device->dev); + return rc; } if (func == PTA_CMD_GET_DEVICES_SUPP) device_create_file(&optee_device->dev, &dev_attr_need_supplicant); - return rc; + return 0; } static int __optee_enumerate_devices(u32 func) diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index 24b474925..0b424bc8c 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -201,7 +201,7 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd res = dfc->power_ops->get_real_power(df, power, freq, voltage); if (!res) { - state = dfc->capped_state; + state = dfc->max_state - dfc->capped_state; /* Convert EM power into milli-Watts first */ dfc->res_util = dfc->em_pd->table[state].power; diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index dca1abe36..55451ff84 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -714,6 +714,7 @@ static void exar_pci_remove(struct pci_dev *pcidev) for (i = 0; i < priv->nr; i++) serial8250_unregister_port(priv->line[i]); + /* Ensure that every init quirk is properly torn down */ if (priv->board->exit) priv->board->exit(pcidev); } @@ -728,10 +729,6 @@ static int __maybe_unused exar_suspend(struct device *dev) if (priv->line[i] >= 0) serial8250_suspend_port(priv->line[i]); - /* Ensure that 
every init quirk is properly torn down */ - if (priv->board->exit) - priv->board->exit(pcidev); - return 0; } diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 8efe31448..c744feabd 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1377,9 +1377,6 @@ static void autoconfig_irq(struct uart_8250_port *up) inb_p(ICP); } - if (uart_console(port)) - console_lock(); - /* forget possible initially masked and pending IRQ */ probe_irq_off(probe_irq_on()); save_mcr = serial8250_in_MCR(up); @@ -1410,9 +1407,6 @@ static void autoconfig_irq(struct uart_8250_port *up) if (port->flags & UPF_FOURPORT) outb_p(save_ICP, ICP); - if (uart_console(port)) - console_unlock(); - port->irq = (irq > 0) ? irq : 0; } diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index c5a9b89c4..f94c78263 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -2213,9 +2213,12 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, lpuart32_write(&sport->port, bd, UARTBAUD); lpuart32_serial_setbrg(sport, baud); - lpuart32_write(&sport->port, modem, UARTMODIR); - lpuart32_write(&sport->port, ctrl, UARTCTRL); + /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */ + lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); /* restore control register */ + lpuart32_write(&sport->port, ctrl, UARTCTRL); + /* re-enable the CTS if needed */ + lpuart32_write(&sport->port, modem, UARTMODIR); if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE) sport->is_cs7 = true; diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index f8962a3d4..573bf7e9b 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -496,8 +496,7 @@ static void imx_uart_stop_tx(struct uart_port *port) } } -/* called with port.lock taken and irqs off */ -static void imx_uart_stop_rx(struct uart_port *port) +static void imx_uart_stop_rx_with_loopback_ctrl(struct uart_port *port, bool loopback) { struct imx_port *sport = (struct imx_port *)port; u32 ucr1, ucr2, ucr4, uts; @@ -519,7 +518,7 @@ static void imx_uart_stop_rx(struct uart_port *port) /* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */ if (port->rs485.flags & SER_RS485_ENABLED && port->rs485.flags & SER_RS485_RTS_ON_SEND && - sport->have_rtscts && !sport->have_rtsgpio) { + sport->have_rtscts && !sport->have_rtsgpio && loopback) { uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)); uts |= UTS_LOOP; imx_uart_writel(sport, uts, imx_uart_uts_reg(sport)); @@ -532,6 +531,16 @@ static void imx_uart_stop_rx(struct uart_port *port) } /* called with port.lock taken and irqs off */ +static void imx_uart_stop_rx(struct uart_port *port) +{ + /* + * Stop RX and enable loopback in order to make sure RS485 bus + * is not blocked. Se comment in imx_uart_probe(). + */ + imx_uart_stop_rx_with_loopback_ctrl(port, true); +} + +/* called with port.lock taken and irqs off */ static void imx_uart_enable_ms(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; @@ -719,8 +728,13 @@ static void imx_uart_start_tx(struct uart_port *port) imx_uart_rts_inactive(sport, &ucr2); imx_uart_writel(sport, ucr2, UCR2); + /* + * Since we are about to transmit we can not stop RX + * with loopback enabled because that will make our + * transmitted data being just looped to RX. 
+ */ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) - imx_uart_stop_rx(port); + imx_uart_stop_rx_with_loopback_ctrl(port, false); sport->tx_state = WAIT_AFTER_RTS; diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 163a89f84..d409ef388 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1459,7 +1459,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty if (!ret) return 0; - dev_err(dev, "Unable to reguest IRQ %i\n", irq); + dev_err(dev, "Unable to request IRQ %i\n", irq); out_uart: for (i = 0; i < devtype->nr; i++) { @@ -1633,13 +1633,16 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr, static int max310x_i2c_probe(struct i2c_client *client) { - const struct max310x_devtype *devtype = - device_get_match_data(&client->dev); + const struct max310x_devtype *devtype; struct i2c_client *port_client; struct regmap *regmaps[4]; unsigned int i; u8 port_addr; + devtype = device_get_match_data(&client->dev); + if (!devtype) + return dev_err_probe(&client->dev, -ENODEV, "Failed to match device\n"); + if (client->addr < devtype->slave_addr.min || client->addr > devtype->slave_addr.max) return dev_err_probe(&client->dev, -EINVAL, diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c index aa2c51b84..589daed19 100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -996,11 +996,10 @@ static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port) if ((ufstat & info->tx_fifomask) != 0 || (ufstat & info->tx_fifofull)) return 0; - - return 1; + return TIOCSER_TEMT; } - return s3c24xx_serial_txempty_nofifo(port); + return s3c24xx_serial_txempty_nofifo(port) ? TIOCSER_TEMT : 0; } /* no modem control lines */ diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index f0ed30d0a..fe3f1d655 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2561,7 +2561,12 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, port->type = PORT_UNKNOWN; flags |= UART_CONFIG_TYPE; } + /* Synchronize with possible boot console. */ + if (uart_console(port)) + console_lock(); port->ops->config_port(port, flags); + if (uart_console(port)) + console_unlock(); } if (port->type != PORT_UNKNOWN) { @@ -2569,6 +2574,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, uart_report_port(drv, port); + /* Synchronize with possible boot console. */ + if (uart_console(port)) + console_lock(); + /* Power up port for set_mctrl() */ uart_change_pm(state, UART_PM_STATE_ON); @@ -2585,6 +2594,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, uart_rs485_config(port); + if (uart_console(port)) + console_unlock(); + /* * If this driver supports console, and it hasn't been * successfully registered yet, try to re-register it. 
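The serial_core hunk above brackets port (re)configuration with console_lock()/console_unlock() so that a boot console cannot print through the port while config_port() and the subsequent power/mctrl/RS-485 setup are reprogramming it (the matching ad-hoc locking is dropped from 8250 autoconfig_irq() in the same series). A minimal sketch of that pattern follows, assuming a hypothetical helper name (example_reconfig_port); only uart_console(), console_lock()/console_unlock() and the config_port() hook are taken from the patch itself, everything else is illustrative:

#include <linux/console.h>
#include <linux/serial_core.h>

/*
 * Sketch only: mirrors the synchronization added to uart_configure_port()
 * above. example_reconfig_port() is a made-up name for illustration.
 */
static void example_reconfig_port(struct uart_port *port, int flags)
{
	/* Keep a possible boot console from printing while we poke the port. */
	if (uart_console(port))
		console_lock();

	port->ops->config_port(port, flags);

	if (uart_console(port))
		console_unlock();
}

The lock is only taken when the port is (or may become) the system console; for ordinary ports uart_console() is false and the calls are skipped, so the common path is unaffected.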
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 981d2bfcf..48a9ed7c9 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -398,7 +398,7 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr) char32_t *ln = uniscr->lines[vc->state.y]; unsigned int x = vc->state.x, cols = vc->vc_cols; - memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln)); + memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln)); memset32(&ln[cols - nr], ' ', nr); } } @@ -2515,7 +2515,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) } return; case EScsiignore: - if (c >= 20 && c <= 0x3f) + if (c >= 0x20 && c <= 0x3f) return; vc->vc_state = ESnormal; return; diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 1f0951be1..fdc1a66b1 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -485,6 +485,7 @@ out_free_mem: static int service_outstanding_interrupt(struct wdm_device *desc) { int rv = 0; + int used; /* submit read urb only if the device is waiting for it */ if (!desc->resp_count || !--desc->resp_count) @@ -499,7 +500,10 @@ static int service_outstanding_interrupt(struct wdm_device *desc) goto out; } - set_bit(WDM_RESPONDING, &desc->flags); + used = test_and_set_bit(WDM_RESPONDING, &desc->flags); + if (used) + goto out; + spin_unlock_irq(&desc->iuspin); rv = usb_submit_urb(desc->response, GFP_KERNEL); spin_lock_irq(&desc->iuspin); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index d960a56b7..b1fb04e52 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -123,7 +123,6 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); #define HUB_DEBOUNCE_STEP 25 #define HUB_DEBOUNCE_STABLE 100 -static void hub_release(struct kref *kref); static int usb_reset_and_verify_device(struct usb_device *udev); static int hub_port_disable(struct usb_hub *hub, int port1, int set_state); static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1, @@ -685,14 +684,14 @@ static void kick_hub_wq(struct usb_hub *hub) */ intf = to_usb_interface(hub->intfdev); usb_autopm_get_interface_no_resume(intf); - kref_get(&hub->kref); + hub_get(hub); if (queue_work(hub_wq, &hub->events)) return; /* the work has already been scheduled */ usb_autopm_put_interface_async(intf); - kref_put(&hub->kref, hub_release); + hub_put(hub); } void usb_kick_hub_wq(struct usb_device *hdev) @@ -1060,7 +1059,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) goto init2; goto init3; } - kref_get(&hub->kref); + hub_get(hub); /* The superspeed hub except for root hub has to use Hub Depth * value as an offset into the route string to locate the bits @@ -1308,7 +1307,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) device_unlock(&hdev->dev); } - kref_put(&hub->kref, hub_release); + hub_put(hub); } /* Implement the continuations for the delays above */ @@ -1724,6 +1723,16 @@ static void hub_release(struct kref *kref) kfree(hub); } +void hub_get(struct usb_hub *hub) +{ + kref_get(&hub->kref); +} + +void hub_put(struct usb_hub *hub) +{ + kref_put(&hub->kref, hub_release); +} + static unsigned highspeed_hubs; static void hub_disconnect(struct usb_interface *intf) @@ -1772,7 +1781,7 @@ static void hub_disconnect(struct usb_interface *intf) onboard_hub_destroy_pdevs(&hub->onboard_hub_devs); - kref_put(&hub->kref, hub_release); + hub_put(hub); } static bool hub_descriptor_is_sane(struct usb_host_interface *desc) @@ -5874,7 +5883,7 @@ out_hdev_lock: /* Balance the stuff 
in kick_hub_wq() and allow autosuspend */ usb_autopm_put_interface(intf); - kref_put(&hub->kref, hub_release); + hub_put(hub); kcov_remote_stop(); } diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index bc66205ca..1085c7233 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -118,6 +118,8 @@ extern void usb_hub_remove_port_device(struct usb_hub *hub, extern int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub, int port1, bool set); extern struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev); +extern void hub_get(struct usb_hub *hub); +extern void hub_put(struct usb_hub *hub); extern int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected); extern int usb_clear_port_feature(struct usb_device *hdev, diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 38c1a4f4f..e91fa567d 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -28,11 +28,22 @@ static ssize_t disable_show(struct device *dev, u16 portstatus, unused; bool disabled; int rc; + struct kernfs_node *kn; + hub_get(hub); rc = usb_autopm_get_interface(intf); if (rc < 0) - return rc; + goto out_hub_get; + /* + * Prevent deadlock if another process is concurrently + * trying to unregister hdev. + */ + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); + if (!kn) { + rc = -ENODEV; + goto out_autopm; + } usb_lock_device(hdev); if (hub->disconnected) { rc = -ENODEV; @@ -42,9 +53,13 @@ static ssize_t disable_show(struct device *dev, usb_hub_port_status(hub, port1, &portstatus, &unused); disabled = !usb_port_is_power_on(hub, portstatus); -out_hdev_lock: + out_hdev_lock: usb_unlock_device(hdev); + sysfs_unbreak_active_protection(kn); + out_autopm: usb_autopm_put_interface(intf); + out_hub_get: + hub_put(hub); if (rc) return rc; @@ -62,15 +77,26 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr, int port1 = port_dev->portnum; bool disabled; int rc; + struct kernfs_node *kn; rc = strtobool(buf, &disabled); if (rc) return rc; + hub_get(hub); rc = usb_autopm_get_interface(intf); if (rc < 0) - return rc; + goto out_hub_get; + /* + * Prevent deadlock if another process is concurrently + * trying to unregister hdev. 
+ */ + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); + if (!kn) { + rc = -ENODEV; + goto out_autopm; + } usb_lock_device(hdev); if (hub->disconnected) { rc = -ENODEV; @@ -91,9 +117,13 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr, if (!rc) rc = count; -out_hdev_lock: + out_hdev_lock: usb_unlock_device(hdev); + sysfs_unbreak_active_protection(kn); + out_autopm: usb_autopm_put_interface(intf); + out_hub_get: + hub_put(hub); return rc; } @@ -534,7 +564,7 @@ static int match_location(struct usb_device *peer_hdev, void *p) struct usb_hub *peer_hub = usb_hub_to_struct_hub(peer_hdev); struct usb_device *hdev = to_usb_device(port_dev->dev.parent->parent); - if (!peer_hub) + if (!peer_hub || port_dev->connect_type == USB_PORT_NOT_USED) return 0; hcd = bus_to_hcd(hdev->bus); @@ -545,7 +575,8 @@ static int match_location(struct usb_device *peer_hdev, void *p) for (port1 = 1; port1 <= peer_hdev->maxchild; port1++) { peer = peer_hub->ports[port1 - 1]; - if (peer && peer->location == port_dev->location) { + if (peer && peer->connect_type != USB_PORT_NOT_USED && + peer->location == port_dev->location) { link_peers_report(port_dev, peer); return 1; /* done */ } diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index ccf6cd972..5f1e07341 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -1170,14 +1170,24 @@ static ssize_t interface_authorized_store(struct device *dev, { struct usb_interface *intf = to_usb_interface(dev); bool val; + struct kernfs_node *kn; if (strtobool(buf, &val) != 0) return -EINVAL; - if (val) + if (val) { usb_authorize_interface(intf); - else - usb_deauthorize_interface(intf); + } else { + /* + * Prevent deadlock if another process is concurrently + * trying to unregister intf. 
+ */ + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); + if (kn) { + usb_deauthorize_interface(intf); + sysfs_unbreak_active_protection(kn); + } + } return count; } diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 40cf2880d..b106c0e0b 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -729,8 +729,14 @@ struct dwc2_dregs_backup { * struct dwc2_hregs_backup - Holds host registers state before * entering partial power down * @hcfg: Backup of HCFG register + * @hflbaddr: Backup of HFLBADDR register * @haintmsk: Backup of HAINTMSK register + * @hcchar: Backup of HCCHAR register + * @hcsplt: Backup of HCSPLT register * @hcintmsk: Backup of HCINTMSK register + * @hctsiz: Backup of HCTSIZ register + * @hdma: Backup of HCDMA register + * @hcdmab: Backup of HCDMAB register * @hprt0: Backup of HPTR0 register * @hfir: Backup of HFIR register * @hptxfsiz: Backup of HPTXFSIZ register @@ -738,8 +744,14 @@ struct dwc2_dregs_backup { */ struct dwc2_hregs_backup { u32 hcfg; + u32 hflbaddr; u32 haintmsk; + u32 hcchar[MAX_EPS_CHANNELS]; + u32 hcsplt[MAX_EPS_CHANNELS]; u32 hcintmsk[MAX_EPS_CHANNELS]; + u32 hctsiz[MAX_EPS_CHANNELS]; + u32 hcidma[MAX_EPS_CHANNELS]; + u32 hcidmab[MAX_EPS_CHANNELS]; u32 hprt0; u32 hfir; u32 hptxfsiz; @@ -1084,6 +1096,7 @@ struct dwc2_hsotg { bool needs_byte_swap; /* DWC OTG HW Release versions */ +#define DWC2_CORE_REV_4_30a 0x4f54430a #define DWC2_CORE_REV_2_71a 0x4f54271a #define DWC2_CORE_REV_2_72a 0x4f54272a #define DWC2_CORE_REV_2_80a 0x4f54280a @@ -1321,6 +1334,7 @@ int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg); int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg); void dwc2_enable_acg(struct dwc2_hsotg *hsotg); +void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup); /* This function should be called on every hardware interrupt. */ irqreturn_t dwc2_handle_common_intr(int irq, void *dev); diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index 158ede753..26d752a4c 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -297,7 +297,8 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg) /* Exit gadget mode clock gating. 
*/ if (hsotg->params.power_down == - DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended) + DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended && + !hsotg->params.no_clock_gating) dwc2_gadget_exit_clock_gating(hsotg, 0); } @@ -322,10 +323,11 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg) * @hsotg: Programming view of DWC_otg controller * */ -static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg) +void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup) { u32 glpmcfg; - u32 i = 0; + u32 pcgctl; + u32 dctl; if (hsotg->lx_state != DWC2_L1) { dev_err(hsotg->dev, "Core isn't in DWC2_L1 state\n"); @@ -334,37 +336,57 @@ static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg) glpmcfg = dwc2_readl(hsotg, GLPMCFG); if (dwc2_is_device_mode(hsotg)) { - dev_dbg(hsotg->dev, "Exit from L1 state\n"); + dev_dbg(hsotg->dev, "Exit from L1 state, remotewakeup=%d\n", remotewakeup); glpmcfg &= ~GLPMCFG_ENBLSLPM; - glpmcfg &= ~GLPMCFG_HIRD_THRES_EN; + glpmcfg &= ~GLPMCFG_HIRD_THRES_MASK; dwc2_writel(hsotg, glpmcfg, GLPMCFG); - do { - glpmcfg = dwc2_readl(hsotg, GLPMCFG); + pcgctl = dwc2_readl(hsotg, PCGCTL); + pcgctl &= ~PCGCTL_ENBL_SLEEP_GATING; + dwc2_writel(hsotg, pcgctl, PCGCTL); - if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK | - GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS))) - break; + glpmcfg = dwc2_readl(hsotg, GLPMCFG); + if (glpmcfg & GLPMCFG_ENBESL) { + glpmcfg |= GLPMCFG_RSTRSLPSTS; + dwc2_writel(hsotg, glpmcfg, GLPMCFG); + } + + if (remotewakeup) { + if (dwc2_hsotg_wait_bit_set(hsotg, GLPMCFG, GLPMCFG_L1RESUMEOK, 1000)) { + dev_warn(hsotg->dev, "%s: timeout GLPMCFG_L1RESUMEOK\n", __func__); + goto fail; + return; + } + + dctl = dwc2_readl(hsotg, DCTL); + dctl |= DCTL_RMTWKUPSIG; + dwc2_writel(hsotg, dctl, DCTL); - udelay(1); - } while (++i < 200); + if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_WKUPINT, 1000)) { + dev_warn(hsotg->dev, "%s: timeout GINTSTS_WKUPINT\n", __func__); + goto fail; + return; + } + } - if (i == 200) { - dev_err(hsotg->dev, "Failed to exit L1 sleep state in 200us.\n"); + glpmcfg = dwc2_readl(hsotg, GLPMCFG); + if (glpmcfg & GLPMCFG_COREL1RES_MASK || glpmcfg & GLPMCFG_SLPSTS || + glpmcfg & GLPMCFG_L1RESUMEOK) { + goto fail; return; } - dwc2_gadget_init_lpm(hsotg); + + /* Inform gadget to exit from L1 */ + call_gadget(hsotg, resume); + /* Change to L0 state */ + hsotg->lx_state = DWC2_L0; + hsotg->bus_suspended = false; +fail: dwc2_gadget_init_lpm(hsotg); } else { /* TODO */ dev_err(hsotg->dev, "Host side LPM is not supported.\n"); return; } - - /* Change to L0 state */ - hsotg->lx_state = DWC2_L0; - - /* Inform gadget to exit from L1 */ - call_gadget(hsotg, resume); } /* @@ -385,7 +407,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state); if (hsotg->lx_state == DWC2_L1) { - dwc2_wakeup_from_lpm_l1(hsotg); + dwc2_wakeup_from_lpm_l1(hsotg, false); return; } @@ -408,7 +430,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) /* Exit gadget mode clock gating. 
*/ if (hsotg->params.power_down == - DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended) + DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended && + !hsotg->params.no_clock_gating) dwc2_gadget_exit_clock_gating(hsotg, 0); } else { /* Change to L0 state */ @@ -425,7 +448,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) } if (hsotg->params.power_down == - DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended) + DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended && + !hsotg->params.no_clock_gating) dwc2_host_exit_clock_gating(hsotg, 1); /* diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 8b15742d9..cb29f9fae 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1416,6 +1416,10 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, ep->name, req, req->length, req->buf, req->no_interrupt, req->zero, req->short_not_ok); + if (hs->lx_state == DWC2_L1) { + dwc2_wakeup_from_lpm_l1(hs, true); + } + /* Prevent new request submission when controller is suspended */ if (hs->lx_state != DWC2_L0) { dev_dbg(hs->dev, "%s: submit request only in active state\n", @@ -3728,6 +3732,12 @@ irq_retry: if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2) dwc2_exit_partial_power_down(hsotg, 0, true); + /* Exit gadget mode clock gating. */ + if (hsotg->params.power_down == + DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended && + !hsotg->params.no_clock_gating) + dwc2_gadget_exit_clock_gating(hsotg, 0); + hsotg->lx_state = DWC2_L0; } diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 35c7a4df8..dd5b1c569 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -2701,8 +2701,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( hsotg->available_host_channels--; } qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry); - if (dwc2_assign_and_init_hc(hsotg, qh)) + if (dwc2_assign_and_init_hc(hsotg, qh)) { + if (hsotg->params.uframe_sched) + hsotg->available_host_channels++; break; + } /* * Move the QH from the periodic ready schedule to the @@ -2735,8 +2738,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( hsotg->available_host_channels--; } - if (dwc2_assign_and_init_hc(hsotg, qh)) + if (dwc2_assign_and_init_hc(hsotg, qh)) { + if (hsotg->params.uframe_sched) + hsotg->available_host_channels++; break; + } /* * Move the QH from the non-periodic inactive schedule to the @@ -4143,6 +4149,8 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, urb->actual_length); if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + if (!hsotg->params.dma_desc_enable) + urb->start_frame = qtd->qh->start_active_frame; urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb); for (i = 0; i < urb->number_of_packets; ++i) { urb->iso_frame_desc[i].actual_length = @@ -4649,7 +4657,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, } if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE && - hsotg->bus_suspended) { + hsotg->bus_suspended && !hsotg->params.no_clock_gating) { if (dwc2_is_device_mode(hsotg)) dwc2_gadget_exit_clock_gating(hsotg, 0); else @@ -5406,9 +5414,16 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) /* Backup Host regs */ hr = &hsotg->hr_backup; hr->hcfg = dwc2_readl(hsotg, HCFG); + hr->hflbaddr = dwc2_readl(hsotg, HFLBADDR); hr->haintmsk = dwc2_readl(hsotg, HAINTMSK); - for (i = 0; i < hsotg->params.host_channels; ++i) + for (i = 0; i < hsotg->params.host_channels; ++i) { + hr->hcchar[i] = dwc2_readl(hsotg, HCCHAR(i)); + 
hr->hcsplt[i] = dwc2_readl(hsotg, HCSPLT(i)); hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i)); + hr->hctsiz[i] = dwc2_readl(hsotg, HCTSIZ(i)); + hr->hcidma[i] = dwc2_readl(hsotg, HCDMA(i)); + hr->hcidmab[i] = dwc2_readl(hsotg, HCDMAB(i)); + } hr->hprt0 = dwc2_read_hprt0(hsotg); hr->hfir = dwc2_readl(hsotg, HFIR); @@ -5442,10 +5457,17 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) hr->valid = false; dwc2_writel(hsotg, hr->hcfg, HCFG); + dwc2_writel(hsotg, hr->hflbaddr, HFLBADDR); dwc2_writel(hsotg, hr->haintmsk, HAINTMSK); - for (i = 0; i < hsotg->params.host_channels; ++i) + for (i = 0; i < hsotg->params.host_channels; ++i) { + dwc2_writel(hsotg, hr->hcchar[i], HCCHAR(i)); + dwc2_writel(hsotg, hr->hcsplt[i], HCSPLT(i)); dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i)); + dwc2_writel(hsotg, hr->hctsiz[i], HCTSIZ(i)); + dwc2_writel(hsotg, hr->hcidma[i], HCDMA(i)); + dwc2_writel(hsotg, hr->hcidmab[i], HCDMAB(i)); + } dwc2_writel(hsotg, hr->hprt0, HPRT0); dwc2_writel(hsotg, hr->hfir, HFIR); @@ -5610,10 +5632,12 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup, dwc2_writel(hsotg, hr->hcfg, HCFG); /* De-assert Wakeup Logic */ - gpwrdn = dwc2_readl(hsotg, GPWRDN); - gpwrdn &= ~GPWRDN_PMUACTV; - dwc2_writel(hsotg, gpwrdn, GPWRDN); - udelay(10); + if (!(rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) { + gpwrdn = dwc2_readl(hsotg, GPWRDN); + gpwrdn &= ~GPWRDN_PMUACTV; + dwc2_writel(hsotg, gpwrdn, GPWRDN); + udelay(10); + } hprt0 = hr->hprt0; hprt0 |= HPRT0_PWR; @@ -5638,6 +5662,13 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup, hprt0 |= HPRT0_RES; dwc2_writel(hsotg, hprt0, HPRT0); + /* De-assert Wakeup Logic */ + if ((rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) { + gpwrdn = dwc2_readl(hsotg, GPWRDN); + gpwrdn &= ~GPWRDN_PMUACTV; + dwc2_writel(hsotg, gpwrdn, GPWRDN); + udelay(10); + } /* Wait for Resume time and then program HPRT again */ mdelay(100); hprt0 &= ~HPRT0_RES; diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c index 6b4d825e9..79582b102 100644 --- a/drivers/usb/dwc2/hcd_ddma.c +++ b/drivers/usb/dwc2/hcd_ddma.c @@ -559,7 +559,7 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, idx = qh->td_last; inc = qh->host_interval; hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg); - cur_idx = dwc2_frame_list_idx(hsotg->frame_number); + cur_idx = idx; next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed); /* @@ -866,6 +866,8 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, { struct dwc2_dma_desc *dma_desc; struct dwc2_hcd_iso_packet_desc *frame_desc; + u16 frame_desc_idx; + struct urb *usb_urb = qtd->urb->priv; u16 remain = 0; int rc = 0; @@ -878,8 +880,11 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, DMA_FROM_DEVICE); dma_desc = &qh->desc_list[idx]; + frame_desc_idx = (idx - qtd->isoc_td_first) & (usb_urb->number_of_packets - 1); - frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last]; + frame_desc = &qtd->urb->iso_descs[frame_desc_idx]; + if (idx == qtd->isoc_td_first) + usb_urb->start_frame = dwc2_hcd_get_frame_number(hsotg); dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset); if (chan->ep_is_in) remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >> @@ -900,7 +905,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, frame_desc->status = 0; } - if (++qtd->isoc_frame_index == qtd->urb->packet_count) { + if (++qtd->isoc_frame_index == usb_urb->number_of_packets) { /* * 
urb->status is not used for isoc transfers here. The * individual frame_desc status are used instead. @@ -1005,11 +1010,11 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg, return; idx = dwc2_desclist_idx_inc(idx, qh->host_interval, chan->speed); - if (!rc) + if (rc == 0) continue; - if (rc == DWC2_CMPL_DONE) - break; + if (rc == DWC2_CMPL_DONE || rc == DWC2_CMPL_STOP) + goto stop_scan; /* rc == DWC2_CMPL_STOP */ diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h index 13abdd5f6..12f8c7f86 100644 --- a/drivers/usb/dwc2/hw.h +++ b/drivers/usb/dwc2/hw.h @@ -698,7 +698,7 @@ #define TXSTS_QTOP_TOKEN_MASK (0x3 << 25) #define TXSTS_QTOP_TOKEN_SHIFT 25 #define TXSTS_QTOP_TERMINATE BIT(24) -#define TXSTS_QSPCAVAIL_MASK (0xff << 16) +#define TXSTS_QSPCAVAIL_MASK (0x7f << 16) #define TXSTS_QSPCAVAIL_SHIFT 16 #define TXSTS_FSPCAVAIL_MASK (0xffff << 0) #define TXSTS_FSPCAVAIL_SHIFT 0 diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 58f53faab..2e4c6884f 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -298,7 +298,7 @@ static int dwc2_driver_remove(struct platform_device *dev) /* Exit clock gating when driver is removed. */ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE && - hsotg->bus_suspended) { + hsotg->bus_suspended && !hsotg->params.no_clock_gating) { if (dwc2_is_device_mode(hsotg)) dwc2_gadget_exit_clock_gating(hsotg, 0); else diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c index 173cf3579..ad8a2eadb 100644 --- a/drivers/usb/dwc3/dwc3-am62.c +++ b/drivers/usb/dwc3/dwc3-am62.c @@ -89,7 +89,7 @@ #define DWC3_AM62_AUTOSUSPEND_DELAY 100 -struct dwc3_data { +struct dwc3_am62 { struct device *dev; void __iomem *usbss; struct clk *usb2_refclk; @@ -115,19 +115,19 @@ static const int dwc3_ti_rate_table[] = { /* in KHZ */ 52000, }; -static inline u32 dwc3_ti_readl(struct dwc3_data *data, u32 offset) +static inline u32 dwc3_ti_readl(struct dwc3_am62 *am62, u32 offset) { - return readl((data->usbss) + offset); + return readl((am62->usbss) + offset); } -static inline void dwc3_ti_writel(struct dwc3_data *data, u32 offset, u32 value) +static inline void dwc3_ti_writel(struct dwc3_am62 *am62, u32 offset, u32 value) { - writel(value, (data->usbss) + offset); + writel(value, (am62->usbss) + offset); } -static int phy_syscon_pll_refclk(struct dwc3_data *data) +static int phy_syscon_pll_refclk(struct dwc3_am62 *am62) { - struct device *dev = data->dev; + struct device *dev = am62->dev; struct device_node *node = dev->of_node; struct of_phandle_args args; struct regmap *syscon; @@ -139,16 +139,16 @@ static int phy_syscon_pll_refclk(struct dwc3_data *data) return PTR_ERR(syscon); } - data->syscon = syscon; + am62->syscon = syscon; ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1, 0, &args); if (ret) return ret; - data->offset = args.args[0]; + am62->offset = args.args[0]; - ret = regmap_update_bits(data->syscon, data->offset, PHY_PLL_REFCLK_MASK, data->rate_code); + ret = regmap_update_bits(am62->syscon, am62->offset, PHY_PLL_REFCLK_MASK, am62->rate_code); if (ret) { dev_err(dev, "failed to set phy pll reference clock rate\n"); return ret; @@ -161,32 +161,32 @@ static int dwc3_ti_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = pdev->dev.of_node; - struct dwc3_data *data; + struct dwc3_am62 *am62; int i, ret; unsigned long rate; u32 reg; - data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); - if (!data) + am62 = 
devm_kzalloc(dev, sizeof(*am62), GFP_KERNEL); + if (!am62) return -ENOMEM; - data->dev = dev; - platform_set_drvdata(pdev, data); + am62->dev = dev; + platform_set_drvdata(pdev, am62); - data->usbss = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(data->usbss)) { + am62->usbss = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(am62->usbss)) { dev_err(dev, "can't map IOMEM resource\n"); - return PTR_ERR(data->usbss); + return PTR_ERR(am62->usbss); } - data->usb2_refclk = devm_clk_get(dev, "ref"); - if (IS_ERR(data->usb2_refclk)) { + am62->usb2_refclk = devm_clk_get(dev, "ref"); + if (IS_ERR(am62->usb2_refclk)) { dev_err(dev, "can't get usb2_refclk\n"); - return PTR_ERR(data->usb2_refclk); + return PTR_ERR(am62->usb2_refclk); } /* Calculate the rate code */ - rate = clk_get_rate(data->usb2_refclk); + rate = clk_get_rate(am62->usb2_refclk); rate /= 1000; // To KHz for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) { if (dwc3_ti_rate_table[i] == rate) @@ -198,20 +198,20 @@ static int dwc3_ti_probe(struct platform_device *pdev) return -EINVAL; } - data->rate_code = i; + am62->rate_code = i; /* Read the syscon property and set the rate code */ - ret = phy_syscon_pll_refclk(data); + ret = phy_syscon_pll_refclk(am62); if (ret) return ret; /* VBUS divider select */ - data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider"); - reg = dwc3_ti_readl(data, USBSS_PHY_CONFIG); - if (data->vbus_divider) + am62->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider"); + reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG); + if (am62->vbus_divider) reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT; - dwc3_ti_writel(data, USBSS_PHY_CONFIG, reg); + dwc3_ti_writel(am62, USBSS_PHY_CONFIG, reg); pm_runtime_set_active(dev); pm_runtime_enable(dev); @@ -219,7 +219,7 @@ static int dwc3_ti_probe(struct platform_device *pdev) * Don't ignore its dependencies with its children */ pm_suspend_ignore_children(dev, false); - clk_prepare_enable(data->usb2_refclk); + clk_prepare_enable(am62->usb2_refclk); pm_runtime_get_noresume(dev); ret = of_platform_populate(node, NULL, NULL, dev); @@ -229,9 +229,9 @@ static int dwc3_ti_probe(struct platform_device *pdev) } /* Set mode valid bit to indicate role is valid */ - reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL); + reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL); reg |= USBSS_MODE_VALID; - dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg); + dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg); /* Setting up autosuspend */ pm_runtime_set_autosuspend_delay(dev, DWC3_AM62_AUTOSUSPEND_DELAY); @@ -241,35 +241,27 @@ static int dwc3_ti_probe(struct platform_device *pdev) return 0; err_pm_disable: - clk_disable_unprepare(data->usb2_refclk); + clk_disable_unprepare(am62->usb2_refclk); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); return ret; } -static int dwc3_ti_remove_core(struct device *dev, void *c) -{ - struct platform_device *pdev = to_platform_device(dev); - - platform_device_unregister(pdev); - return 0; -} - static int dwc3_ti_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct dwc3_data *data = platform_get_drvdata(pdev); + struct dwc3_am62 *am62 = platform_get_drvdata(pdev); u32 reg; - device_for_each_child(dev, NULL, dwc3_ti_remove_core); + pm_runtime_get_sync(dev); + of_platform_depopulate(dev); /* Clear mode valid bit */ - reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL); + reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL); reg &= ~USBSS_MODE_VALID; - dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg); + dwc3_ti_writel(am62, USBSS_MODE_CONTROL, 
reg); pm_runtime_put_sync(dev); - clk_disable_unprepare(data->usb2_refclk); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); @@ -280,18 +272,18 @@ static int dwc3_ti_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int dwc3_ti_suspend_common(struct device *dev) { - struct dwc3_data *data = dev_get_drvdata(dev); + struct dwc3_am62 *am62 = dev_get_drvdata(dev); - clk_disable_unprepare(data->usb2_refclk); + clk_disable_unprepare(am62->usb2_refclk); return 0; } static int dwc3_ti_resume_common(struct device *dev) { - struct dwc3_data *data = dev_get_drvdata(dev); + struct dwc3_am62 *am62 = dev_get_drvdata(dev); - clk_prepare_enable(data->usb2_refclk); + clk_prepare_enable(am62->usb2_refclk); return 0; } diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 5e78fcc63..14601a2d2 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1348,7 +1348,7 @@ parse_ntb: if (to_process == 1 && (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) { to_process--; - } else if (to_process > 0) { + } else if ((to_process > 0) && (block_len != 0)) { ntb_ptr = (unsigned char *)(ntb_ptr + block_len); goto parse_ntb; } diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 0edd9e53f..82a10774a 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -292,7 +292,9 @@ int usb_ep_queue(struct usb_ep *ep, { int ret = 0; - if (WARN_ON_ONCE(!ep->enabled && ep->address)) { + if (!ep->enabled && ep->address) { + pr_debug("USB gadget: queue request to disabled ep 0x%x (%s)\n", + ep->address, ep->name); ret = -ESHUTDOWN; goto out; } diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 538c1b9a2..c42d5aa99 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -2650,7 +2650,7 @@ net2272_plat_probe(struct platform_device *pdev) goto err_req; } - ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW); + ret = net2272_probe_fin(dev, irqflags); if (ret) goto err_io; diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index a8cadc45c..fd7a95359 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -3486,8 +3486,8 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc) static int tegra_xudc_phy_get(struct tegra_xudc *xudc) { - int err = 0, usb3; - unsigned int i; + int err = 0, usb3_companion_port; + unsigned int i, j; xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys, sizeof(*xudc->utmi_phy), GFP_KERNEL); @@ -3515,7 +3515,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc) if (IS_ERR(xudc->utmi_phy[i])) { err = PTR_ERR(xudc->utmi_phy[i]); dev_err_probe(xudc->dev, err, - "failed to get usb2-%d PHY\n", i); + "failed to get PHY for phy-name usb2-%d\n", i); goto clean_up; } else if (xudc->utmi_phy[i]) { /* Get usb-phy, if utmi phy is available */ @@ -3534,19 +3534,30 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc) } /* Get USB3 phy */ - usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i); - if (usb3 < 0) + usb3_companion_port = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i); + if (usb3_companion_port < 0) continue; - snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3); - xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name); - if (IS_ERR(xudc->usb3_phy[i])) { - err = PTR_ERR(xudc->usb3_phy[i]); - dev_err_probe(xudc->dev, err, - "failed to get usb3-%d PHY\n", usb3); - goto clean_up; - } 
else if (xudc->usb3_phy[i]) - dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3); + for (j = 0; j < xudc->soc->num_phys; j++) { + snprintf(phy_name, sizeof(phy_name), "usb3-%d", j); + xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name); + if (IS_ERR(xudc->usb3_phy[i])) { + err = PTR_ERR(xudc->usb3_phy[i]); + dev_err_probe(xudc->dev, err, + "failed to get PHY for phy-name usb3-%d\n", j); + goto clean_up; + } else if (xudc->usb3_phy[i]) { + int usb2_port = + tegra_xusb_padctl_get_port_number(xudc->utmi_phy[i]); + int usb3_port = + tegra_xusb_padctl_get_port_number(xudc->usb3_phy[i]); + if (usb3_port == usb3_companion_port) { + dev_dbg(xudc->dev, "USB2 port %d is paired with USB3 port %d for device mode port %d\n", + usb2_port, usb3_port, i); + break; + } + } + } } return err; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index c02ad4f76..565aba6b9 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1334,6 +1334,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb) temp = kzalloc_node(buf_len, GFP_ATOMIC, dev_to_node(hcd->self.sysdev)); + if (!temp) + return -ENOMEM; if (usb_urb_dir_out(urb)) sg_pcopy_to_buffer(urb->sg, urb->num_sgs, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index b3e60b384..aa30288c8 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -56,6 +56,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ + { USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */ + { USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ @@ -144,6 +146,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ + { USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */ @@ -177,6 +180,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ + { USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */ { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */ { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index fe2173e37..248cbc9c4 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1077,6 +1077,8 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID), .driver_info = 
(kernel_ulong_t)&ftdi_jtag_quirk }, + /* GMC devices */ + { USB_DEVICE(GMC_VID, GMC_Z216C_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 21a2b5a25..5ee60ba2a 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1606,3 +1606,9 @@ #define UBLOX_VID 0x1546 #define UBLOX_C099F9P_ZED_PID 0x0502 #define UBLOX_C099F9P_ODIN_PID 0x0503 + +/* + * GMC devices + */ +#define GMC_VID 0x1cd7 +#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c0a0cca65..1a3e5a941 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -613,6 +613,11 @@ static void option_instat_callback(struct urb *urb); /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */ #define LUAT_PRODUCT_AIR720U 0x4e00 +/* MeiG Smart Technology products */ +#define MEIGSMART_VENDOR_ID 0x2dee +/* MeiG Smart SLM320 based on UNISOC UIS8910 */ +#define MEIGSMART_PRODUCT_SLM320 0x4d41 + /* Device flags */ /* Highest interface number which can be used with NCTRL() and RSVD() */ @@ -2282,6 +2287,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c index 4e0eef144..300aeef16 100644 --- a/drivers/usb/storage/isd200.c +++ b/drivers/usb/storage/isd200.c @@ -1105,7 +1105,7 @@ static void isd200_dump_driveid(struct us_data *us, u16 *id) static int isd200_get_inquiry_data( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; - int retStatus = ISD200_GOOD; + int retStatus; u16 *id = info->id; usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n"); @@ -1137,6 +1137,13 @@ static int isd200_get_inquiry_data( struct us_data *us ) isd200_fix_driveid(id); isd200_dump_driveid(us, id); + /* Prevent division by 0 in isd200_scsi_to_ata() */ + if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) { + usb_stor_dbg(us, " Invalid ATA Identify data\n"); + retStatus = ISD200_ERROR; + goto Done; + } + memset(&info->InquiryData, 0, sizeof(info->InquiryData)); /* Standard IDE interface only supports disks */ @@ -1202,6 +1209,7 @@ static int isd200_get_inquiry_data( struct us_data *us ) } } + Done: usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus); return(retStatus); @@ -1481,22 +1489,27 @@ static int isd200_init_info(struct us_data *us) static int isd200_Initialization(struct us_data *us) { + int rc = 0; + usb_stor_dbg(us, "ISD200 Initialization...\n"); /* Initialize ISD200 info struct */ - if (isd200_init_info(us) == ISD200_ERROR) { + if (isd200_init_info(us) < 0) { usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n"); + rc = -ENOMEM; } else { /* Get device specific data */ - if (isd200_get_inquiry_data(us) != ISD200_GOOD) + if (isd200_get_inquiry_data(us) != ISD200_GOOD) { usb_stor_dbg(us, "ISD200 Initialization Failure\n"); - else + rc = -EINVAL; + } else { usb_stor_dbg(us, "ISD200 Initialization complete\n"); + } } - return 0; + return rc; } diff --git a/drivers/usb/storage/uas.c 
b/drivers/usb/storage/uas.c index ed22053b3..af619efe8 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -533,7 +533,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp, * daft to me. */ -static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp) +static int uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp) { struct uas_dev_info *devinfo = cmnd->device->hostdata; struct urb *urb; @@ -541,30 +541,28 @@ static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp) urb = uas_alloc_sense_urb(devinfo, gfp, cmnd); if (!urb) - return NULL; + return -ENOMEM; usb_anchor_urb(urb, &devinfo->sense_urbs); err = usb_submit_urb(urb, gfp); if (err) { usb_unanchor_urb(urb); uas_log_cmd_state(cmnd, "sense submit err", err); usb_free_urb(urb); - return NULL; } - return urb; + return err; } static int uas_submit_urbs(struct scsi_cmnd *cmnd, struct uas_dev_info *devinfo) { struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd); - struct urb *urb; int err; lockdep_assert_held(&devinfo->lock); if (cmdinfo->state & SUBMIT_STATUS_URB) { - urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC); - if (!urb) - return SCSI_MLQUEUE_DEVICE_BUSY; + err = uas_submit_sense_urb(cmnd, GFP_ATOMIC); + if (err) + return err; cmdinfo->state &= ~SUBMIT_STATUS_URB; } @@ -572,7 +570,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC, cmnd, DMA_FROM_DEVICE); if (!cmdinfo->data_in_urb) - return SCSI_MLQUEUE_DEVICE_BUSY; + return -ENOMEM; cmdinfo->state &= ~ALLOC_DATA_IN_URB; } @@ -582,7 +580,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, if (err) { usb_unanchor_urb(cmdinfo->data_in_urb); uas_log_cmd_state(cmnd, "data in submit err", err); - return SCSI_MLQUEUE_DEVICE_BUSY; + return err; } cmdinfo->state &= ~SUBMIT_DATA_IN_URB; cmdinfo->state |= DATA_IN_URB_INFLIGHT; @@ -592,7 +590,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC, cmnd, DMA_TO_DEVICE); if (!cmdinfo->data_out_urb) - return SCSI_MLQUEUE_DEVICE_BUSY; + return -ENOMEM; cmdinfo->state &= ~ALLOC_DATA_OUT_URB; } @@ -602,7 +600,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, if (err) { usb_unanchor_urb(cmdinfo->data_out_urb); uas_log_cmd_state(cmnd, "data out submit err", err); - return SCSI_MLQUEUE_DEVICE_BUSY; + return err; } cmdinfo->state &= ~SUBMIT_DATA_OUT_URB; cmdinfo->state |= DATA_OUT_URB_INFLIGHT; @@ -611,7 +609,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, if (cmdinfo->state & ALLOC_CMD_URB) { cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, GFP_ATOMIC, cmnd); if (!cmdinfo->cmd_urb) - return SCSI_MLQUEUE_DEVICE_BUSY; + return -ENOMEM; cmdinfo->state &= ~ALLOC_CMD_URB; } @@ -621,7 +619,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, if (err) { usb_unanchor_urb(cmdinfo->cmd_urb); uas_log_cmd_state(cmnd, "cmd submit err", err); - return SCSI_MLQUEUE_DEVICE_BUSY; + return err; } cmdinfo->cmd_urb = NULL; cmdinfo->state &= ~SUBMIT_CMD_URB; @@ -698,7 +696,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd) * of queueing, no matter how fatal the error */ if (err == -ENODEV) { - set_host_byte(cmnd, DID_ERROR); + set_host_byte(cmnd, DID_NO_CONNECT); scsi_done(cmnd); goto zombie; } diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index 3da404d51..ce83f558f 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -1245,6 +1245,7 @@ static ssize_t select_usb_power_delivery_store(struct device 
*dev, { struct typec_port *port = to_typec_port(dev); struct usb_power_delivery *pd; + int ret; if (!port->ops || !port->ops->pd_set) return -EOPNOTSUPP; @@ -1253,7 +1254,11 @@ static ssize_t select_usb_power_delivery_store(struct device *dev, if (!pd) return -EINVAL; - return port->ops->pd_set(port, pd); + ret = port->ops->pd_set(port, pd); + if (ret) + return ret; + + return size; } static ssize_t select_usb_power_delivery_show(struct device *dev, diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 0695ee54f..98f335cbb 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -138,8 +138,12 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) if (!(cci & UCSI_CCI_COMMAND_COMPLETE)) return -EIO; - if (cci & UCSI_CCI_NOT_SUPPORTED) + if (cci & UCSI_CCI_NOT_SUPPORTED) { + if (ucsi_acknowledge_command(ucsi) < 0) + dev_err(ucsi->dev, + "ACK of unsupported command failed\n"); return -EOPNOTSUPP; + } if (cci & UCSI_CCI_ERROR) { if (cmd == UCSI_GET_ERROR_STATUS) @@ -829,11 +833,11 @@ static void ucsi_handle_connector_change(struct work_struct *work) if (con->status.change & UCSI_CONSTAT_CAM_CHANGE) ucsi_partner_task(con, ucsi_check_altmodes, 1, 0); - clear_bit(EVENT_PENDING, &con->ucsi->flags); - mutex_lock(&ucsi->ppm_lock); + clear_bit(EVENT_PENDING, &con->ucsi->flags); ret = ucsi_acknowledge_connector_change(ucsi); mutex_unlock(&ucsi->ppm_lock); + if (ret) dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret); @@ -874,13 +878,47 @@ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard) static int ucsi_reset_ppm(struct ucsi *ucsi) { - u64 command = UCSI_PPM_RESET; + u64 command; unsigned long tmo; u32 cci; int ret; mutex_lock(&ucsi->ppm_lock); + ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci)); + if (ret < 0) + goto out; + + /* + * If UCSI_CCI_RESET_COMPLETE is already set we must clear + * the flag before we start another reset. Send a + * UCSI_SET_NOTIFICATION_ENABLE command to achieve this. + * Ignore a timeout and try the reset anyway if this fails. 
+ */ + if (cci & UCSI_CCI_RESET_COMPLETE) { + command = UCSI_SET_NOTIFICATION_ENABLE; + ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command, + sizeof(command)); + if (ret < 0) + goto out; + + tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS); + do { + ret = ucsi->ops->read(ucsi, UCSI_CCI, + &cci, sizeof(cci)); + if (ret < 0) + goto out; + if (cci & UCSI_CCI_COMMAND_COMPLETE) + break; + if (time_is_before_jiffies(tmo)) + break; + msleep(20); + } while (1); + + WARN_ON(cci & UCSI_CCI_RESET_COMPLETE); + } + + command = UCSI_PPM_RESET; ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command, sizeof(command)); if (ret < 0) diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 60ce9fb6e..dbb10cb31 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -220,12 +220,12 @@ struct ucsi_cable_property { #define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0) #define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1) #define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2) -#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0)) +#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) (((_f_) & GENMASK(4, 3)) >> 3) #define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0 #define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1 #define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2 #define UCSI_CABLE_PROPERTY_PLUG_OTHER 3 -#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5) +#define UCSI_CABLE_PROP_FLAG_MODE_SUPPORT BIT(5) u8 latency; } __packed; diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index 48130d636..b4d86d47c 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -23,10 +23,11 @@ struct ucsi_acpi { void *base; struct completion complete; unsigned long flags; +#define UCSI_ACPI_SUPPRESS_EVENT 0 +#define UCSI_ACPI_COMMAND_PENDING 1 +#define UCSI_ACPI_ACK_PENDING 2 guid_t guid; u64 cmd; - bool dell_quirk_probed; - bool dell_quirk_active; }; static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func) @@ -79,9 +80,9 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset, int ret; if (ack) - set_bit(ACK_PENDING, &ua->flags); + set_bit(UCSI_ACPI_ACK_PENDING, &ua->flags); else - set_bit(COMMAND_PENDING, &ua->flags); + set_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags); ret = ucsi_acpi_async_write(ucsi, offset, val, val_len); if (ret) @@ -92,9 +93,9 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset, out_clear_bit: if (ack) - clear_bit(ACK_PENDING, &ua->flags); + clear_bit(UCSI_ACPI_ACK_PENDING, &ua->flags); else - clear_bit(COMMAND_PENDING, &ua->flags); + clear_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags); return ret; } @@ -129,51 +130,40 @@ static const struct ucsi_operations ucsi_zenbook_ops = { }; /* - * Some Dell laptops expect that an ACK command with the - * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate) - * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set. - * If this is not done events are not delivered to OSPM and - * subsequent commands will timeout. + * Some Dell laptops don't like ACK commands with the + * UCSI_ACK_CONNECTOR_CHANGE but not the UCSI_ACK_COMMAND_COMPLETE + * bit set. To work around this send a dummy command and bundle the + * UCSI_ACK_CONNECTOR_CHANGE with the UCSI_ACK_COMMAND_COMPLETE + * for the dummy command. 
*/ static int ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset, const void *val, size_t val_len) { struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); - u64 cmd = *(u64 *)val, ack = 0; + u64 cmd = *(u64 *)val; + u64 dummycmd = UCSI_GET_CAPABILITY; int ret; - if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI && - cmd & UCSI_ACK_CONNECTOR_CHANGE) - ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE; - - ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len); - if (ret != 0) - return ret; - if (ack == 0) - return ret; - - if (!ua->dell_quirk_probed) { - ua->dell_quirk_probed = true; - - cmd = UCSI_GET_CAPABILITY; - ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd, - sizeof(cmd)); - if (ret == 0) - return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, - &ack, sizeof(ack)); - if (ret != -ETIMEDOUT) + if (cmd == (UCSI_ACK_CC_CI | UCSI_ACK_CONNECTOR_CHANGE)) { + cmd |= UCSI_ACK_COMMAND_COMPLETE; + + /* + * The UCSI core thinks it is sending a connector change ack + * and will accept new connector change events. We don't want + * this to happen for the dummy command as its response will + * still report the very event that the core is trying to clear. + */ + set_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags); + ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &dummycmd, + sizeof(dummycmd)); + clear_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags); + + if (ret < 0) return ret; - - ua->dell_quirk_active = true; - dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n"); - dev_err(ua->dev, "Firmware bug: Enabling workaround\n"); } - if (!ua->dell_quirk_active) - return ret; - - return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack)); + return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd)); } static const struct ucsi_operations ucsi_dell_ops = { @@ -209,13 +199,14 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) if (ret) return; - if (UCSI_CCI_CONNECTOR(cci)) + if (UCSI_CCI_CONNECTOR(cci) && + !test_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags)) ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci)); if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags)) complete(&ua->complete); if (cci & UCSI_CCI_COMMAND_COMPLETE && - test_bit(COMMAND_PENDING, &ua->flags)) + test_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags)) complete(&ua->complete); } diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 2b7e796c4..74d295312 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -185,8 +185,6 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev); static bool mlx5_vdpa_debug; -#define MLX5_CVQ_MAX_ENT 16 - #define MLX5_LOG_VIO_FLAG(_feature) \ do { \ if (features & BIT_ULL(_feature)) \ @@ -1980,9 +1978,16 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num) struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); struct mlx5_vdpa_virtqueue *mvq; - if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx)) + if (!is_index_valid(mvdev, idx)) return; + if (is_ctrl_vq_idx(mvdev, idx)) { + struct mlx5_control_vq *cvq = &mvdev->cvq; + + cvq->vring.vring.num = num; + return; + } + mvq = &ndev->vqs[idx]; mvq->num_ent = num; } @@ -2512,7 +2517,7 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev) u16 idx = cvq->vring.last_avail_idx; err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, - MLX5_CVQ_MAX_ENT, false, + cvq->vring.vring.num, false, (struct vring_desc *)(uintptr_t)cvq->desc_addr, (struct vring_avail 
*)(uintptr_t)cvq->driver_addr, (struct vring_used *)(uintptr_t)cvq->device_addr); diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 61bde476c..e7fc25bfd 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -120,7 +120,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim) for (i = 0; i < vdpasim->dev_attr.nas; i++) vhost_iotlb_reset(&vdpasim->iommu[i]); - vdpasim->running = true; + vdpasim->running = false; spin_unlock(&vdpasim->iommu_lock); vdpasim->features = 0; @@ -513,6 +513,7 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status) spin_lock(&vdpasim->lock); vdpasim->status = status; + vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0; spin_unlock(&vdpasim->lock); } diff --git a/drivers/vfio/container.c b/drivers/vfio/container.c index d74164abb..ab9d8e348 100644 --- a/drivers/vfio/container.c +++ b/drivers/vfio/container.c @@ -366,7 +366,7 @@ static int vfio_fops_open(struct inode *inode, struct file *filep) { struct vfio_container *container; - container = kzalloc(sizeof(*container), GFP_KERNEL); + container = kzalloc(sizeof(*container), GFP_KERNEL_ACCOUNT); if (!container) return -ENOMEM; diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c index 7b428eac3..b125b6edf 100644 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c @@ -142,13 +142,14 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev, irq = &vdev->mc_irqs[index]; if (flags & VFIO_IRQ_SET_DATA_NONE) { - vfio_fsl_mc_irq_handler(hwirq, irq); + if (irq->trigger) + eventfd_signal(irq->trigger, 1); } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { u8 trigger = *(u8 *)data; - if (trigger) - vfio_fsl_mc_irq_handler(hwirq, irq); + if (trigger && irq->trigger) + eventfd_signal(irq->trigger, 1); } return 0; diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 4a350421c..523e0144c 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -1244,7 +1244,7 @@ static int vfio_msi_cap_len(struct vfio_pci_core_device *vdev, u8 pos) if (vdev->msi_perm) return len; - vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL); + vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL_ACCOUNT); if (!vdev->msi_perm) return -ENOMEM; @@ -1731,11 +1731,11 @@ int vfio_config_init(struct vfio_pci_core_device *vdev) * no requirements on the length of a capability, so the gap between * capabilities needs byte granularity. */ - map = kmalloc(pdev->cfg_size, GFP_KERNEL); + map = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT); if (!map) return -ENOMEM; - vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL); + vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL_ACCOUNT); if (!vconfig) { kfree(map); return -ENOMEM; diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index e030c2120..f357fd157 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -141,7 +141,8 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev) * of the exclusive page in case that hot-add * device's bar is assigned into it. 
*/ - dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL); + dummy_res = + kzalloc(sizeof(*dummy_res), GFP_KERNEL_ACCOUNT); if (dummy_res == NULL) goto no_mmap; @@ -856,7 +857,7 @@ int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev, region = krealloc(vdev->region, (vdev->num_regions + 1) * sizeof(*region), - GFP_KERNEL); + GFP_KERNEL_ACCOUNT); if (!region) return -ENOMEM; @@ -1637,7 +1638,7 @@ static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev, { struct vfio_pci_mmap_vma *mmap_vma; - mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL); + mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT); if (!mmap_vma) return -ENOMEM; diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c index 5e6ca5926..dd70e2431 100644 --- a/drivers/vfio/pci/vfio_pci_igd.c +++ b/drivers/vfio/pci/vfio_pci_igd.c @@ -180,7 +180,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev) if (!addr || !(~addr)) return -ENODEV; - opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL); + opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL_ACCOUNT); if (!opregionvbt) return -ENOMEM; diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 40c3d7cf1..03246a59b 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -55,17 +55,24 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused) { struct vfio_pci_core_device *vdev = opaque; - if (likely(is_intx(vdev) && !vdev->virq_disabled)) - eventfd_signal(vdev->ctx[0].trigger, 1); + if (likely(is_intx(vdev) && !vdev->virq_disabled)) { + struct eventfd_ctx *trigger; + + trigger = READ_ONCE(vdev->ctx[0].trigger); + if (likely(trigger)) + eventfd_signal(trigger, 1); + } } /* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */ -bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) +static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; unsigned long flags; bool masked_changed = false; + lockdep_assert_held(&vdev->igate); + spin_lock_irqsave(&vdev->irqlock, flags); /* @@ -95,6 +102,17 @@ bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) return masked_changed; } +bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) +{ + bool mask_changed; + + mutex_lock(&vdev->igate); + mask_changed = __vfio_pci_intx_mask(vdev); + mutex_unlock(&vdev->igate); + + return mask_changed; +} + /* * If this is triggered by an eventfd, we can't call eventfd_signal * or else we'll deadlock on the eventfd wait queue. 
Return >0 when @@ -137,12 +155,21 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused) return ret; } -void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev) +static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev) { + lockdep_assert_held(&vdev->igate); + if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0) vfio_send_intx_eventfd(vdev, NULL); } +void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev) +{ + mutex_lock(&vdev->igate); + __vfio_pci_intx_unmask(vdev); + mutex_unlock(&vdev->igate); +} + static irqreturn_t vfio_intx_handler(int irq, void *dev_id) { struct vfio_pci_core_device *vdev = dev_id; @@ -169,95 +196,104 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id) return ret; } -static int vfio_intx_enable(struct vfio_pci_core_device *vdev) +static int vfio_intx_enable(struct vfio_pci_core_device *vdev, + struct eventfd_ctx *trigger) { + struct pci_dev *pdev = vdev->pdev; + unsigned long irqflags; + char *name; + int ret; + if (!is_irq_none(vdev)) return -EINVAL; - if (!vdev->pdev->irq) + if (!pdev->irq) return -ENODEV; - vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev)); + if (!name) + return -ENOMEM; + + vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL_ACCOUNT); if (!vdev->ctx) return -ENOMEM; vdev->num_ctx = 1; + vdev->ctx[0].name = name; + vdev->ctx[0].trigger = trigger; + /* - * If the virtual interrupt is masked, restore it. Devices - * supporting DisINTx can be masked at the hardware level - * here, non-PCI-2.3 devices will have to wait until the - * interrupt is enabled. + * Fill the initial masked state based on virq_disabled. After + * enable, changing the DisINTx bit in vconfig directly changes INTx + * masking. igate prevents races during setup, once running masked + * is protected via irqlock. + * + * Devices supporting DisINTx also reflect the current mask state in + * the physical DisINTx bit, which is not affected during IRQ setup. + * + * Devices without DisINTx support require an exclusive interrupt. + * IRQ masking is performed at the IRQ chip. Again, igate protects + * against races during setup and IRQ handlers and irqfds are not + * yet active, therefore masked is stable and can be used to + * conditionally auto-enable the IRQ. + * + * irq_type must be stable while the IRQ handler is registered, + * therefore it must be set before request_irq(). */ vdev->ctx[0].masked = vdev->virq_disabled; - if (vdev->pci_2_3) - pci_intx(vdev->pdev, !vdev->ctx[0].masked); + if (vdev->pci_2_3) { + pci_intx(pdev, !vdev->ctx[0].masked); + irqflags = IRQF_SHARED; + } else { + irqflags = vdev->ctx[0].masked ? 
IRQF_NO_AUTOEN : 0; + } vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX; + ret = request_irq(pdev->irq, vfio_intx_handler, + irqflags, vdev->ctx[0].name, vdev); + if (ret) { + vdev->irq_type = VFIO_PCI_NUM_IRQS; + kfree(name); + vdev->num_ctx = 0; + kfree(vdev->ctx); + return ret; + } + return 0; } -static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd) +static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, + struct eventfd_ctx *trigger) { struct pci_dev *pdev = vdev->pdev; - unsigned long irqflags = IRQF_SHARED; - struct eventfd_ctx *trigger; - unsigned long flags; - int ret; + struct eventfd_ctx *old; - if (vdev->ctx[0].trigger) { - free_irq(pdev->irq, vdev); - kfree(vdev->ctx[0].name); - eventfd_ctx_put(vdev->ctx[0].trigger); - vdev->ctx[0].trigger = NULL; - } - - if (fd < 0) /* Disable only */ - return 0; - - vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)", - pci_name(pdev)); - if (!vdev->ctx[0].name) - return -ENOMEM; + old = vdev->ctx[0].trigger; - trigger = eventfd_ctx_fdget(fd); - if (IS_ERR(trigger)) { - kfree(vdev->ctx[0].name); - return PTR_ERR(trigger); - } + WRITE_ONCE(vdev->ctx[0].trigger, trigger); - vdev->ctx[0].trigger = trigger; - - if (!vdev->pci_2_3) - irqflags = 0; - - ret = request_irq(pdev->irq, vfio_intx_handler, - irqflags, vdev->ctx[0].name, vdev); - if (ret) { - vdev->ctx[0].trigger = NULL; - kfree(vdev->ctx[0].name); - eventfd_ctx_put(trigger); - return ret; + /* Releasing an old ctx requires synchronizing in-flight users */ + if (old) { + synchronize_irq(pdev->irq); + vfio_virqfd_flush_thread(&vdev->ctx[0].unmask); + eventfd_ctx_put(old); } - /* - * INTx disable will stick across the new irq setup, - * disable_irq won't. - */ - spin_lock_irqsave(&vdev->irqlock, flags); - if (!vdev->pci_2_3 && vdev->ctx[0].masked) - disable_irq_nosync(pdev->irq); - spin_unlock_irqrestore(&vdev->irqlock, flags); - return 0; } static void vfio_intx_disable(struct vfio_pci_core_device *vdev) { + struct pci_dev *pdev = vdev->pdev; + vfio_virqfd_disable(&vdev->ctx[0].unmask); vfio_virqfd_disable(&vdev->ctx[0].mask); - vfio_intx_set_signal(vdev, -1); + free_irq(pdev->irq, vdev); + if (vdev->ctx[0].trigger) + eventfd_ctx_put(vdev->ctx[0].trigger); + kfree(vdev->ctx[0].name); vdev->irq_type = VFIO_PCI_NUM_IRQS; vdev->num_ctx = 0; kfree(vdev->ctx); @@ -284,7 +320,8 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi if (!is_irq_none(vdev)) return -EINVAL; - vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), + GFP_KERNEL_ACCOUNT); if (!vdev->ctx) return -ENOMEM; @@ -316,14 +353,14 @@ static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msi } static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev, - int vector, int fd, bool msix) + unsigned int vector, int fd, bool msix) { struct pci_dev *pdev = vdev->pdev; struct eventfd_ctx *trigger; int irq, ret; u16 cmd; - if (vector < 0 || vector >= vdev->num_ctx) + if (vector >= vdev->num_ctx) return -EINVAL; irq = pci_irq_vector(pdev, vector); @@ -343,7 +380,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev, if (fd < 0) return 0; - vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)", + vdev->ctx[vector].name = kasprintf(GFP_KERNEL_ACCOUNT, + "vfio-msi%s[%d](%s)", msix ? 
"x" : "", vector, pci_name(pdev)); if (!vdev->ctx[vector].name) @@ -397,7 +435,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev, static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start, unsigned count, int32_t *fds, bool msix) { - int i, j, ret = 0; + unsigned int i, j; + int ret = 0; if (start >= vdev->num_ctx || start + count > vdev->num_ctx) return -EINVAL; @@ -408,8 +447,8 @@ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start, } if (ret) { - for (--j; j >= (int)start; j--) - vfio_msi_set_vector_signal(vdev, j, -1, msix); + for (i = start; i < j; i++) + vfio_msi_set_vector_signal(vdev, i, -1, msix); } return ret; @@ -418,16 +457,15 @@ static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start, static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix) { struct pci_dev *pdev = vdev->pdev; - int i; + unsigned int i; u16 cmd; for (i = 0; i < vdev->num_ctx; i++) { vfio_virqfd_disable(&vdev->ctx[i].unmask); vfio_virqfd_disable(&vdev->ctx[i].mask); + vfio_msi_set_vector_signal(vdev, i, -1, msix); } - vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); - cmd = vfio_pci_memory_lock_and_enable(vdev); pci_free_irq_vectors(pdev); vfio_pci_memory_unlock_and_restore(vdev, cmd); @@ -455,11 +493,11 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev, return -EINVAL; if (flags & VFIO_IRQ_SET_DATA_NONE) { - vfio_pci_intx_unmask(vdev); + __vfio_pci_intx_unmask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { uint8_t unmask = *(uint8_t *)data; if (unmask) - vfio_pci_intx_unmask(vdev); + __vfio_pci_intx_unmask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { int32_t fd = *(int32_t *)data; if (fd >= 0) @@ -482,11 +520,11 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev, return -EINVAL; if (flags & VFIO_IRQ_SET_DATA_NONE) { - vfio_pci_intx_mask(vdev); + __vfio_pci_intx_mask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { uint8_t mask = *(uint8_t *)data; if (mask) - vfio_pci_intx_mask(vdev); + __vfio_pci_intx_mask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { return -ENOTTY; /* XXX implement me */ } @@ -507,19 +545,23 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev, return -EINVAL; if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + struct eventfd_ctx *trigger = NULL; int32_t fd = *(int32_t *)data; int ret; - if (is_intx(vdev)) - return vfio_intx_set_signal(vdev, fd); + if (fd >= 0) { + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) + return PTR_ERR(trigger); + } - ret = vfio_intx_enable(vdev); - if (ret) - return ret; + if (is_intx(vdev)) + ret = vfio_intx_set_signal(vdev, trigger); + else + ret = vfio_intx_enable(vdev, trigger); - ret = vfio_intx_set_signal(vdev, fd); - if (ret) - vfio_intx_disable(vdev); + if (ret && trigger) + eventfd_ctx_put(trigger); return ret; } @@ -541,7 +583,7 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev, unsigned index, unsigned start, unsigned count, uint32_t flags, void *data) { - int i; + unsigned int i; bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? 
true : false; if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) { diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index e352a033b..e27de61ac 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -470,7 +470,7 @@ int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, goto out_unlock; } - ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL); + ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL_ACCOUNT); if (!ioeventfd) { ret = -ENOMEM; goto out_unlock; diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c index c5b09ec0a..7f4341a8d 100644 --- a/drivers/vfio/platform/vfio_platform_irq.c +++ b/drivers/vfio/platform/vfio_platform_irq.c @@ -136,6 +136,16 @@ static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev, return 0; } +/* + * The trigger eventfd is guaranteed valid in the interrupt path + * and protected by the igate mutex when triggered via ioctl. + */ +static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx) +{ + if (likely(irq_ctx->trigger)) + eventfd_signal(irq_ctx->trigger, 1); +} + static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id) { struct vfio_platform_irq *irq_ctx = dev_id; @@ -155,7 +165,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id) spin_unlock_irqrestore(&irq_ctx->lock, flags); if (ret == IRQ_HANDLED) - eventfd_signal(irq_ctx->trigger, 1); + vfio_send_eventfd(irq_ctx); return ret; } @@ -164,22 +174,19 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id) { struct vfio_platform_irq *irq_ctx = dev_id; - eventfd_signal(irq_ctx->trigger, 1); + vfio_send_eventfd(irq_ctx); return IRQ_HANDLED; } static int vfio_set_trigger(struct vfio_platform_device *vdev, int index, - int fd, irq_handler_t handler) + int fd) { struct vfio_platform_irq *irq = &vdev->irqs[index]; struct eventfd_ctx *trigger; - int ret; if (irq->trigger) { - irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN); - free_irq(irq->hwirq, irq); - kfree(irq->name); + disable_irq(irq->hwirq); eventfd_ctx_put(irq->trigger); irq->trigger = NULL; } @@ -187,30 +194,20 @@ static int vfio_set_trigger(struct vfio_platform_device *vdev, int index, if (fd < 0) /* Disable only */ return 0; - irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)", - irq->hwirq, vdev->name); - if (!irq->name) - return -ENOMEM; - trigger = eventfd_ctx_fdget(fd); - if (IS_ERR(trigger)) { - kfree(irq->name); + if (IS_ERR(trigger)) return PTR_ERR(trigger); - } irq->trigger = trigger; - irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN); - ret = request_irq(irq->hwirq, handler, 0, irq->name, irq); - if (ret) { - kfree(irq->name); - eventfd_ctx_put(trigger); - irq->trigger = NULL; - return ret; - } - - if (!irq->masked) - enable_irq(irq->hwirq); + /* + * irq->masked effectively provides nested disables within the overall + * enable relative to trigger. Specifically request_irq() is called + * with NO_AUTOEN, therefore the IRQ is initially disabled. The user + * may only further disable the IRQ with a MASK operations because + * irq->masked is initially false. 
+ */ + enable_irq(irq->hwirq); return 0; } @@ -229,7 +226,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev, handler = vfio_irq_handler; if (!count && (flags & VFIO_IRQ_SET_DATA_NONE)) - return vfio_set_trigger(vdev, index, -1, handler); + return vfio_set_trigger(vdev, index, -1); if (start != 0 || count != 1) return -EINVAL; @@ -237,7 +234,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev, if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { int32_t fd = *(int32_t *)data; - return vfio_set_trigger(vdev, index, fd, handler); + return vfio_set_trigger(vdev, index, fd); } if (flags & VFIO_IRQ_SET_DATA_NONE) { @@ -261,6 +258,14 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, unsigned start, unsigned count, uint32_t flags, void *data) = NULL; + /* + * For compatibility, errors from request_irq() are local to the + * SET_IRQS path and reflected in the name pointer. This allows, + * for example, polling mode fallback for an exclusive IRQ failure. + */ + if (IS_ERR(vdev->irqs[index].name)) + return PTR_ERR(vdev->irqs[index].name); + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { case VFIO_IRQ_SET_ACTION_MASK: func = vfio_platform_set_irq_mask; @@ -281,7 +286,7 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, int vfio_platform_irq_init(struct vfio_platform_device *vdev) { - int cnt = 0, i; + int cnt = 0, i, ret = 0; while (vdev->get_irq(vdev, cnt) >= 0) cnt++; @@ -292,37 +297,70 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev) for (i = 0; i < cnt; i++) { int hwirq = vdev->get_irq(vdev, i); + irq_handler_t handler = vfio_irq_handler; - if (hwirq < 0) + if (hwirq < 0) { + ret = -EINVAL; goto err; + } spin_lock_init(&vdev->irqs[i].lock); vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD; - if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) + if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) { vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED; + handler = vfio_automasked_irq_handler; + } vdev->irqs[i].count = 1; vdev->irqs[i].hwirq = hwirq; vdev->irqs[i].masked = false; + vdev->irqs[i].name = kasprintf(GFP_KERNEL, + "vfio-irq[%d](%s)", hwirq, + vdev->name); + if (!vdev->irqs[i].name) { + ret = -ENOMEM; + goto err; + } + + ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN, + vdev->irqs[i].name, &vdev->irqs[i]); + if (ret) { + kfree(vdev->irqs[i].name); + vdev->irqs[i].name = ERR_PTR(ret); + } } vdev->num_irqs = cnt; return 0; err: + for (--i; i >= 0; i--) { + if (!IS_ERR(vdev->irqs[i].name)) { + free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]); + kfree(vdev->irqs[i].name); + } + } kfree(vdev->irqs); - return -EINVAL; + return ret; } void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev) { int i; - for (i = 0; i < vdev->num_irqs; i++) - vfio_set_trigger(vdev, i, -1, NULL); + for (i = 0; i < vdev->num_irqs; i++) { + vfio_virqfd_disable(&vdev->irqs[i].mask); + vfio_virqfd_disable(&vdev->irqs[i].unmask); + if (!IS_ERR(vdev->irqs[i].name)) { + free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]); + if (vdev->irqs[i].trigger) + eventfd_ctx_put(vdev->irqs[i].trigger); + kfree(vdev->irqs[i].name); + } + } vdev->num_irqs = 0; kfree(vdev->irqs); diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c index 414e98d82..e06b32dde 100644 --- a/drivers/vfio/virqfd.c +++ b/drivers/vfio/virqfd.c @@ -104,6 +104,13 @@ static void virqfd_inject(struct work_struct *work) virqfd->thread(virqfd->opaque, virqfd->data); } +static void virqfd_flush_inject(struct work_struct *work) +{ + 
struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject); + + flush_work(&virqfd->inject); +} + int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *), void (*thread)(void *, void *), @@ -115,7 +122,7 @@ int vfio_virqfd_enable(void *opaque, int ret = 0; __poll_t events; - virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL); + virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL_ACCOUNT); if (!virqfd) return -ENOMEM; @@ -127,6 +134,7 @@ int vfio_virqfd_enable(void *opaque, INIT_WORK(&virqfd->shutdown, virqfd_shutdown); INIT_WORK(&virqfd->inject, virqfd_inject); + INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject); irqfd = fdget(fd); if (!irqfd.file) { @@ -217,6 +225,19 @@ void vfio_virqfd_disable(struct virqfd **pvirqfd) } EXPORT_SYMBOL_GPL(vfio_virqfd_disable); +void vfio_virqfd_flush_thread(struct virqfd **pvirqfd) +{ + unsigned long flags; + + spin_lock_irqsave(&virqfd_lock, flags); + if (*pvirqfd && (*pvirqfd)->thread) + queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject); + spin_unlock_irqrestore(&virqfd_lock, flags); + + flush_workqueue(vfio_irqfd_cleanup_wq); +} +EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread); + module_init(vfio_virqfd_init); module_exit(vfio_virqfd_exit); diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c index 882359dd2..aa0037939 100644 --- a/drivers/video/backlight/da9052_bl.c +++ b/drivers/video/backlight/da9052_bl.c @@ -117,6 +117,7 @@ static int da9052_backlight_probe(struct platform_device *pdev) wleds->led_reg = platform_get_device_id(pdev)->driver_data; wleds->state = DA9052_WLEDS_OFF; + memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = DA9052_MAX_BRIGHTNESS; diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c index 475f35635..0d43f6326 100644 --- a/drivers/video/backlight/lm3630a_bl.c +++ b/drivers/video/backlight/lm3630a_bl.c @@ -231,7 +231,7 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl) if (rval < 0) goto out_i2c_err; brightness |= rval; - goto out; + return brightness; } /* disable sleep */ @@ -242,11 +242,8 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl) rval = lm3630a_read(pchip, REG_BRT_A); if (rval < 0) goto out_i2c_err; - brightness = rval; + return rval; -out: - bl->props.brightness = brightness; - return bl->props.brightness; out_i2c_err: dev_err(pchip->dev, "i2c failed to access register\n"); return 0; @@ -306,7 +303,7 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl) if (rval < 0) goto out_i2c_err; brightness |= rval; - goto out; + return brightness; } /* disable sleep */ @@ -317,11 +314,8 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl) rval = lm3630a_read(pchip, REG_BRT_B); if (rval < 0) goto out_i2c_err; - brightness = rval; + return rval; -out: - bl->props.brightness = brightness; - return bl->props.brightness; out_i2c_err: dev_err(pchip->dev, "i2c failed to access register\n"); return 0; @@ -339,6 +333,7 @@ static int lm3630a_backlight_register(struct lm3630a_chip *pchip) struct backlight_properties props; const char *label; + memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) { props.brightness = pdata->leda_init_brt; diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c index 658091167..4c9726a7f 100644 --- a/drivers/video/backlight/lm3639_bl.c +++ 
b/drivers/video/backlight/lm3639_bl.c @@ -339,6 +339,7 @@ static int lm3639_probe(struct i2c_client *client, } /* backlight */ + memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.brightness = pdata->init_brt_led; props.max_brightness = pdata->max_brt_led; diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c index ba42f3fe0..d9b95dbd4 100644 --- a/drivers/video/backlight/lp8788_bl.c +++ b/drivers/video/backlight/lp8788_bl.c @@ -191,6 +191,7 @@ static int lp8788_backlight_register(struct lp8788_bl *bl) int init_brt; char *name; + memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = MAX_BRIGHTNESS; diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c index 570a71509..78d51deab 100644 --- a/drivers/watchdog/stm32_iwdg.c +++ b/drivers/watchdog/stm32_iwdg.c @@ -21,6 +21,8 @@ #include <linux/platform_device.h> #include <linux/watchdog.h> +#define DEFAULT_TIMEOUT 10 + /* IWDG registers */ #define IWDG_KR 0x00 /* Key register */ #define IWDG_PR 0x04 /* Prescaler Register */ @@ -249,6 +251,7 @@ static int stm32_iwdg_probe(struct platform_device *pdev) wdd->parent = dev; wdd->info = &stm32_iwdg_info; wdd->ops = &stm32_iwdg_ops; + wdd->timeout = DEFAULT_TIMEOUT; wdd->min_timeout = DIV_ROUND_UP((RLR_MIN + 1) * PR_MIN, wdt->rate); wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * wdt->data->max_prescaler * 1000) / wdt->rate;
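
A minimal sketch of the pattern the backlight hunks above (da9052_bl, lm3630a_bl, lm3639_bl, lp8788_bl) apply: struct backlight_properties is an on-stack variable in the probe path, so it is zeroed with memset() before the driver fills in the few fields it uses; otherwise fields the driver never touches (for example scale) reach the backlight core as stack garbage. The driver name foo_bl, the helper foo_bl_register() and the brightness values below are hypothetical and only illustrate the idiom, they are not taken from any of the patched drivers.

#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/string.h>

/* Hypothetical registration helper showing the zero-then-fill pattern. */
static int foo_bl_register(struct device *dev, void *priv,
			   const struct backlight_ops *ops)
{
	struct backlight_properties props;
	struct backlight_device *bl;

	/* Zero the whole struct first; only then set the fields we use. */
	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 255;
	props.brightness = 255;

	bl = devm_backlight_device_register(dev, "foo_bl", dev, priv,
					    ops, &props);
	return PTR_ERR_OR_ZERO(bl);
}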