author      Daniel Baumann <daniel.baumann@progress-linux.org>    2024-08-07 13:13:35 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>    2024-08-07 13:13:35 +0000
commit      9ea788653ae1a5cea3f3853c5c50b58c98b3cd67 (patch)
tree        e1bf3b779c1dc9c5337c4442bf720073a3a53415 /drivers
parent      Adding upstream version 6.9.7. (diff)
download    linux-upstream/6.9.8.tar.xz, linux-upstream/6.9.8.zip
Adding upstream version 6.9.8. (upstream/6.9.8)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers')
109 files changed, 1342 insertions(+), 578 deletions(-)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 5eb38fbbbe..fc6fd583fa 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1975,8 +1975,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); - if (!host) - return -ENOMEM; + if (!host) { + rc = -ENOMEM; + goto err_rm_sysfs_file; + } host->private_data = hpriv; if (ahci_init_msi(pdev, n_ports, hpriv) < 0) { @@ -2031,11 +2033,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* initialize adapter */ rc = ahci_configure_dma_masks(pdev, hpriv); if (rc) - return rc; + goto err_rm_sysfs_file; rc = ahci_pci_reset_controller(host); if (rc) - return rc; + goto err_rm_sysfs_file; ahci_pci_init_controller(host); ahci_pci_print_info(host); @@ -2044,10 +2046,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = ahci_host_activate(host, &ahci_sht); if (rc) - return rc; + goto err_rm_sysfs_file; pm_runtime_put_noidle(&pdev->dev); return 0; + +err_rm_sysfs_file: + sysfs_remove_file_from_group(&pdev->dev.kobj, + &dev_attr_remapped_nvme.attr, NULL); + return rc; } static void ahci_shutdown_one(struct pci_dev *pdev) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 28caed151e..d937e6e5cc 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4181,8 +4181,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM }, /* Crucial devices with broken LPM support */ - { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, - { "CT240BX500SSD1", NULL, ATA_HORKAGE_NOLPM }, + { "CT*0BX*00SSD1", NULL, ATA_HORKAGE_NOLPM }, /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */ { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | @@ -5534,6 +5533,18 @@ struct ata_port *ata_port_alloc(struct ata_host *host) return ap; } +void ata_port_free(struct ata_port *ap) +{ + if (!ap) + return; + + kfree(ap->pmp_link); + kfree(ap->slave_link); + kfree(ap->ncq_sense_buf); + kfree(ap); +} +EXPORT_SYMBOL_GPL(ata_port_free); + static void ata_devres_release(struct device *gendev, void *res) { struct ata_host *host = dev_get_drvdata(gendev); @@ -5560,12 +5571,7 @@ static void ata_host_release(struct kref *kref) int i; for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap = host->ports[i]; - - kfree(ap->pmp_link); - kfree(ap->slave_link); - kfree(ap->ncq_sense_buf); - kfree(ap); + ata_port_free(host->ports[i]); host->ports[i] = NULL; } kfree(host); @@ -5615,8 +5621,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) if (!host) return NULL; - if (!devres_open_group(dev, NULL, GFP_KERNEL)) - goto err_free; + if (!devres_open_group(dev, NULL, GFP_KERNEL)) { + kfree(host); + return NULL; + } dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL); if (!dr) @@ -5648,8 +5656,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) err_out: devres_release_group(dev, NULL); - err_free: - kfree(host); return NULL; } EXPORT_SYMBOL_GPL(ata_host_alloc); @@ -5948,7 +5954,7 @@ int ata_host_register(struct ata_host *host, const struct scsi_host_template *sh * allocation time. 
*/ for (i = host->n_ports; host->ports[i]; i++) - kfree(host->ports[i]); + ata_port_free(host->ports[i]); /* give ports names and add SCSI hosts */ for (i = 0; i < host->n_ports; i++) { diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c index b0f24cf3e8..4d3de4a358 100644 --- a/drivers/counter/ti-eqep.c +++ b/drivers/counter/ti-eqep.c @@ -6,6 +6,7 @@ */ #include <linux/bitops.h> +#include <linux/clk.h> #include <linux/counter.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> @@ -376,6 +377,7 @@ static int ti_eqep_probe(struct platform_device *pdev) struct counter_device *counter; struct ti_eqep_cnt *priv; void __iomem *base; + struct clk *clk; int err; counter = devm_counter_alloc(dev, sizeof(*priv)); @@ -415,6 +417,10 @@ static int ti_eqep_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_get_sync(dev); + clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "failed to enable clock\n"); + err = counter_add(counter); if (err < 0) { pm_runtime_put_sync(dev); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index dbbf299f42..3405bf69b1 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -357,15 +357,14 @@ static void intel_pstate_set_itmt_prio(int cpu) int ret; ret = cppc_get_perf_caps(cpu, &cppc_perf); - if (ret) - return; - /* - * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff. - * In this case we can't use CPPC.highest_perf to enable ITMT. - * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide. + * If CPPC is not available, fall back to MSR_HWP_CAPABILITIES bits [8:0]. + * + * Also, on some systems with overclocking enabled, CPPC.highest_perf is + * hardcoded to 0xff, so CPPC.highest_perf cannot be used to enable ITMT. + * Fall back to MSR_HWP_CAPABILITIES then too. */ - if (cppc_perf.highest_perf == CPPC_MAX_PERF) + if (ret || cppc_perf.highest_perf == CPPC_MAX_PERF) cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached)); /* diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index bc5a95665a..87008505f8 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -27,7 +27,14 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled); int cxl_region_init(void); void cxl_region_exit(void); int cxl_get_poison_by_endpoint(struct cxl_port *port); +struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa); + #else +static inline +struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa) +{ + return NULL; +} static inline int cxl_get_poison_by_endpoint(struct cxl_port *port) { return 0; diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 7d97790b89..e01c16fdc7 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -52,6 +52,14 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port) struct cxl_dport *dport = NULL; int single_port_map[1]; unsigned long index; + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); + + /* + * Capability checks are moot for passthrough decoders, support + * any and all possibilities. 
+ */ + cxlhdm->interleave_mask = ~0U; + cxlhdm->iw_cap_mask = ~0UL; cxlsd = cxl_switch_decoder_alloc(port, 1); if (IS_ERR(cxlsd)) @@ -79,6 +87,11 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm) cxlhdm->interleave_mask |= GENMASK(11, 8); if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap)) cxlhdm->interleave_mask |= GENMASK(14, 12); + cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8); + if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap)) + cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12); + if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap)) + cxlhdm->iw_cap_mask |= BIT(16); } static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info) diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index d4e259f3a7..0277726afd 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -251,50 +251,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) } EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL); -struct cxl_dpa_to_region_context { - struct cxl_region *cxlr; - u64 dpa; -}; - -static int __cxl_dpa_to_region(struct device *dev, void *arg) -{ - struct cxl_dpa_to_region_context *ctx = arg; - struct cxl_endpoint_decoder *cxled; - u64 dpa = ctx->dpa; - - if (!is_endpoint_decoder(dev)) - return 0; - - cxled = to_cxl_endpoint_decoder(dev); - if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) - return 0; - - if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) - return 0; - - dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa, - dev_name(&cxled->cxld.region->dev)); - - ctx->cxlr = cxled->cxld.region; - - return 1; -} - -static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa) -{ - struct cxl_dpa_to_region_context ctx; - struct cxl_port *port; - - ctx = (struct cxl_dpa_to_region_context) { - .dpa = dpa, - }; - port = cxlmd->endpoint; - if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port)) - device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); - - return ctx.cxlr; -} - static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa) { struct cxl_dev_state *cxlds = cxlmd->cxlds; diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index e69625a8d6..c00f3a9331 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -62,10 +62,14 @@ static int match_nvdimm_bridge(struct device *dev, void *data) return is_cxl_nvdimm_bridge(dev); } -struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd) +/** + * cxl_find_nvdimm_bridge() - find a bridge device relative to a port + * @port: any descendant port of an nvdimm-bridge associated + * root-cxl-port + */ +struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port) { - struct cxl_root *cxl_root __free(put_cxl_root) = - find_cxl_root(cxlmd->endpoint); + struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port); struct device *dev; if (!cxl_root) @@ -242,18 +246,20 @@ static void cxlmd_release_nvdimm(void *_cxlmd) /** * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm + * @parent_port: parent port for the (to be added) @cxlmd endpoint port * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations * * Return: 0 on success negative error code on failure. 
*/ -int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd) +int devm_cxl_add_nvdimm(struct cxl_port *parent_port, + struct cxl_memdev *cxlmd) { struct cxl_nvdimm_bridge *cxl_nvb; struct cxl_nvdimm *cxl_nvd; struct device *dev; int rc; - cxl_nvb = cxl_find_nvdimm_bridge(cxlmd); + cxl_nvb = cxl_find_nvdimm_bridge(parent_port); if (!cxl_nvb) return -ENODEV; diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 18b9514964..a600feb8a4 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1101,6 +1101,26 @@ static int cxl_port_attach_region(struct cxl_port *port, } cxld = cxl_rr->decoder; + /* + * the number of targets should not exceed the target_count + * of the decoder + */ + if (is_switch_decoder(&cxld->dev)) { + struct cxl_switch_decoder *cxlsd; + + cxlsd = to_cxl_switch_decoder(&cxld->dev); + if (cxl_rr->nr_targets > cxlsd->nr_targets) { + dev_dbg(&cxlr->dev, + "%s:%s %s add: %s:%s @ %d overflows targets: %d\n", + dev_name(port->uport_dev), dev_name(&port->dev), + dev_name(&cxld->dev), dev_name(&cxlmd->dev), + dev_name(&cxled->cxld.dev), pos, + cxlsd->nr_targets); + rc = -ENXIO; + goto out_erase; + } + } + rc = cxl_rr_ep_add(cxl_rr, cxled); if (rc) { dev_dbg(&cxlr->dev, @@ -1210,6 +1230,50 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled, return 0; } +static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig) +{ + struct cxl_port *port = to_cxl_port(cxld->dev.parent); + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); + unsigned int interleave_mask; + u8 eiw; + u16 eig; + int high_pos, low_pos; + + if (!test_bit(iw, &cxlhdm->iw_cap_mask)) + return -ENXIO; + /* + * Per CXL specification r3.1(8.2.4.20.13 Decoder Protection), + * if eiw < 8: + * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw] + * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0] + * + * when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used, the + * interleave bits are none. + * + * if eiw >= 8: + * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3 + * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0] + * + * when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used, the + * interleave bits are none. 
+ */ + ways_to_eiw(iw, &eiw); + if (eiw == 0 || eiw == 8) + return 0; + + granularity_to_eig(ig, &eig); + if (eiw > 8) + high_pos = eiw + eig - 1; + else + high_pos = eiw + eig + 7; + low_pos = eig + 8; + interleave_mask = GENMASK(high_pos, low_pos); + if (interleave_mask & ~cxlhdm->interleave_mask) + return -ENXIO; + + return 0; +} + static int cxl_port_setup_targets(struct cxl_port *port, struct cxl_region *cxlr, struct cxl_endpoint_decoder *cxled) @@ -1360,6 +1424,15 @@ static int cxl_port_setup_targets(struct cxl_port *port, return -ENXIO; } } else { + rc = check_interleave_cap(cxld, iw, ig); + if (rc) { + dev_dbg(&cxlr->dev, + "%s:%s iw: %d ig: %d is not supported\n", + dev_name(port->uport_dev), + dev_name(&port->dev), iw, ig); + return rc; + } + cxld->interleave_ways = iw; cxld->interleave_granularity = ig; cxld->hpa_range = (struct range) { @@ -1796,6 +1869,15 @@ static int cxl_region_attach(struct cxl_region *cxlr, struct cxl_dport *dport; int rc = -ENXIO; + rc = check_interleave_cap(&cxled->cxld, p->interleave_ways, + p->interleave_granularity); + if (rc) { + dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n", + dev_name(&cxled->cxld.dev), p->interleave_ways, + p->interleave_granularity); + return rc; + } + if (cxled->mode != cxlr->mode) { dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n", dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode); @@ -2679,28 +2761,78 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port) return rc; } +struct cxl_dpa_to_region_context { + struct cxl_region *cxlr; + u64 dpa; +}; + +static int __cxl_dpa_to_region(struct device *dev, void *arg) +{ + struct cxl_dpa_to_region_context *ctx = arg; + struct cxl_endpoint_decoder *cxled; + struct cxl_region *cxlr; + u64 dpa = ctx->dpa; + + if (!is_endpoint_decoder(dev)) + return 0; + + cxled = to_cxl_endpoint_decoder(dev); + if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res)) + return 0; + + if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) + return 0; + + /* + * Stop the region search (return 1) when an endpoint mapping is + * found. The region may not be fully constructed so offering + * the cxlr in the context structure is not guaranteed. 
+ */ + cxlr = cxled->cxld.region; + if (cxlr) + dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa, + dev_name(&cxlr->dev)); + else + dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa, + dev_name(dev)); + + ctx->cxlr = cxlr; + + return 1; +} + +struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa) +{ + struct cxl_dpa_to_region_context ctx; + struct cxl_port *port; + + ctx = (struct cxl_dpa_to_region_context) { + .dpa = dpa, + }; + port = cxlmd->endpoint; + if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port)) + device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); + + return ctx.cxlr; +} + static struct lock_class_key cxl_pmem_region_key; -static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) +static int cxl_pmem_region_alloc(struct cxl_region *cxlr) { struct cxl_region_params *p = &cxlr->params; struct cxl_nvdimm_bridge *cxl_nvb; - struct cxl_pmem_region *cxlr_pmem; struct device *dev; int i; - down_read(&cxl_region_rwsem); - if (p->state != CXL_CONFIG_COMMIT) { - cxlr_pmem = ERR_PTR(-ENXIO); - goto out; - } + guard(rwsem_read)(&cxl_region_rwsem); + if (p->state != CXL_CONFIG_COMMIT) + return -ENXIO; - cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), - GFP_KERNEL); - if (!cxlr_pmem) { - cxlr_pmem = ERR_PTR(-ENOMEM); - goto out; - } + struct cxl_pmem_region *cxlr_pmem __free(kfree) = + kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL); + if (!cxlr_pmem) + return -ENOMEM; cxlr_pmem->hpa_range.start = p->res->start; cxlr_pmem->hpa_range.end = p->res->end; @@ -2717,12 +2849,9 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) * bridge for one device is the same for all. */ if (i == 0) { - cxl_nvb = cxl_find_nvdimm_bridge(cxlmd); - if (!cxl_nvb) { - kfree(cxlr_pmem); - cxlr_pmem = ERR_PTR(-ENODEV); - goto out; - } + cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint); + if (!cxl_nvb) + return -ENODEV; cxlr->cxl_nvb = cxl_nvb; } m->cxlmd = cxlmd; @@ -2733,18 +2862,16 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) } dev = &cxlr_pmem->dev; - cxlr_pmem->cxlr = cxlr; - cxlr->cxlr_pmem = cxlr_pmem; device_initialize(dev); lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); device_set_pm_not_required(dev); dev->parent = &cxlr->dev; dev->bus = &cxl_bus_type; dev->type = &cxl_pmem_region_type; -out: - up_read(&cxl_region_rwsem); + cxlr_pmem->cxlr = cxlr; + cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem); - return cxlr_pmem; + return 0; } static void cxl_dax_region_release(struct device *dev) @@ -2861,9 +2988,10 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr) struct device *dev; int rc; - cxlr_pmem = cxl_pmem_region_alloc(cxlr); - if (IS_ERR(cxlr_pmem)) - return PTR_ERR(cxlr_pmem); + rc = cxl_pmem_region_alloc(cxlr); + if (rc) + return rc; + cxlr_pmem = cxlr->cxlr_pmem; cxl_nvb = cxlr->cxl_nvb; dev = &cxlr_pmem->dev; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 72fa477407..6f9270f2fa 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -45,6 +45,8 @@ #define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4) #define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8) #define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9) +#define CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY BIT(11) +#define CXL_HDM_DECODER_INTERLEAVE_16_WAY BIT(12) #define CXL_HDM_DECODER_CTRL_OFFSET 0x4 #define CXL_HDM_DECODER_ENABLE BIT(1) #define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10) @@ -848,8 +850,8 @@ struct cxl_nvdimm_bridge 
*devm_cxl_add_nvdimm_bridge(struct device *host, struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm_bridge(struct device *dev); -int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd); -struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd); +int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd); +struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port); #ifdef CONFIG_CXL_REGION bool is_cxl_pmem_region(struct device *dev); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 36cee9c30c..07e65a7605 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -848,11 +848,21 @@ static inline void cxl_mem_active_dec(void) int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd); +/** + * struct cxl_hdm - HDM Decoder registers and cached / decoded capabilities + * @regs: mapped registers, see devm_cxl_setup_hdm() + * @decoder_count: number of decoders for this port + * @target_count: for switch decoders, max downstream port targets + * @interleave_mask: interleave granularity capability, see check_interleave_cap() + * @iw_cap_mask: bitmask of supported interleave ways, see check_interleave_cap() + * @port: mapped cxl_port, see devm_cxl_setup_hdm() + */ struct cxl_hdm { struct cxl_component_regs regs; unsigned int decoder_count; unsigned int target_count; unsigned int interleave_mask; + unsigned long iw_cap_mask; struct cxl_port *port; }; diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index 0c79d9ce87..2f1b49bfe1 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -152,6 +152,15 @@ static int cxl_mem_probe(struct device *dev) return -ENXIO; } + if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) { + rc = devm_cxl_add_nvdimm(parent_port, cxlmd); + if (rc) { + if (rc == -ENODEV) + dev_info(dev, "PMEM disabled by platform\n"); + return rc; + } + } + if (dport->rch) endpoint_parent = parent_port->uport_dev; else @@ -174,14 +183,6 @@ unlock: if (rc) return rc; - if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) { - rc = devm_cxl_add_nvdimm(cxlmd); - if (rc == -ENODEV) - dev_info(dev, "PMEM disabled by platform\n"); - else - return rc; - } - /* * The kernel may be operating out of CXL memory on this device, * there is no spec defined way to determine whether this device diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index bb499e3629..1d0175d635 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -225,6 +225,11 @@ static int davinci_gpio_probe(struct platform_device *pdev) else nirq = DIV_ROUND_UP(ngpio, 16); + if (nirq > MAX_INT_PER_BANK) { + dev_err(dev, "Too many IRQs!\n"); + return -EINVAL; + } + chips = devm_kzalloc(dev, sizeof(*chips), GFP_KERNEL); if (!chips) return -ENOMEM; diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 9dad67ea25..5639abce6e 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -89,6 +89,10 @@ struct linehandle_state { GPIOHANDLE_REQUEST_OPEN_DRAIN | \ GPIOHANDLE_REQUEST_OPEN_SOURCE) +#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \ + (GPIOHANDLE_REQUEST_INPUT | \ + GPIOHANDLE_REQUEST_OUTPUT) + static int linehandle_validate_flags(u32 flags) { /* Return an error if an unknown flag is set */ @@ -169,21 +173,21 @@ static long linehandle_set_config(struct linehandle_state *lh, if (ret) return ret; + /* Lines must be reconfigured explicitly as input or output. 
*/ + if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS)) + return -EINVAL; + for (i = 0; i < lh->num_descs; i++) { desc = lh->descs[i]; - linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags); + linehandle_flags_to_desc_flags(lflags, &desc->flags); - /* - * Lines have to be requested explicitly for input - * or output, else the line will be treated "as is". - */ if (lflags & GPIOHANDLE_REQUEST_OUTPUT) { int val = !!gcnf.default_values[i]; ret = gpiod_direction_output(desc, val); if (ret) return ret; - } else if (lflags & GPIOHANDLE_REQUEST_INPUT) { + } else { ret = gpiod_direction_input(desc); if (ret) return ret; @@ -1530,12 +1534,14 @@ static long linereq_set_config(struct linereq *lr, void __user *ip) line = &lr->lines[i]; desc = lr->lines[i].desc; flags = gpio_v2_line_config_flags(&lc, i); - gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags); - edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS; /* - * Lines have to be requested explicitly for input - * or output, else the line will be treated "as is". + * Lines not explicitly reconfigured as input or output + * are left unchanged. */ + if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS)) + continue; + gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags); + edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS; if (flags & GPIO_V2_LINE_FLAG_OUTPUT) { int val = gpio_v2_line_config_output_value(&lc, i); @@ -1543,7 +1549,7 @@ static long linereq_set_config(struct linereq *lr, void __user *ip) ret = gpiod_direction_output(desc, val); if (ret) return ret; - } else if (flags & GPIO_V2_LINE_FLAG_INPUT) { + } else { ret = gpiod_direction_input(desc); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index c8c23dbb90..12b4885182 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -399,7 +399,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, mem_channel_number = vram_info->v30.channel_num; mem_channel_width = vram_info->v30.channel_width; if (vram_width) - *vram_width = mem_channel_number * (1 << mem_channel_width); + *vram_width = mem_channel_number * 16; break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 941d6e379b..eb8af02332 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5121,11 +5121,14 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev) dev_info(adev->dev, "GPU mode1 reset\n"); + /* Cache the state before bus master disable. The saved config space + * values are used in other cases like restore after mode-2 reset. 
+ */ + amdgpu_device_cache_pci_state(adev->pdev); + /* disable BM */ pci_clear_master(adev->pdev); - amdgpu_device_cache_pci_state(adev->pdev); - if (amdgpu_dpm_is_mode1_reset_supported(adev)) { dev_info(adev->dev, "GPU smu mode1 reset\n"); ret = amdgpu_dpm_mode1_reset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 8baa2e0935..ba6d1876ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -3,6 +3,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_simple_kms_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_vblank.h> #include "amdgpu.h" @@ -314,7 +315,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane, return 0; } afb = to_amdgpu_framebuffer(new_state->fb); - obj = new_state->fb->obj[0]; + + obj = drm_gem_fb_get_obj(new_state->fb, 0); + if (!obj) { + DRM_ERROR("Failed to get obj from framebuffer\n"); + return -EINVAL; + } + rbo = gem_to_amdgpu_bo(obj); adev = amdgpu_ttm_adev(rbo->tbo.bdev); @@ -368,12 +375,19 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { struct amdgpu_bo *rbo; + struct drm_gem_object *obj; int r; if (!old_state->fb) return; - rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); + obj = drm_gem_fb_get_obj(old_state->fb, 0); + if (!obj) { + DRM_ERROR("Failed to get obj from framebuffer\n"); + return; + } + + rbo = gem_to_amdgpu_bo(obj); r = amdgpu_bo_reserve(rbo, false); if (unlikely(r)) { DRM_ERROR("failed to reserve rbo before unpin\n"); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index f15d1dbad6..b72ed3e78d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -327,6 +327,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id); } + if (dml_pipe_idx == 0xFFFFFFFF) + continue; ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[dml_pipe_idx]); ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_pipe_idx] == context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id); @@ -468,6 +470,9 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id); else dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, display_state->res_ctx.pipe_ctx[i].stream->stream_id); + + if (dml_pipe_idx == 0xFFFFFFFF) + continue; total_det_allocated += dml_get_det_buffer_size_kbytes(&in_ctx->v20.dml_core_ctx, dml_pipe_idx); if (total_det_allocated > max_det_size) { need_recalculation = true; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 289f5d1333..f608dd3bba 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -1590,9 +1590,17 @@ static bool retrieve_link_cap(struct dc_link *link) return false; } - if (dp_is_lttpr_present(link)) + if (dp_is_lttpr_present(link)) { configure_lttpr_mode_transparent(link); + // Echo TOTAL_LTTPR_CNT back downstream + core_link_write_dpcd( + link, + DP_TOTAL_LTTPR_CNT, + 
&link->dpcd_caps.lttpr_caps.phy_repeater_cnt, + sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt)); + } + /* Read DP tunneling information. */ status = dpcd_get_tunneling_device_data(link); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 04d142f974..2fb1d00ff9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -892,7 +892,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ - .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, + .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE, .using_dml2 = false, }; diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index 914f28e9f2..aee5170f5f 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -177,4 +177,9 @@ enum dpcd_psr_sink_states { #define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379 #define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A +/* Remove once drm_dp_helper.h is updated upstream */ +#ifndef DP_TOTAL_LTTPR_CNT +#define DP_TOTAL_LTTPR_CNT 0xF000A /* 2.1 */ +#endif + #endif /* __DAL_DPCD_DEFS_H__ */ diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d612133e2c..117237d352 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -524,6 +524,9 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) if (!info) return ERR_PTR(-ENOMEM); + if (!drm_leak_fbdev_smem) + info->flags |= FBINFO_HIDE_SMEM_START; + ret = fb_alloc_cmap(&info->cmap, 256, 0); if (ret) goto err_release; @@ -1860,9 +1863,6 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) info = fb_helper->info; info->var.pixclock = 0; - if (!drm_leak_fbdev_smem) - info->flags |= FBINFO_HIDE_SMEM_START; - /* Need to drop locks to avoid recursive deadlock in * register_framebuffer. This is ok because the only thing left to do is * register the fbdev emulation instance in kernel_fb_helper_list. 
*/ diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 6c9427bb40..13cd754af3 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -130,7 +130,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper, info->flags |= FBINFO_READS_FAST; /* signal caching */ info->screen_size = sizes->surface_height * fb->pitches[0]; info->screen_buffer = map.vaddr; - info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer)); + if (!(info->flags & FBINFO_HIDE_SMEM_START)) { + if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer))) + info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer)); + } info->fix.smem_len = info->screen_size; return 0; diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 638ffa4444..714e42b051 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -469,14 +469,12 @@ void drm_file_update_pid(struct drm_file *filp) dev = filp->minor->dev; mutex_lock(&dev->filelist_mutex); + get_pid(pid); old = rcu_replace_pointer(filp->pid, pid, 1); mutex_unlock(&dev->filelist_mutex); - if (pid != old) { - get_pid(pid); - synchronize_rcu(); - put_pid(old); - } + synchronize_rcu(); + put_pid(old); } /** diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index 40371b8a9b..93bc1cc1ee 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -298,6 +298,7 @@ void i915_vma_revoke_fence(struct i915_vma *vma) return; GEM_BUG_ON(fence->vma != vma); + i915_active_wait(&fence->active); GEM_BUG_ON(!i915_active_is_idle(&fence->active)); GEM_BUG_ON(atomic_read(&fence->pin_count)); diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index 670c9739e5..2033214c4b 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -209,6 +209,8 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder, struct drm_display_mode *mode; mode = drm_mode_duplicate(encoder->dev, tv_mode); + if (!mode) + continue; mode->clock = tv_norm->tv_enc_mode.vrefresh * mode->htotal / 1000 * @@ -258,6 +260,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder, if (modes[i].hdisplay == output_mode->hdisplay && modes[i].vdisplay == output_mode->vdisplay) { mode = drm_mode_duplicate(encoder->dev, output_mode); + if (!mode) + continue; mode->type |= DRM_MODE_TYPE_PREFERRED; } else { @@ -265,6 +269,8 @@ static int nv17_tv_get_hd_modes(struct drm_encoder *encoder, modes[i].vdisplay, 60, false, (output_mode->flags & DRM_MODE_FLAG_INTERLACE), false); + if (!mode) + continue; } /* CVT modes are sometimes unsuitable... 
*/ diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 2ffe5f68a8..4c8c317191 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -1080,10 +1080,10 @@ static int ili9881c_prepare(struct drm_panel *panel) msleep(5); /* And reset it */ - gpiod_set_value(ctx->reset, 1); + gpiod_set_value_cansleep(ctx->reset, 1); msleep(20); - gpiod_set_value(ctx->reset, 0); + gpiod_set_value_cansleep(ctx->reset, 0); msleep(20); for (i = 0; i < ctx->desc->init_length; i++) { @@ -1138,7 +1138,7 @@ static int ili9881c_unprepare(struct drm_panel *panel) mipi_dsi_dcs_enter_sleep_mode(ctx->dsi); regulator_disable(ctx->power); - gpiod_set_value(ctx->reset, 1); + gpiod_set_value_cansleep(ctx->reset, 1); return 0; } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index e8fe5a6945..6aac6f2acc 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2707,6 +2707,7 @@ static const struct display_timing koe_tx26d202vm0bwa_timing = { .vfront_porch = { 3, 5, 10 }, .vback_porch = { 2, 5, 10 }, .vsync_len = { 5, 5, 5 }, + .flags = DISPLAY_FLAGS_DE_HIGH, }; static const struct panel_desc koe_tx26d202vm0bwa = { diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3e5ff17e3c..0999c8eaae 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -132,7 +132,6 @@ extern int radeon_cik_support; /* RADEON_IB_POOL_SIZE must be a power of 2 */ #define RADEON_IB_POOL_SIZE 16 #define RADEON_DEBUGFS_MAX_COMPONENTS 32 -#define RADEONFB_CONN_LIMIT 4 #define RADEON_BIOS_NUM_SCRATCH 8 /* internal ring indices */ diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index efd18c8d84..5f1d24d312 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -683,7 +683,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc; - radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); + radeon_crtc = kzalloc(sizeof(*radeon_crtc), GFP_KERNEL); if (radeon_crtc == NULL) return; @@ -709,12 +709,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index) dev->mode_config.cursor_width = radeon_crtc->max_cursor_width; dev->mode_config.cursor_height = radeon_crtc->max_cursor_height; -#if 0 - radeon_crtc->mode_set.crtc = &radeon_crtc->base; - radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); - radeon_crtc->mode_set.num_connectors = 0; -#endif - if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)) radeon_atombios_init_crtc(dev, radeon_crtc); else diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 68d3d623a0..ccec291b02 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -74,17 +74,19 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) { struct xe_devcoredump *coredump = data; - struct xe_device *xe = coredump_to_xe(coredump); - struct xe_devcoredump_snapshot *ss = &coredump->snapshot; + struct xe_device *xe; + struct xe_devcoredump_snapshot *ss; struct drm_printer p; struct drm_print_iterator iter; struct timespec64 ts; int i; - /* Our device is gone already... 
*/ - if (!data || !coredump_to_xe(coredump)) + if (!coredump) return -ENODEV; + xe = coredump_to_xe(coredump); + ss = &coredump->snapshot; + /* Ensure delayed work is captured before continuing */ flush_work(&ss->work); diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c index e148934d55..351ab902eb 100644 --- a/drivers/gpu/drm/xe/xe_pat.c +++ b/drivers/gpu/drm/xe/xe_pat.c @@ -457,7 +457,7 @@ void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p) { struct xe_device *xe = gt_to_xe(gt); - if (!xe->pat.ops->dump) + if (!xe->pat.ops) return; xe->pat.ops->dump(gt, p); diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c index 3107d2a124..fb35e46d68 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c @@ -207,6 +207,11 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe) u64 stolen_size, io_size, pgsize; int err; + if (!mgr) { + drm_dbg_kms(&xe->drm, "Stolen mgr init failed\n"); + return; + } + if (IS_SRIOV_VF(xe)) stolen_size = 0; else if (IS_DGFX(xe)) diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c index 115ec745e5..0678faf832 100644 --- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c @@ -91,7 +91,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man, min_page_size = mgr->default_page_size; if (tbo->page_alignment) - min_page_size = tbo->page_alignment << PAGE_SHIFT; + min_page_size = (u64)tbo->page_alignment << PAGE_SHIFT; if (WARN_ON(min_page_size < mm->chunk_size)) { err = -EINVAL; diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c index a49642bbae..ca43e98cae 100644 --- a/drivers/i2c/i2c-slave-testunit.c +++ b/drivers/i2c/i2c-slave-testunit.c @@ -118,9 +118,12 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client, queue_delayed_work(system_long_wq, &tu->worker, msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY])); } - fallthrough; + break; case I2C_SLAVE_WRITE_REQUESTED: + if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags)) + return -EBUSY; + memset(tu->regs, 0, TU_NUM_REGS); tu->reg_idx = 0; break; diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index c2da5066e9..80b57d3ee3 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -330,6 +330,8 @@ config DMARD10 config FXLS8962AF tristate depends on I2C || !I2C # cannot be built-in for modular I2C + select IIO_BUFFER + select IIO_KFIFO_BUF config FXLS8962AF_I2C tristate "NXP FXLS8962AF/FXLS8964AF Accelerometer I2C Driver" diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 468c2656d2..98648c679a 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -157,6 +157,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev, ret = ad7266_read_single(st, val, chan->address); iio_device_release_direct_mode(indio_dev); + if (ret < 0) + return ret; *val = (*val >> 2) & 0xfff; if (chan->scan_type.sign == 's') *val = sign_extend32(*val, diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c index f0b71a1220..f52abf7592 100644 --- a/drivers/iio/adc/xilinx-ams.c +++ b/drivers/iio/adc/xilinx-ams.c @@ -414,8 +414,12 @@ static void ams_enable_channel_sequence(struct iio_dev *indio_dev) /* Run calibration of PS & PL as part of the sequence */ scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX); - for (i = 0; i < indio_dev->num_channels; i++) - scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index); + for (i = 0; i < indio_dev->num_channels; i++) { + const 
struct iio_chan_spec *chan = &indio_dev->channels[i]; + + if (chan->scan_index < AMS_CTRL_SEQ_BASE) + scan_mask |= BIT_ULL(chan->scan_index); + } if (ams->ps_base) { /* put sysmon in a soft reset to change the sequence */ diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h index 4edc5d21cb..f959252a4f 100644 --- a/drivers/iio/chemical/bme680.h +++ b/drivers/iio/chemical/bme680.h @@ -54,7 +54,9 @@ #define BME680_NB_CONV_MASK GENMASK(3, 0) #define BME680_REG_MEAS_STAT_0 0x1D +#define BME680_NEW_DATA_BIT BIT(7) #define BME680_GAS_MEAS_BIT BIT(6) +#define BME680_MEAS_BIT BIT(5) /* Calibration Parameters */ #define BME680_T2_LSB_REG 0x8A diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c index ef5e0e46fd..500f56834b 100644 --- a/drivers/iio/chemical/bme680_core.c +++ b/drivers/iio/chemical/bme680_core.c @@ -10,6 +10,7 @@ */ #include <linux/acpi.h> #include <linux/bitfield.h> +#include <linux/delay.h> #include <linux/device.h> #include <linux/module.h> #include <linux/log2.h> @@ -38,7 +39,7 @@ struct bme680_calib { s8 par_h3; s8 par_h4; s8 par_h5; - s8 par_h6; + u8 par_h6; s8 par_h7; s8 par_gh1; s16 par_gh2; @@ -342,10 +343,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data, if (!calib->par_t2) bme680_read_calib(data, calib); - var1 = (adc_temp >> 3) - (calib->par_t1 << 1); + var1 = (adc_temp >> 3) - ((s32)calib->par_t1 << 1); var2 = (var1 * calib->par_t2) >> 11; var3 = ((var1 >> 1) * (var1 >> 1)) >> 12; - var3 = (var3 * (calib->par_t3 << 4)) >> 14; + var3 = (var3 * ((s32)calib->par_t3 << 4)) >> 14; data->t_fine = var2 + var3; calc_temp = (data->t_fine * 5 + 128) >> 8; @@ -368,9 +369,9 @@ static u32 bme680_compensate_press(struct bme680_data *data, var1 = (data->t_fine >> 1) - 64000; var2 = ((((var1 >> 2) * (var1 >> 2)) >> 11) * calib->par_p6) >> 2; var2 = var2 + (var1 * calib->par_p5 << 1); - var2 = (var2 >> 2) + (calib->par_p4 << 16); + var2 = (var2 >> 2) + ((s32)calib->par_p4 << 16); var1 = (((((var1 >> 2) * (var1 >> 2)) >> 13) * - (calib->par_p3 << 5)) >> 3) + + ((s32)calib->par_p3 << 5)) >> 3) + ((calib->par_p2 * var1) >> 1); var1 = var1 >> 18; var1 = ((32768 + var1) * calib->par_p1) >> 15; @@ -388,7 +389,7 @@ static u32 bme680_compensate_press(struct bme680_data *data, var3 = ((press_comp >> 8) * (press_comp >> 8) * (press_comp >> 8) * calib->par_p10) >> 17; - press_comp += (var1 + var2 + var3 + (calib->par_p7 << 7)) >> 4; + press_comp += (var1 + var2 + var3 + ((s32)calib->par_p7 << 7)) >> 4; return press_comp; } @@ -414,7 +415,7 @@ static u32 bme680_compensate_humid(struct bme680_data *data, (((temp_scaled * ((temp_scaled * calib->par_h5) / 100)) >> 6) / 100) + (1 << 14))) >> 10; var3 = var1 * var2; - var4 = calib->par_h6 << 7; + var4 = (s32)calib->par_h6 << 7; var4 = (var4 + ((temp_scaled * calib->par_h7) / 100)) >> 4; var5 = ((var3 >> 14) * (var3 >> 14)) >> 10; var6 = (var4 * var5) >> 1; @@ -532,6 +533,43 @@ static u8 bme680_oversampling_to_reg(u8 val) return ilog2(val) + 1; } +/* + * Taken from Bosch BME680 API: + * https://github.com/boschsensortec/BME68x_SensorAPI/blob/v4.4.8/bme68x.c#L490 + */ +static int bme680_wait_for_eoc(struct bme680_data *data) +{ + struct device *dev = regmap_get_device(data->regmap); + unsigned int check; + int ret; + /* + * (Sum of oversampling ratios * time per oversampling) + + * TPH measurement + gas measurement + wait transition from forced mode + * + heater duration + */ + int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press + + data->oversampling_humid) * 1936) + 
(477 * 4) + + (477 * 5) + 1000 + (data->heater_dur * 1000); + + usleep_range(wait_eoc_us, wait_eoc_us + 100); + + ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check); + if (ret) { + dev_err(dev, "failed to read measurement status register.\n"); + return ret; + } + if (check & BME680_MEAS_BIT) { + dev_err(dev, "Device measurement cycle incomplete.\n"); + return -EBUSY; + } + if (!(check & BME680_NEW_DATA_BIT)) { + dev_err(dev, "No new data available from the device.\n"); + return -ENODATA; + } + + return 0; +} + static int bme680_chip_config(struct bme680_data *data) { struct device *dev = regmap_get_device(data->regmap); @@ -622,6 +660,10 @@ static int bme680_read_temp(struct bme680_data *data, int *val) if (ret < 0) return ret; + ret = bme680_wait_for_eoc(data); + if (ret) + return ret; + ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB, &tmp, 3); if (ret < 0) { @@ -678,7 +720,7 @@ static int bme680_read_press(struct bme680_data *data, } *val = bme680_compensate_press(data, adc_press); - *val2 = 100; + *val2 = 1000; return IIO_VAL_FRACTIONAL; } @@ -738,6 +780,10 @@ static int bme680_read_gas(struct bme680_data *data, if (ret < 0) return ret; + ret = bme680_wait_for_eoc(data); + if (ret) + return ret; + ret = regmap_read(data->regmap, BME680_REG_MEAS_STAT_0, &check); if (check & BME680_GAS_MEAS_BIT) { dev_err(dev, "gas measurement incomplete\n"); diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c index 1e5d0d4797..1e702fcc48 100644 --- a/drivers/iio/humidity/hdc3020.c +++ b/drivers/iio/humidity/hdc3020.c @@ -18,6 +18,7 @@ #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/math64.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/units.h> @@ -63,8 +64,10 @@ #define HDC3020_CRC8_POLYNOMIAL 0x31 -#define HDC3020_MIN_TEMP -40 -#define HDC3020_MAX_TEMP 125 +#define HDC3020_MIN_TEMP_MICRO -39872968 +#define HDC3020_MAX_TEMP_MICRO 124875639 +#define HDC3020_MAX_TEMP_HYST_MICRO 164748607 +#define HDC3020_MAX_HUM_MICRO 99220264 struct hdc3020_data { struct i2c_client *client; @@ -363,6 +366,105 @@ static int hdc3020_write_raw(struct iio_dev *indio_dev, return -EINVAL; } +static int hdc3020_thresh_get_temp(u16 thresh) +{ + int temp; + + /* + * Get the temperature threshold from 9 LSBs, shift them to get + * the truncated temperature threshold representation and + * calculate the threshold according to the formula in the + * datasheet. Result is degree celsius scaled by 65535. + */ + temp = FIELD_GET(HDC3020_THRESH_TEMP_MASK, thresh) << + HDC3020_THRESH_TEMP_TRUNC_SHIFT; + + return -2949075 + (175 * temp); +} + +static int hdc3020_thresh_get_hum(u16 thresh) +{ + int hum; + + /* + * Get the humidity threshold from 7 MSBs, shift them to get the + * truncated humidity threshold representation and calculate the + * threshold according to the formula in the datasheet. Result is + * percent scaled by 65535. + */ + hum = FIELD_GET(HDC3020_THRESH_HUM_MASK, thresh) << + HDC3020_THRESH_HUM_TRUNC_SHIFT; + + return hum * 100; +} + +static u16 hdc3020_thresh_set_temp(int s_temp, u16 curr_thresh) +{ + u64 temp; + u16 thresh; + + /* + * Calculate temperature threshold, shift it down to get the + * truncated threshold representation in the 9LSBs while keeping + * the current humidity threshold in the 7 MSBs. 
+ */ + temp = (u64)(s_temp + 45000000) * 65535ULL; + temp = div_u64(temp, 1000000 * 175) >> HDC3020_THRESH_TEMP_TRUNC_SHIFT; + thresh = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, temp); + thresh |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, curr_thresh) << + HDC3020_THRESH_HUM_TRUNC_SHIFT); + + return thresh; +} + +static u16 hdc3020_thresh_set_hum(int s_hum, u16 curr_thresh) +{ + u64 hum; + u16 thresh; + + /* + * Calculate humidity threshold, shift it down and up to get the + * truncated threshold representation in the 7MSBs while keeping + * the current temperature threshold in the 9 LSBs. + */ + hum = (u64)(s_hum) * 65535ULL; + hum = div_u64(hum, 1000000 * 100) >> HDC3020_THRESH_HUM_TRUNC_SHIFT; + thresh = FIELD_PREP(HDC3020_THRESH_HUM_MASK, hum); + thresh |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, curr_thresh); + + return thresh; +} + +static +int hdc3020_thresh_clr(s64 s_thresh, s64 s_hyst, enum iio_event_direction dir) +{ + s64 s_clr; + + /* + * Include directions when calculation the clear value, + * since hysteresis is unsigned by definition and the + * clear value is an absolute value which is signed. + */ + if (dir == IIO_EV_DIR_RISING) + s_clr = s_thresh - s_hyst; + else + s_clr = s_thresh + s_hyst; + + /* Divide by 65535 to get units of micro */ + return div_s64(s_clr, 65535); +} + +static int _hdc3020_write_thresh(struct hdc3020_data *data, u16 reg, u16 val) +{ + u8 buf[5]; + + put_unaligned_be16(reg, buf); + put_unaligned_be16(val, buf + 2); + buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE); + + return hdc3020_write_bytes(data, buf, 5); +} + static int hdc3020_write_thresh(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, enum iio_event_type type, @@ -371,67 +473,126 @@ static int hdc3020_write_thresh(struct iio_dev *indio_dev, int val, int val2) { struct hdc3020_data *data = iio_priv(indio_dev); - u8 buf[5]; - u64 tmp; - u16 reg; - int ret; - - /* Supported temperature range is from –40 to 125 degree celsius */ - if (val < HDC3020_MIN_TEMP || val > HDC3020_MAX_TEMP) - return -EINVAL; - - /* Select threshold register */ - if (info == IIO_EV_INFO_VALUE) { - if (dir == IIO_EV_DIR_RISING) - reg = HDC3020_S_T_RH_THRESH_HIGH; - else - reg = HDC3020_S_T_RH_THRESH_LOW; + u16 reg, reg_val, reg_thresh_rd, reg_clr_rd, reg_thresh_wr, reg_clr_wr; + s64 s_thresh, s_hyst, s_clr; + int s_val, thresh, clr, ret; + + /* Select threshold registers */ + if (dir == IIO_EV_DIR_RISING) { + reg_thresh_rd = HDC3020_R_T_RH_THRESH_HIGH; + reg_thresh_wr = HDC3020_S_T_RH_THRESH_HIGH; + reg_clr_rd = HDC3020_R_T_RH_THRESH_HIGH_CLR; + reg_clr_wr = HDC3020_S_T_RH_THRESH_HIGH_CLR; } else { - if (dir == IIO_EV_DIR_RISING) - reg = HDC3020_S_T_RH_THRESH_HIGH_CLR; - else - reg = HDC3020_S_T_RH_THRESH_LOW_CLR; + reg_thresh_rd = HDC3020_R_T_RH_THRESH_LOW; + reg_thresh_wr = HDC3020_S_T_RH_THRESH_LOW; + reg_clr_rd = HDC3020_R_T_RH_THRESH_LOW_CLR; + reg_clr_wr = HDC3020_S_T_RH_THRESH_LOW_CLR; } guard(mutex)(&data->lock); - ret = hdc3020_read_be16(data, reg); + ret = hdc3020_read_be16(data, reg_thresh_rd); if (ret < 0) return ret; + thresh = ret; + ret = hdc3020_read_be16(data, reg_clr_rd); + if (ret < 0) + return ret; + + clr = ret; + /* Scale value to include decimal part into calculations */ + s_val = (val < 0) ? (val * 1000000 - val2) : (val * 1000000 + val2); switch (chan->type) { case IIO_TEMP: - /* - * Calculate temperature threshold, shift it down to get the - * truncated threshold representation in the 9LSBs while keeping - * the current humidity threshold in the 7 MSBs. 
- */ - tmp = ((u64)(((val + 45) * MICRO) + val2)) * 65535ULL; - tmp = div_u64(tmp, MICRO * 175); - val = tmp >> HDC3020_THRESH_TEMP_TRUNC_SHIFT; - val = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, val); - val |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, ret) << - HDC3020_THRESH_HUM_TRUNC_SHIFT); + switch (info) { + case IIO_EV_INFO_VALUE: + s_val = max(s_val, HDC3020_MIN_TEMP_MICRO); + s_val = min(s_val, HDC3020_MAX_TEMP_MICRO); + reg = reg_thresh_wr; + reg_val = hdc3020_thresh_set_temp(s_val, thresh); + ret = _hdc3020_write_thresh(data, reg, reg_val); + if (ret < 0) + return ret; + + /* Calculate old hysteresis */ + s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000; + s_clr = (s64)hdc3020_thresh_get_temp(clr) * 1000000; + s_hyst = div_s64(abs(s_thresh - s_clr), 65535); + /* Set new threshold */ + thresh = reg_val; + /* Set old hysteresis */ + s_val = s_hyst; + fallthrough; + case IIO_EV_INFO_HYSTERESIS: + /* + * Function hdc3020_thresh_get_temp returns temperature + * in degree celsius scaled by 65535. Scale by 1000000 + * to be able to subtract scaled hysteresis value. + */ + s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000; + /* + * Units of s_val are in micro degree celsius, scale by + * 65535 to get same units as s_thresh. + */ + s_val = min(abs(s_val), HDC3020_MAX_TEMP_HYST_MICRO); + s_hyst = (s64)s_val * 65535; + s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir); + s_clr = max(s_clr, HDC3020_MIN_TEMP_MICRO); + s_clr = min(s_clr, HDC3020_MAX_TEMP_MICRO); + reg = reg_clr_wr; + reg_val = hdc3020_thresh_set_temp(s_clr, clr); + break; + default: + return -EOPNOTSUPP; + } break; case IIO_HUMIDITYRELATIVE: - /* - * Calculate humidity threshold, shift it down and up to get the - * truncated threshold representation in the 7MSBs while keeping - * the current temperature threshold in the 9 LSBs. - */ - tmp = ((u64)((val * MICRO) + val2)) * 65535ULL; - tmp = div_u64(tmp, MICRO * 100); - val = tmp >> HDC3020_THRESH_HUM_TRUNC_SHIFT; - val = FIELD_PREP(HDC3020_THRESH_HUM_MASK, val); - val |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret); + s_val = (s_val < 0) ? 0 : min(s_val, HDC3020_MAX_HUM_MICRO); + switch (info) { + case IIO_EV_INFO_VALUE: + reg = reg_thresh_wr; + reg_val = hdc3020_thresh_set_hum(s_val, thresh); + ret = _hdc3020_write_thresh(data, reg, reg_val); + if (ret < 0) + return ret; + + /* Calculate old hysteresis */ + s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000; + s_clr = (s64)hdc3020_thresh_get_hum(clr) * 1000000; + s_hyst = div_s64(abs(s_thresh - s_clr), 65535); + /* Set new threshold */ + thresh = reg_val; + /* Try to set old hysteresis */ + s_val = min(abs(s_hyst), HDC3020_MAX_HUM_MICRO); + fallthrough; + case IIO_EV_INFO_HYSTERESIS: + /* + * Function hdc3020_thresh_get_hum returns relative + * humidity in percent scaled by 65535. Scale by 1000000 + * to be able to subtract scaled hysteresis value. + */ + s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000; + /* + * Units of s_val are in micro percent, scale by 65535 + * to get same units as s_thresh. 
+ */ + s_hyst = (s64)s_val * 65535; + s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir); + s_clr = max(s_clr, 0); + s_clr = min(s_clr, HDC3020_MAX_HUM_MICRO); + reg = reg_clr_wr; + reg_val = hdc3020_thresh_set_hum(s_clr, clr); + break; + default: + return -EOPNOTSUPP; + } break; default: return -EOPNOTSUPP; } - put_unaligned_be16(reg, buf); - put_unaligned_be16(val, buf + 2); - buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE); - return hdc3020_write_bytes(data, buf, 5); + return _hdc3020_write_thresh(data, reg, reg_val); } static int hdc3020_read_thresh(struct iio_dev *indio_dev, @@ -442,48 +603,60 @@ static int hdc3020_read_thresh(struct iio_dev *indio_dev, int *val, int *val2) { struct hdc3020_data *data = iio_priv(indio_dev); - u16 reg; - int ret; + u16 reg_thresh, reg_clr; + int thresh, clr, ret; - /* Select threshold register */ - if (info == IIO_EV_INFO_VALUE) { - if (dir == IIO_EV_DIR_RISING) - reg = HDC3020_R_T_RH_THRESH_HIGH; - else - reg = HDC3020_R_T_RH_THRESH_LOW; + /* Select threshold registers */ + if (dir == IIO_EV_DIR_RISING) { + reg_thresh = HDC3020_R_T_RH_THRESH_HIGH; + reg_clr = HDC3020_R_T_RH_THRESH_HIGH_CLR; } else { - if (dir == IIO_EV_DIR_RISING) - reg = HDC3020_R_T_RH_THRESH_HIGH_CLR; - else - reg = HDC3020_R_T_RH_THRESH_LOW_CLR; + reg_thresh = HDC3020_R_T_RH_THRESH_LOW; + reg_clr = HDC3020_R_T_RH_THRESH_LOW_CLR; } guard(mutex)(&data->lock); - ret = hdc3020_read_be16(data, reg); + ret = hdc3020_read_be16(data, reg_thresh); if (ret < 0) return ret; switch (chan->type) { case IIO_TEMP: - /* - * Get the temperature threshold from 9 LSBs, shift them to get - * the truncated temperature threshold representation and - * calculate the threshold according to the formula in the - * datasheet. - */ - *val = FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret); - *val = *val << HDC3020_THRESH_TEMP_TRUNC_SHIFT; - *val = -2949075 + (175 * (*val)); + thresh = hdc3020_thresh_get_temp(ret); + switch (info) { + case IIO_EV_INFO_VALUE: + *val = thresh; + break; + case IIO_EV_INFO_HYSTERESIS: + ret = hdc3020_read_be16(data, reg_clr); + if (ret < 0) + return ret; + + clr = hdc3020_thresh_get_temp(ret); + *val = abs(thresh - clr); + break; + default: + return -EOPNOTSUPP; + } *val2 = 65535; return IIO_VAL_FRACTIONAL; case IIO_HUMIDITYRELATIVE: - /* - * Get the humidity threshold from 7 MSBs, shift them to get the - * truncated humidity threshold representation and calculate the - * threshold according to the formula in the datasheet. 
- */ - *val = FIELD_GET(HDC3020_THRESH_HUM_MASK, ret); - *val = (*val << HDC3020_THRESH_HUM_TRUNC_SHIFT) * 100; + thresh = hdc3020_thresh_get_hum(ret); + switch (info) { + case IIO_EV_INFO_VALUE: + *val = thresh; + break; + case IIO_EV_INFO_HYSTERESIS: + ret = hdc3020_read_be16(data, reg_clr); + if (ret < 0) + return ret; + + clr = hdc3020_thresh_get_hum(ret); + *val = abs(thresh - clr); + break; + default: + return -EOPNOTSUPP; + } *val2 = 65535; return IIO_VAL_FRACTIONAL; default: diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index 01a499a8b8..438ed35881 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -37,22 +37,6 @@ int rdma_restrack_init(struct ib_device *dev) return 0; } -static const char *type2str(enum rdma_restrack_type type) -{ - static const char * const names[RDMA_RESTRACK_MAX] = { - [RDMA_RESTRACK_PD] = "PD", - [RDMA_RESTRACK_CQ] = "CQ", - [RDMA_RESTRACK_QP] = "QP", - [RDMA_RESTRACK_CM_ID] = "CM_ID", - [RDMA_RESTRACK_MR] = "MR", - [RDMA_RESTRACK_CTX] = "CTX", - [RDMA_RESTRACK_COUNTER] = "COUNTER", - [RDMA_RESTRACK_SRQ] = "SRQ", - }; - - return names[type]; -}; - /** * rdma_restrack_clean() - clean resource tracking * @dev: IB device @@ -60,47 +44,14 @@ static const char *type2str(enum rdma_restrack_type type) void rdma_restrack_clean(struct ib_device *dev) { struct rdma_restrack_root *rt = dev->res; - struct rdma_restrack_entry *e; - char buf[TASK_COMM_LEN]; - bool found = false; - const char *owner; int i; for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) { struct xarray *xa = &dev->res[i].xa; - if (!xa_empty(xa)) { - unsigned long index; - - if (!found) { - pr_err("restrack: %s", CUT_HERE); - dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n"); - } - xa_for_each(xa, index, e) { - if (rdma_is_kernel_res(e)) { - owner = e->kern_name; - } else { - /* - * There is no need to call get_task_struct here, - * because we can be here only if there are more - * get_task_struct() call than put_task_struct(). - */ - get_task_comm(buf, e->task); - owner = buf; - } - - pr_err("restrack: %s %s object allocated by %s is not freed\n", - rdma_is_kernel_res(e) ? "Kernel" : - "User", - type2str(e->type), owner); - } - found = true; - } + WARN_ON(!xa_empty(xa)); xa_destroy(xa); } - if (found) - pr_err("restrack: %s", CUT_HERE); - kfree(rt); } diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c index 31ffdc2a93..79bdb2b109 100644 --- a/drivers/input/touchscreen/ili210x.c +++ b/drivers/input/touchscreen/ili210x.c @@ -261,8 +261,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data) if (!error && data[0] == 2) { error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1, ILI251X_DATA_SIZE2); - if (error >= 0 && error != ILI251X_DATA_SIZE2) - error = -EIO; + if (error >= 0) + error = error == ILI251X_DATA_SIZE2 ? 
0 : -EIO; } return error; diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index f482aab420..95a161fdba 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -56,6 +56,7 @@ int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid); void amd_iommu_flush_all_caches(struct amd_iommu *iommu); void amd_iommu_update_and_flush_device_table(struct protection_domain *domain); void amd_iommu_domain_update(struct protection_domain *domain); +void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set); void amd_iommu_domain_flush_complete(struct protection_domain *domain); void amd_iommu_domain_flush_pages(struct protection_domain *domain, u64 address, size_t size); diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index e740dc54c4..21798a0fa9 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -2784,6 +2784,7 @@ static void early_enable_iommu(struct amd_iommu *iommu) iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); iommu_set_exclusion_range(iommu); + iommu_enable_gt(iommu); iommu_enable_ga(iommu); iommu_enable_xt(iommu); iommu_enable_irtcachedis(iommu); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index fb727f5b0b..e2b900ffbc 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2002,6 +2002,21 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid) amd_iommu_apply_erratum_63(iommu, devid); } +/* Update and flush DTE for the given device */ +void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set) +{ + struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); + + if (set) + set_dte_entry(iommu, dev_data); + else + clear_dte_entry(iommu, dev_data->devid); + + clone_aliases(iommu, dev_data->dev); + device_flush_dte(dev_data); + iommu_completion_wait(iommu); +} + static int do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) { @@ -2036,10 +2051,7 @@ static int do_attach(struct iommu_dev_data *dev_data, } /* Update device table */ - set_dte_entry(iommu, dev_data); - clone_aliases(iommu, dev_data->dev); - - device_flush_dte(dev_data); + amd_iommu_dev_update_dte(dev_data, true); return ret; } @@ -2049,6 +2061,12 @@ static void do_detach(struct iommu_dev_data *dev_data) struct protection_domain *domain = dev_data->domain; struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); + /* Clear DTE and flush the entry */ + amd_iommu_dev_update_dte(dev_data, false); + + /* Flush IOTLB and wait for the flushes to finish */ + amd_iommu_domain_flush_all(domain); + /* Clear GCR3 table */ if (domain->pd_mode == PD_MODE_V2) { update_gcr3(dev_data, 0, 0, false); @@ -2058,14 +2076,6 @@ static void do_detach(struct iommu_dev_data *dev_data) /* Update data structures */ dev_data->domain = NULL; list_del(&dev_data->list); - clear_dte_entry(iommu, dev_data->devid); - clone_aliases(iommu, dev_data->dev); - - /* Flush the DTE entry */ - device_flush_dte(dev_data); - - /* Flush IOTLB and wait for the flushes to finish */ - amd_iommu_domain_flush_all(domain); /* decrease reference counters - needs to happen after the flushes */ domain->dev_iommu[iommu->index] -= 1; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 2cd433a9c8..41b44baef1 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -569,6 +569,9 @@ static int arm_smmu_sva_set_dev_pasid(struct 
iommu_domain *domain, int ret = 0; struct mm_struct *mm = domain->mm; + if (mm_get_enqcmd_pasid(mm) != id) + return -EINVAL; + mutex_lock(&sva_lock); ret = __arm_smmu_sva_bind(dev, id, mm); mutex_unlock(&sva_lock); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 72c07a12f5..bfa1d77749 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -568,7 +568,7 @@ config IRQ_LOONGARCH_CPU bool select GENERIC_IRQ_CHIP select IRQ_DOMAIN - select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP select LOONGSON_HTVEC select LOONGSON_LIOINTC select LOONGSON_EIOINTC diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index b64cbe3052..f2921b26ba 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -15,6 +15,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/kernel.h> #include <linux/syscore_ops.h> +#include <asm/numa.h> #define EIOINTC_REG_NODEMAP 0x14a0 #define EIOINTC_REG_IPMAP 0x14c0 @@ -339,7 +340,7 @@ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, int node; if (cpu_has_flatmode) - node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE); + node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE); else node = eiointc_priv[nr_pics - 1]->node; @@ -431,7 +432,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent, goto out_free_handle; if (cpu_has_flatmode) - node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE); + node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE); else node = acpi_eiointc->node; acpi_set_vec_parent(node, priv->eiointc_domain, pch_group); diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index e4b33aed1c..7c4fe7ab4b 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -28,7 +28,7 @@ #define LIOINTC_INTC_CHIP_START 0x20 -#define LIOINTC_REG_INTC_STATUS (LIOINTC_INTC_CHIP_START + 0x20) +#define LIOINTC_REG_INTC_STATUS(core) (LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8) #define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04) #define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08) #define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c) @@ -217,7 +217,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision, goto out_free_priv; for (i = 0; i < LIOINTC_NUM_CORES; i++) - priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS; + priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i); for (i = 0; i < LIOINTC_NUM_PARENT; i++) priv->handler[i].parent_int_map = parent_int_map[i]; diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 733d0bc4b4..b43695bc51 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -956,7 +956,7 @@ int dvb_usercopy(struct file *file, int (*func)(struct file *file, unsigned int cmd, void *arg)) { - char sbuf[128]; + char sbuf[128] = {}; void *mbuf = NULL; void *parg = NULL; int err = -EINVAL; diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 9a5f75163a..8ede4ce932 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -131,10 +131,12 @@ struct moxart_host { struct dma_async_tx_descriptor *tx_desc; struct mmc_host *mmc; struct mmc_request *mrq; + struct scatterlist *cur_sg; struct completion dma_complete; struct completion pio_complete; - struct sg_mapping_iter 
sg_miter; + u32 num_sg; + u32 data_remain; u32 data_len; u32 fifo_width; u32 timeout; @@ -146,6 +148,35 @@ struct moxart_host { bool is_removed; }; +static inline void moxart_init_sg(struct moxart_host *host, + struct mmc_data *data) +{ + host->cur_sg = data->sg; + host->num_sg = data->sg_len; + host->data_remain = host->cur_sg->length; + + if (host->data_remain > host->data_len) + host->data_remain = host->data_len; +} + +static inline int moxart_next_sg(struct moxart_host *host) +{ + int remain; + struct mmc_data *data = host->mrq->cmd->data; + + host->cur_sg++; + host->num_sg--; + + if (host->num_sg > 0) { + host->data_remain = host->cur_sg->length; + remain = host->data_len - data->bytes_xfered; + if (remain > 0 && remain < host->data_remain) + host->data_remain = remain; + } + + return host->num_sg; +} + static int moxart_wait_for_status(struct moxart_host *host, u32 mask, u32 *status) { @@ -278,29 +309,14 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host) static void moxart_transfer_pio(struct moxart_host *host) { - struct sg_mapping_iter *sgm = &host->sg_miter; struct mmc_data *data = host->mrq->cmd->data; u32 *sgp, len = 0, remain, status; if (host->data_len == data->bytes_xfered) return; - /* - * By updating sgm->consumes this will get a proper pointer into the - * buffer at any time. - */ - if (!sg_miter_next(sgm)) { - /* This shold not happen */ - dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n"); - data->error = -EINVAL; - complete(&host->pio_complete); - return; - } - sgp = sgm->addr; - remain = sgm->length; - if (remain > host->data_len) - remain = host->data_len; - sgm->consumed = 0; + sgp = sg_virt(host->cur_sg); + remain = host->data_remain; if (data->flags & MMC_DATA_WRITE) { while (remain > 0) { @@ -315,7 +331,6 @@ static void moxart_transfer_pio(struct moxart_host *host) sgp++; len += 4; } - sgm->consumed += len; remain -= len; } @@ -332,22 +347,22 @@ static void moxart_transfer_pio(struct moxart_host *host) sgp++; len += 4; } - sgm->consumed += len; remain -= len; } } - data->bytes_xfered += sgm->consumed; - if (host->data_len == data->bytes_xfered) { + data->bytes_xfered += host->data_remain - remain; + host->data_remain = remain; + + if (host->data_len != data->bytes_xfered) + moxart_next_sg(host); + else complete(&host->pio_complete); - return; - } } static void moxart_prepare_data(struct moxart_host *host) { struct mmc_data *data = host->mrq->cmd->data; - unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */ u32 datactrl; int blksz_bits; @@ -358,19 +373,15 @@ static void moxart_prepare_data(struct moxart_host *host) blksz_bits = ffs(data->blksz) - 1; BUG_ON(1 << blksz_bits != data->blksz); + moxart_init_sg(host, data); + datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE); - if (data->flags & MMC_DATA_WRITE) { - flags |= SG_MITER_FROM_SG; + if (data->flags & MMC_DATA_WRITE) datactrl |= DCR_DATA_WRITE; - } else { - flags |= SG_MITER_TO_SG; - } if (moxart_use_dma(host)) datactrl |= DCR_DMA_EN; - else - sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL); writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR); @@ -443,9 +454,6 @@ static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq) } request_done: - if (!moxart_use_dma(host)) - sg_miter_stop(&host->sg_miter); - spin_unlock_irqrestore(&host->lock, flags); mmc_request_done(host->mmc, mrq); } diff --git a/drivers/mmc/host/sdhci-brcmstb.c 
b/drivers/mmc/host/sdhci-brcmstb.c index 9053526fa2..150fb477b7 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -24,6 +24,7 @@ #define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0) #define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1) #define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2) +#define BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY BIT(4) #define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0) #define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1) @@ -384,6 +385,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT) host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + if (!(match_priv->flags & BRCMSTB_MATCH_FLAGS_USE_CARD_BUSY)) + host->mmc_host_ops.card_busy = NULL; + /* Change the base clock frequency if the DT property exists */ if (device_property_read_u32(&pdev->dev, "clock-frequency", &priv->base_freq_hz) != 0) diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 025b31aa71..9fe40f342d 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1326,7 +1326,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch); if (ret) - return ret; + goto fail; /* * Turn PMOS on [bit 0], set over current detection to 2.4 V @@ -1337,7 +1337,10 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) else scratch &= ~0x47; - return pci_write_config_byte(chip->pdev, 0xAE, scratch); + ret = pci_write_config_byte(chip->pdev, 0xAE, scratch); + +fail: + return pcibios_err_to_errno(ret); } static int jmicron_probe(struct sdhci_pci_chip *chip) @@ -2202,7 +2205,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev, ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); if (ret) - return ret; + return pcibios_err_to_errno(ret); slots = PCI_SLOT_INFO_SLOTS(slots) + 1; dev_dbg(&pdev->dev, "found %d slot(s)\n", slots); @@ -2211,7 +2214,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev, ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar); if (ret) - return ret; + return pcibios_err_to_errno(ret); first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index d4a0218478..058bef1c7e 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -823,7 +823,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) - return ret; + goto read_fail; scratch &= 0x7f; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); @@ -834,7 +834,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch); if (ret) - return ret; + goto read_fail; scratch |= 0x20; pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch); @@ -843,7 +843,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) */ ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch); if (ret) - return ret; + goto read_fail; scratch |= 0x01; pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch); pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73); @@ -856,7 +856,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch); if (ret) - return ret; + goto read_fail; scratch |= 0x08; pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch); @@ -864,7 +864,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = 
pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) - return ret; + goto read_fail; scratch |= 0x80; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); break; @@ -875,7 +875,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) - return ret; + goto read_fail; scratch &= 0x7f; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); @@ -886,7 +886,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) O2_SD_FUNC_REG0, &scratch_32); if (ret) - return ret; + goto read_fail; scratch_32 = ((scratch_32 & 0xFF000000) >> 24); /* Check Whether subId is 0x11 or 0x12 */ @@ -898,7 +898,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) O2_SD_FUNC_REG4, &scratch_32); if (ret) - return ret; + goto read_fail; /* Enable Base Clk setting change */ scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET; @@ -921,7 +921,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_dword(chip->pdev, O2_SD_CLK_SETTING, &scratch_32); if (ret) - return ret; + goto read_fail; scratch_32 &= ~(0xFF00); scratch_32 |= 0x07E0C800; @@ -931,14 +931,14 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_dword(chip->pdev, O2_SD_CLKREQ, &scratch_32); if (ret) - return ret; + goto read_fail; scratch_32 |= 0x3; pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32); ret = pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32); if (ret) - return ret; + goto read_fail; scratch_32 &= ~(0x1F3F070E); scratch_32 |= 0x18270106; @@ -949,7 +949,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_dword(chip->pdev, O2_SD_CAP_REG2, &scratch_32); if (ret) - return ret; + goto read_fail; scratch_32 &= ~(0xE0); pci_write_config_dword(chip->pdev, O2_SD_CAP_REG2, scratch_32); @@ -961,7 +961,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) - return ret; + goto read_fail; scratch |= 0x80; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); break; @@ -971,7 +971,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) - return ret; + goto read_fail; scratch &= 0x7f; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); @@ -979,7 +979,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32); if (ret) - return ret; + goto read_fail; if ((scratch_32 & 0xff000000) == 0x01000000) { scratch_32 &= 0x0000FFFF; @@ -998,7 +998,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) O2_SD_FUNC_REG4, &scratch_32); if (ret) - return ret; + goto read_fail; scratch_32 |= (1 << 22); pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG4, scratch_32); @@ -1017,7 +1017,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) - return ret; + goto read_fail; scratch |= 0x80; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); break; @@ -1028,7 +1028,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) /* UnLock WP */ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) - return ret; + goto read_fail; scratch &= 0x7f; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); @@ -1057,13 +1057,16 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) /* Lock WP 
*/ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); if (ret) - return ret; + goto read_fail; scratch |= 0x80; pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); break; } return 0; + +read_fail: + return pcibios_err_to_errno(ret); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 746f4cf7ab..112584aa07 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2515,26 +2515,29 @@ EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio); static int sdhci_check_ro(struct sdhci_host *host) { - unsigned long flags; + bool allow_invert = false; int is_readonly; - spin_lock_irqsave(&host->lock, flags); - - if (host->flags & SDHCI_DEVICE_DEAD) + if (host->flags & SDHCI_DEVICE_DEAD) { is_readonly = 0; - else if (host->ops->get_ro) + } else if (host->ops->get_ro) { is_readonly = host->ops->get_ro(host); - else if (mmc_can_gpio_ro(host->mmc)) + } else if (mmc_can_gpio_ro(host->mmc)) { is_readonly = mmc_gpio_get_ro(host->mmc); - else + /* Do not invert twice */ + allow_invert = !(host->mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); + } else { is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_WRITE_PROTECT); + allow_invert = true; + } - spin_unlock_irqrestore(&host->lock, flags); + if (is_readonly >= 0 && + allow_invert && + (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)) + is_readonly = !is_readonly; - /* This quirk needs to be replaced by a callback-function later */ - return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ? - !is_readonly : is_readonly; + return is_readonly; } #define SAMPLE_COUNT 5 diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c index a16b42a885..3b55b676ca 100644 --- a/drivers/mtd/parsers/redboot.c +++ b/drivers/mtd/parsers/redboot.c @@ -102,7 +102,7 @@ nogood: offset -= master->erasesize; } } else { - offset = directory * master->erasesize; + offset = (unsigned long) directory * master->erasesize; while (mtd_block_isbad(master, offset)) { offset += master->erasesize; if (offset == master->size) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bceda85f0d..cb66310c8d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5773,6 +5773,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev, if (real_dev) { ret = ethtool_get_ts_info_by_layer(real_dev, info); } else { + info->phc_index = -1; + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; /* Check if all slaves support software tx timestamping */ rcu_read_lock(); bond_for_each_slave_rcu(bond, slave, iter) { diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 1d9057dc44..bf1589aef1 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -1618,11 +1618,20 @@ static int mcp251xfd_open(struct net_device *ndev) clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags); can_rx_offload_enable(&priv->offload); + priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq", + WQ_FREEZABLE | WQ_MEM_RECLAIM, + dev_name(&spi->dev)); + if (!priv->wq) { + err = -ENOMEM; + goto out_can_rx_offload_disable; + } + INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync); + err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq, IRQF_SHARED | IRQF_ONESHOT, dev_name(&spi->dev), priv); if (err) - goto out_can_rx_offload_disable; + goto out_destroy_workqueue; err = mcp251xfd_chip_interrupts_enable(priv); if (err) @@ -1634,6 +1643,8 @@ 
static int mcp251xfd_open(struct net_device *ndev) out_free_irq: free_irq(spi->irq, priv); + out_destroy_workqueue: + destroy_workqueue(priv->wq); out_can_rx_offload_disable: can_rx_offload_disable(&priv->offload); set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); @@ -1661,6 +1672,7 @@ static int mcp251xfd_stop(struct net_device *ndev) hrtimer_cancel(&priv->tx_irq_timer); mcp251xfd_chip_interrupts_disable(priv); free_irq(ndev->irq, priv); + destroy_workqueue(priv->wq); can_rx_offload_disable(&priv->offload); mcp251xfd_timestamp_stop(priv); mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c index 160528d3cc..b1de8052a4 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c @@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv, tx_obj->xfer[0].len = len; } +static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv, + struct mcp251xfd_tx_ring *tx_ring, + int err) +{ + struct net_device *ndev = priv->ndev; + struct net_device_stats *stats = &ndev->stats; + unsigned int frame_len = 0; + u8 tx_head; + + tx_ring->head--; + stats->tx_dropped++; + tx_head = mcp251xfd_get_tx_head(tx_ring); + can_free_echo_skb(ndev, tx_head, &frame_len); + netdev_completed_queue(ndev, 1, frame_len); + netif_wake_queue(ndev); + + if (net_ratelimit()) + netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err); +} + +void mcp251xfd_tx_obj_write_sync(struct work_struct *work) +{ + struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv, + tx_work); + struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj; + struct mcp251xfd_tx_ring *tx_ring = priv->tx; + int err; + + err = spi_sync(priv->spi, &tx_obj->msg); + if (err) + mcp251xfd_tx_failure_drop(priv, tx_ring, err); +} + static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv, struct mcp251xfd_tx_obj *tx_obj) { @@ -162,6 +195,11 @@ static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv, return false; } +static bool mcp251xfd_work_busy(struct work_struct *work) +{ + return work_busy(work); +} + netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, struct net_device *ndev) { @@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; - if (mcp251xfd_tx_busy(priv, tx_ring)) + if (mcp251xfd_tx_busy(priv, tx_ring) || + mcp251xfd_work_busy(&priv->tx_work)) return NETDEV_TX_BUSY; tx_obj = mcp251xfd_get_tx_obj_next(tx_ring); @@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, netdev_sent_queue(priv->ndev, frame_len); err = mcp251xfd_tx_obj_write(priv, tx_obj); - if (err) - goto out_err; - - return NETDEV_TX_OK; - - out_err: - netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err); + if (err == -EBUSY) { + netif_stop_queue(ndev); + priv->tx_work_obj = tx_obj; + queue_work(priv->wq, &priv->tx_work); + } else if (err) { + mcp251xfd_tx_failure_drop(priv, tx_ring, err); + } return NETDEV_TX_OK; } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index 24510b3b80..b35bfebd23 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -633,6 +633,10 @@ struct mcp251xfd_priv { struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM]; struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM]; + struct workqueue_struct *wq; + struct work_struct tx_work; + struct mcp251xfd_tx_obj 
*tx_work_obj; + DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__); u8 rx_ring_num; @@ -952,6 +956,7 @@ void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); +void mcp251xfd_tx_obj_write_sync(struct work_struct *work); netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, struct net_device *ndev); diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 7f745628c8..dde5c65c2c 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -355,10 +355,8 @@ int ksz9477_reset_switch(struct ksz_device *dev) SPI_AUTO_EDGE_DETECTION, 0); /* default configuration */ - ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8); - data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING | - SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE; - ksz_write8(dev, REG_SW_LUE_CTRL_1, data8); + ksz_write8(dev, REG_SW_LUE_CTRL_1, + SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER); /* disable interrupts */ ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK); @@ -1305,6 +1303,10 @@ int ksz9477_setup(struct dsa_switch *ds) /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true); + /* Use collision based back pressure mode. */ + ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE, + SW_BACK_PRESSURE_COLLISION); + /* Now we can configure default MTU value */ ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK, VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index f3a205ee48..fb124be8ed 100644 --- a/drivers/net/dsa/microchip/ksz9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -247,6 +247,7 @@ #define REG_SW_MAC_CTRL_1 0x0331 #define SW_BACK_PRESSURE BIT(5) +#define SW_BACK_PRESSURE_COLLISION 0 #define FAIR_FLOW_CTRL BIT(4) #define NO_EXC_COLLISION_DROP BIT(3) #define SW_JUMBO_PACKET BIT(2) diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 2a5861a88d..e54f83a2e7 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -2129,7 +2129,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d) struct ksz_device *dev = kirq->dev; int ret; - ret = ksz_write32(dev, kirq->reg_mask, kirq->masked); + ret = ksz_write8(dev, kirq->reg_mask, kirq->masked); if (ret) dev_err(dev->dev, "failed to change IRQ mask\n"); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 888509cf1f..40e8818295 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2896,11 +2896,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, static int update_xps(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; - struct cpumask xps_mask; - struct dpaa2_eth_fq *fq; int i, num_queues, netdev_queues; + struct dpaa2_eth_fq *fq; + cpumask_var_t xps_mask; int err = 0; + if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL)) + return -ENOMEM; + num_queues = dpaa2_eth_queue_count(priv); netdev_queues = (net_dev->num_tc ? 
: 1) * num_queues; @@ -2910,16 +2913,17 @@ static int update_xps(struct dpaa2_eth_priv *priv) for (i = 0; i < netdev_queues; i++) { fq = &priv->fq[i % num_queues]; - cpumask_clear(&xps_mask); - cpumask_set_cpu(fq->target_cpu, &xps_mask); + cpumask_clear(xps_mask); + cpumask_set_cpu(fq->target_cpu, xps_mask); - err = netif_set_xps_queue(net_dev, &xps_mask, i); + err = netif_set_xps_queue(net_dev, xps_mask, i); if (err) { netdev_warn_once(net_dev, "Error setting XPS queue\n"); break; } } + free_cpumask_var(xps_mask); return err; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 30c47b8470..722bb72436 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4057,6 +4057,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) adapter->num_active_tx_scrqs = 0; } + /* Clean any remaining outstanding SKBs + * we freed the irq so we won't be hearing + * from them + */ + clean_tx_pools(adapter); + if (adapter->rx_scrq) { for (i = 0; i < adapter->num_active_rx_scrqs; i++) { if (!adapter->rx_scrq[i]) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 61eef3259c..88d4675cc3 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4102,7 +4102,7 @@ bool ice_is_wol_supported(struct ice_hw *hw) int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) { struct ice_pf *pf = vsi->back; - int err = 0, timeout = 50; + int i, err = 0, timeout = 50; if (!new_rx && !new_tx) return -EINVAL; @@ -4128,6 +4128,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) ice_vsi_close(vsi); ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + + ice_for_each_traffic_class(i) { + if (vsi->tc_cfg.ena_tc & BIT(i)) + netdev_set_tc_queue(vsi->netdev, + vsi->tc_cfg.tc_info[i].netdev_tc, + vsi->tc_cfg.tc_info[i].qcount_tx, + vsi->tc_cfg.tc_info[i].qoffset); + } ice_pf_dcb_recfg(pf, locked); ice_vsi_open(vsi); done: diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index f42a1b1c93..653a47dd43 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1490,18 +1490,25 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci, return -EBUSY; } -static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci) +static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, + bool pci_reset_sbr_supported) { struct pci_dev *pdev = mlxsw_pci->pdev; char mrsr_pl[MLXSW_REG_MRSR_LEN]; int err; + if (!pci_reset_sbr_supported) { + pci_dbg(pdev, "Performing PCI hot reset instead of \"all reset\"\n"); + goto sbr; + } + mlxsw_reg_mrsr_pack(mrsr_pl, MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE); err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); if (err) return err; +sbr: device_lock_assert(&pdev->dev); pci_cfg_access_lock(pdev); @@ -1529,6 +1536,7 @@ static int mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id) { struct pci_dev *pdev = mlxsw_pci->pdev; + bool pci_reset_sbr_supported = false; char mcam_pl[MLXSW_REG_MCAM_LEN]; bool pci_reset_supported = false; u32 sys_status; @@ -1548,13 +1556,17 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id) mlxsw_reg_mcam_pack(mcam_pl, MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES); err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl); - if (!err) + if (!err) 
{ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET, &pci_reset_supported); + mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET_SBR, + &pci_reset_sbr_supported); + } if (pci_reset_supported) { pci_dbg(pdev, "Starting PCI reset flow\n"); - err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci); + err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci, + pci_reset_sbr_supported); } else { pci_dbg(pdev, "Starting software reset flow\n"); err = mlxsw_pci_reset_sw(mlxsw_pci); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 8892654c68..010eecab51 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -10668,6 +10668,8 @@ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits { MLXSW_REG_MCAM_MCIA_128B = 34, /* If set, MRSR.command=6 is supported. */ MLXSW_REG_MCAM_PCI_RESET = 48, + /* If set, MRSR.command=6 is supported with Secondary Bus Reset. */ + MLXSW_REG_MCAM_PCI_RESET_SBR = 67, }; #define MLXSW_REG_BYTES_PER_DWORD 0x4 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index c9f1c79f3f..ba090262e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -1607,8 +1607,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, unsigned int sb_index) { + u16 local_port, local_port_1, first_local_port, last_local_port; struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - u16 local_port, local_port_1, last_local_port; struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; u8 masked_count, current_page = 0; unsigned long cb_priv = 0; @@ -1628,6 +1628,7 @@ next_batch: masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, false); mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); + first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE; last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; @@ -1645,9 +1646,12 @@ next_batch: if (local_port != MLXSW_PORT_CPU_PORT) { /* Ingress quotas are not supported for the CPU port */ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, - local_port, 1); + local_port - first_local_port, + 1); } - mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); + mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, + local_port - first_local_port, + 1); for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, &bulk_list); @@ -1684,7 +1688,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, unsigned int sb_index) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - u16 local_port, last_local_port; + u16 local_port, first_local_port, last_local_port; LIST_HEAD(bulk_list); unsigned int masked_count; u8 current_page = 0; @@ -1702,6 +1706,7 @@ next_batch: masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, true); mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); + first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE; last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; @@ -1719,9 +1724,12 @@ next_batch: if (local_port != MLXSW_PORT_CPU_PORT) { /* Ingress quotas are not supported for the CPU port */ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, - local_port, 1); + local_port - first_local_port, + 1); } - mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); + 
mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, + local_port - first_local_port, + 1); for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, &bulk_list); diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index d8af5e7e15..191d50ba64 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -2800,6 +2800,8 @@ static int add_adev(struct gdma_dev *gd) if (ret) goto init_fail; + /* madev is owned by the auxiliary device */ + madev = NULL; ret = auxiliary_device_add(adev); if (ret) goto add_fail; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index f30eee4a5a..b6c01a8809 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -375,7 +375,9 @@ typedef void (*ionic_cq_done_cb)(void *done_arg); unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, ionic_cq_cb cb, ionic_cq_done_cb done_cb, void *done_arg); -unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do); +unsigned int ionic_tx_cq_service(struct ionic_cq *cq, + unsigned int work_to_do, + bool in_napi); int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, struct ionic_queue *q, unsigned int index, const char *name, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 0cd819bc4a..1dec4ebd70 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1189,7 +1189,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) ionic_rx_service, NULL, NULL); if (lif->hwstamp_txq) - tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget); + tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget); work_done = max(max(n_work, a_work), max(rx_work, tx_work)); if (work_done < budget && napi_complete_done(napi, work_done)) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 2427610f43..9fdd7cd3ef 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -23,7 +23,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, static void ionic_tx_clean(struct ionic_queue *q, struct ionic_tx_desc_info *desc_info, - struct ionic_txq_comp *comp); + struct ionic_txq_comp *comp, + bool in_napi); static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell) { @@ -480,6 +481,20 @@ int ionic_xdp_xmit(struct net_device *netdev, int n, return nxmit; } +static void ionic_xdp_rx_put_bufs(struct ionic_queue *q, + struct ionic_buf_info *buf_info, + int nbufs) +{ + int i; + + for (i = 0; i < nbufs; i++) { + dma_unmap_page(q->dev, buf_info->dma_addr, + IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + buf_info->page = NULL; + buf_info++; + } +} + static bool ionic_run_xdp(struct ionic_rx_stats *stats, struct net_device *netdev, struct bpf_prog *xdp_prog, @@ -493,6 +508,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, struct netdev_queue *nq; struct xdp_frame *xdpf; int remain_len; + int nbufs = 1; int frag_len; int err = 0; @@ -542,6 +558,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, if (page_is_pfmemalloc(bi->page)) xdp_buff_set_frag_pfmemalloc(&xdp_buf); } while (remain_len > 0); + nbufs += sinfo->nr_frags; } xdp_action 
= bpf_prog_run_xdp(xdp_prog, &xdp_buf); @@ -574,9 +591,6 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, goto out_xdp_abort; } - dma_unmap_page(rxq->dev, buf_info->dma_addr, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - err = ionic_xdp_post_frame(txq, xdpf, XDP_TX, buf_info->page, buf_info->page_offset, @@ -586,23 +600,19 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err); goto out_xdp_abort; } - buf_info->page = NULL; + ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs); stats->xdp_tx++; /* the Tx completion will free the buffers */ break; case XDP_REDIRECT: - /* unmap the pages before handing them to a different device */ - dma_unmap_page(rxq->dev, buf_info->dma_addr, - IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog); if (err) { netdev_dbg(netdev, "xdp_do_redirect err %d\n", err); goto out_xdp_abort; } - buf_info->page = NULL; + ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs); rxq->xdp_flush = true; stats->xdp_redirect++; break; @@ -935,7 +945,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) u32 work_done = 0; u32 flags = 0; - work_done = ionic_tx_cq_service(cq, budget); + work_done = ionic_tx_cq_service(cq, budget, !!budget); if (unlikely(!budget)) return budget; @@ -1019,7 +1029,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) txqcq = lif->txqcqs[qi]; txcq = &lif->txqcqs[qi]->cq; - tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT); + tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, !!budget); if (unlikely(!budget)) return budget; @@ -1152,7 +1162,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, static void ionic_tx_clean(struct ionic_queue *q, struct ionic_tx_desc_info *desc_info, - struct ionic_txq_comp *comp) + struct ionic_txq_comp *comp, + bool in_napi) { struct ionic_tx_stats *stats = q_to_tx_stats(q); struct ionic_qcq *qcq = q_to_qcq(q); @@ -1204,11 +1215,13 @@ static void ionic_tx_clean(struct ionic_queue *q, desc_info->bytes = skb->len; stats->clean++; - napi_consume_skb(skb, 1); + napi_consume_skb(skb, likely(in_napi) ? 
1 : 0); } static bool ionic_tx_service(struct ionic_cq *cq, - unsigned int *total_pkts, unsigned int *total_bytes) + unsigned int *total_pkts, + unsigned int *total_bytes, + bool in_napi) { struct ionic_tx_desc_info *desc_info; struct ionic_queue *q = cq->bound_q; @@ -1230,7 +1243,7 @@ static bool ionic_tx_service(struct ionic_cq *cq, desc_info->bytes = 0; index = q->tail_idx; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); - ionic_tx_clean(q, desc_info, comp); + ionic_tx_clean(q, desc_info, comp, in_napi); if (desc_info->skb) { pkts++; bytes += desc_info->bytes; @@ -1244,7 +1257,9 @@ static bool ionic_tx_service(struct ionic_cq *cq, return true; } -unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do) +unsigned int ionic_tx_cq_service(struct ionic_cq *cq, + unsigned int work_to_do, + bool in_napi) { unsigned int work_done = 0; unsigned int bytes = 0; @@ -1253,7 +1268,7 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do) if (work_to_do == 0) return 0; - while (ionic_tx_service(cq, &pkts, &bytes)) { + while (ionic_tx_service(cq, &pkts, &bytes, in_napi)) { if (cq->tail_idx == cq->num_descs - 1) cq->done_color = !cq->done_color; cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); @@ -1279,7 +1294,7 @@ void ionic_tx_flush(struct ionic_cq *cq) { u32 work_done; - work_done = ionic_tx_cq_service(cq, cq->num_descs); + work_done = ionic_tx_cq_service(cq, cq->num_descs, false); if (work_done) ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index, work_done, IONIC_INTR_CRED_RESET_COALESCE); @@ -1296,7 +1311,7 @@ void ionic_tx_empty(struct ionic_queue *q) desc_info = &q->tx_info[q->tail_idx]; desc_info->bytes = 0; q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); - ionic_tx_clean(q, desc_info, NULL); + ionic_tx_clean(q, desc_info, NULL, false); if (desc_info->skb) { pkts++; bytes += desc_info->bytes; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 4b22bb6393..4a28b654ce 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -5094,6 +5094,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = { { PHY_ID_KSZ8081, MICREL_PHY_ID_MASK }, { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK }, { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK }, + { PHY_ID_KSZ9477, MICREL_PHY_ID_MASK }, { PHY_ID_LAN8814, MICREL_PHY_ID_MASK }, { PHY_ID_LAN8804, MICREL_PHY_ID_MASK }, { PHY_ID_LAN8841, MICREL_PHY_ID_MASK }, diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 52b71c7e78..e4b759690c 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -385,23 +385,18 @@ static void sfp_fixup_rollball(struct sfp *sfp) sfp->phy_t_retry = msecs_to_jiffies(1000); } -static void sfp_fixup_fs_2_5gt(struct sfp *sfp) +static void sfp_fixup_fs_10gt(struct sfp *sfp) { + sfp_fixup_10gbaset_30m(sfp); sfp_fixup_rollball(sfp); - /* The RollBall fixup is not enough for FS modules, the PHY chip inside + /* The RollBall fixup is not enough for FS modules, the AQR chip inside * them does not return 0xffff for PHY ID registers in all MMDs for the * while initializing. They need a 4 second wait before accessing PHY. */ sfp->module_t_wait = msecs_to_jiffies(4000); } -static void sfp_fixup_fs_10gt(struct sfp *sfp) -{ - sfp_fixup_10gbaset_30m(sfp); - sfp_fixup_fs_2_5gt(sfp); -} - static void sfp_fixup_halny_gsfp(struct sfp *sfp) { /* Ignore the TX_FAULT and LOS signals on this module. @@ -477,10 +472,6 @@ static const struct sfp_quirk sfp_quirks[] = { // Rollball protocol to talk to the PHY. 
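Editor's note on the ionic hunks above (a sketch, not part of the patch): napi_consume_skb() called with a non-zero budget may put the skb on a per-CPU cache that is only safe to touch from NAPI poll context, while a budget of 0 falls back to the any-context free path. That is what the new in_napi flag threads down to ionic_tx_clean(): the poll functions pass !!budget, and ionic_tx_flush()/ionic_tx_empty() pass false.

static void example_tx_complete(struct sk_buff *skb, bool in_napi)
{
	/* in_napi == false covers the netpoll (budget 0) and teardown paths */
	napi_consume_skb(skb, in_napi ? 1 : 0);
}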
SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt), - // Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and - // needs 4 sec wait before probing the PHY. - SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt), - // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd // NRZ in their EEPROM SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex, @@ -497,6 +488,9 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex, sfp_fixup_ignore_tx_fault), + // FS 2.5G Base-T + SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g), + // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report // 2500MBd NRZ in their EEPROM SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex), diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 94d8b647f0..bb70b56c97 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -326,7 +326,8 @@ static void ax88179_status(struct usbnet *dev, struct urb *urb) if (netif_carrier_ok(dev->net) != link) { usbnet_link_change(dev, link, 1); - netdev_info(dev->net, "ax88179 - Link status is: %d\n", link); + if (!link) + netdev_info(dev->net, "ax88179 - Link status is: 0\n"); } } @@ -1540,6 +1541,7 @@ static int ax88179_link_reset(struct usbnet *dev) GMII_PHY_PHYSR, 2, &tmp16); if (!(tmp16 & GMII_PHY_PHYSR_LINK)) { + netdev_info(dev->net, "ax88179 - Link status is: 0\n"); return 0; } else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) { mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ; @@ -1577,6 +1579,8 @@ static int ax88179_link_reset(struct usbnet *dev) netif_carrier_on(dev->net); + netdev_info(dev->net, "ax88179 - Link status is: 1\n"); + return 0; } diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index b2d054f59f..49779ba3c4 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -2336,7 +2336,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct ip_tunnel_key *pkey; struct ip_tunnel_key key; struct vxlan_dev *vxlan = netdev_priv(dev); - const struct iphdr *old_iph = ip_hdr(skb); + const struct iphdr *old_iph; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; unsigned int pkt_len = skb->len; @@ -2350,8 +2350,15 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, bool use_cache; bool udp_sum = false; bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); + bool no_eth_encap; __be32 vni = 0; + no_eth_encap = flags & VXLAN_F_GPE && skb->protocol != htons(ETH_P_TEB); + if (!skb_vlan_inet_prepare(skb, no_eth_encap)) + goto drop; + + old_iph = ip_hdr(skb); + info = skb_tunnel_info(skb); use_cache = ip_tunnel_dst_cache_usable(skb, info); diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 185cd339c0..6c75ebbb21 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -1349,13 +1349,12 @@ dump: static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) { u32 val32; - u16 val16; val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); - val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2); - rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16); + val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); + rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); rtw89_fw_prog_cnt_dump(rtwdev); } @@ -1394,8 +1393,9 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, return 0; } -int 
rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, - bool include_bb) +static +int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, + bool include_bb) { const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct rtw89_fw_info *fw_info = &rtwdev->fw; @@ -1433,7 +1433,7 @@ int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); if (ret) { rtw89_warn(rtwdev, "download firmware fail\n"); - return ret; + goto fwdl_err; } return ret; @@ -1443,6 +1443,21 @@ fwdl_err: return ret; } +int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, + bool include_bb) +{ + int retry; + int ret; + + for (retry = 0; retry < 5; retry++) { + ret = __rtw89_fw_download(rtwdev, type, include_bb); + if (!ret) + return 0; + } + + return ret; +} + int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) { struct rtw89_fw_info *fw = &rtwdev->fw; diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index dfdff6aba6..d80c3b93d6 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -410,7 +410,29 @@ static ssize_t nvmet_addr_tsas_show(struct config_item *item, return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name); } } - return sprintf(page, "reserved\n"); + return sprintf(page, "\n"); +} + +static u8 nvmet_addr_tsas_rdma_store(const char *page) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) { + if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name)) + return nvmet_addr_tsas_rdma[i].type; + } + return NVMF_RDMA_QPTYPE_INVALID; +} + +static u8 nvmet_addr_tsas_tcp_store(const char *page) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) { + if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) + return nvmet_addr_tsas_tcp[i].type; + } + return NVMF_TCP_SECTYPE_INVALID; } static ssize_t nvmet_addr_tsas_store(struct config_item *item, @@ -418,20 +440,19 @@ static ssize_t nvmet_addr_tsas_store(struct config_item *item, { struct nvmet_port *port = to_nvmet_port(item); u8 treq = nvmet_port_disc_addr_treq_mask(port); - u8 sectype; - int i; + u8 sectype, qptype; if (nvmet_is_port_enabled(port, __func__)) return -EACCES; - if (port->disc_addr.trtype != NVMF_TRTYPE_TCP) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) { - if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) { - sectype = nvmet_addr_tsas_tcp[i].type; + if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) { + qptype = nvmet_addr_tsas_rdma_store(page); + if (qptype == port->disc_addr.tsas.rdma.qptype) + return count; + } else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) { + sectype = nvmet_addr_tsas_tcp_store(page); + if (sectype != NVMF_TCP_SECTYPE_INVALID) goto found; - } } pr_err("Invalid value '%s' for tsas\n", page); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 337ee1cb09..381b439473 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -148,7 +148,7 @@ struct nvmet_fc_tgt_queue { struct workqueue_struct *work_q; struct kref ref; /* array of fcp_iods */ - struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize); + struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */; } __aligned(sizeof(unsigned long long)); struct nvmet_fc_hostport { diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index 682fa87747..d9d3f57c6f 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -349,7 +349,7 @@ static int msi_capability_init(struct pci_dev *dev, int 
nvec, struct irq_affinity *affd) { struct irq_affinity_desc *masks = NULL; - struct msi_desc *entry; + struct msi_desc *entry, desc; int ret; /* Reject multi-MSI early on irq domain enabled architectures */ @@ -374,6 +374,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, /* All MSIs are unmasked by default; mask them all */ entry = msi_first_desc(&dev->dev, MSI_DESC_ALL); pci_msi_mask(entry, msi_multi_mask(entry)); + /* + * Copy the MSI descriptor for the error path because + * pci_msi_setup_msi_irqs() will free it for the hierarchical + * interrupt domain case. + */ + memcpy(&desc, entry, sizeof(desc)); /* Configure MSI capability structure */ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); @@ -393,7 +399,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, goto unlock; err: - pci_msi_unmask(entry, msi_multi_mask(entry)); + pci_msi_unmask(&desc, msi_multi_mask(&desc)); pci_free_msi_irqs(dev); fail: dev->msi_enabled = 0; diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index cffeb86913..f424a57f00 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1106,8 +1106,8 @@ static struct pinctrl *create_pinctrl(struct device *dev, * an -EPROBE_DEFER later, as that is the worst case. */ if (ret == -EPROBE_DEFER) { - pinctrl_free(p, false); mutex_unlock(&pinctrl_maps_mutex); + pinctrl_free(p, false); return ERR_PTR(ret); } } diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 3bedf36a00..3f56991f5b 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -634,23 +634,68 @@ static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = { static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = { { - .num = 2, - .pin = 12, - .reg = 0x24, - .bit = 8, - .mask = 0x3 - }, { + /* gpio2_b7_sel */ .num = 2, .pin = 15, .reg = 0x28, .bit = 0, .mask = 0x7 }, { + /* gpio2_c7_sel */ .num = 2, .pin = 23, .reg = 0x30, .bit = 14, .mask = 0x3 + }, { + /* gpio3_b1_sel */ + .num = 3, + .pin = 9, + .reg = 0x44, + .bit = 2, + .mask = 0x3 + }, { + /* gpio3_b2_sel */ + .num = 3, + .pin = 10, + .reg = 0x44, + .bit = 4, + .mask = 0x3 + }, { + /* gpio3_b3_sel */ + .num = 3, + .pin = 11, + .reg = 0x44, + .bit = 6, + .mask = 0x3 + }, { + /* gpio3_b4_sel */ + .num = 3, + .pin = 12, + .reg = 0x44, + .bit = 8, + .mask = 0x3 + }, { + /* gpio3_b5_sel */ + .num = 3, + .pin = 13, + .reg = 0x44, + .bit = 10, + .mask = 0x3 + }, { + /* gpio3_b6_sel */ + .num = 3, + .pin = 14, + .reg = 0x44, + .bit = 12, + .mask = 0x3 + }, { + /* gpio3_b7_sel */ + .num = 3, + .pin = 15, + .reg = 0x44, + .bit = 14, + .mask = 0x3 }, }; @@ -2433,6 +2478,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num) case RK3188: case RK3288: case RK3308: + case RK3328: case RK3368: case RK3399: case RK3568: @@ -2491,6 +2537,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, case RK3188: case RK3288: case RK3308: + case RK3328: case RK3368: case RK3399: case RK3568: @@ -2704,8 +2751,10 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, if (ret) { /* revert the already done pin settings */ - for (cnt--; cnt >= 0; cnt--) + for (cnt--; cnt >= 0; cnt--) { + bank = pin_to_bank(info, pins[cnt]); rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0); + } return ret; } @@ -2753,6 +2802,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl, case RK3188: case RK3288: case RK3308: + case RK3328: case RK3368: case RK3399: case 
RK3568: @@ -3763,7 +3813,7 @@ static struct rockchip_pin_bank rk3328_pin_banks[] = { PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0), PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0), PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0, - IOMUX_WIDTH_3BIT, + 0, IOMUX_WIDTH_3BIT, 0), PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", @@ -3777,7 +3827,7 @@ static struct rockchip_pin_ctrl rk3328_pin_ctrl = { .pin_banks = rk3328_pin_banks, .nr_banks = ARRAY_SIZE(rk3328_pin_banks), .label = "RK3328-GPIO", - .type = RK3288, + .type = RK3328, .grf_mux_offset = 0x0, .iomux_recalced = rk3328_mux_recalced_data, .niomux_recalced = ARRAY_SIZE(rk3328_mux_recalced_data), diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h index 4759f33694..849266f8b1 100644 --- a/drivers/pinctrl/pinctrl-rockchip.h +++ b/drivers/pinctrl/pinctrl-rockchip.h @@ -193,6 +193,7 @@ enum rockchip_pinctrl_type { RK3188, RK3288, RK3308, + RK3328, RK3368, RK3399, RK3568, diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index f4e2c88a7c..e61be7d054 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -1206,7 +1206,6 @@ static const struct of_device_id pmic_gpio_of_match[] = { { .compatible = "qcom,pm7325-gpio", .data = (void *) 10 }, { .compatible = "qcom,pm7550ba-gpio", .data = (void *) 8}, { .compatible = "qcom,pm8005-gpio", .data = (void *) 4 }, - { .compatible = "qcom,pm8008-gpio", .data = (void *) 2 }, { .compatible = "qcom,pm8019-gpio", .data = (void *) 6 }, /* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */ { .compatible = "qcom,pm8150-gpio", .data = (void *) 10 }, diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index 248ab71b9f..747b22cd38 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -2071,11 +2071,11 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl) * This has to be atomically executed to protect against a concurrent * interrupt. */ - raw_spin_lock_irqsave(&pctrl->lock.rlock, flags); + spin_lock_irqsave(&pctrl->lock, flags); ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data)); if (!ret && !irqd_irq_disabled(data)) rzg2l_gpio_irq_enable(data); - raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags); + spin_unlock_irqrestore(&pctrl->lock, flags); if (ret) dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq); diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c index 0c028d17c0..5baa487f35 100644 --- a/drivers/pwm/pwm-stm32.c +++ b/drivers/pwm/pwm-stm32.c @@ -309,29 +309,43 @@ unlock: } static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch, - int duty_ns, int period_ns) + u64 duty_ns, u64 period_ns) { - unsigned long long prd, div, dty; - unsigned int prescaler = 0; + unsigned long long prd, dty; + unsigned long long prescaler; u32 ccmr, mask, shift; - /* Period and prescaler values depends on clock rate */ - div = (unsigned long long)clk_get_rate(priv->clk) * period_ns; - - do_div(div, NSEC_PER_SEC); - prd = div; - - while (div > priv->max_arr) { - prescaler++; - div = prd; - do_div(div, prescaler + 1); - } - - prd = div; + /* + * .probe() asserted that clk_get_rate() is not bigger than 1 GHz, so + * the calculations here won't overflow. 
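Editor's note on the stm32-pwm rework that starts above (the derivation continues in the comment just below): as a concrete check of the prescaler bound, a hypothetical clock of 100 MHz with max_arr = 0xffff and period_ns = 1 ms gives prescaler = (10^6 * 10^8) / (10^9 * 65536) = 1 by integer division, and then prd = (10^6 * 10^8) / (10^9 * (1 + 1)) = 50000, which fits the 16-bit auto-reload register, whereas prescaler = 0 would give prd = 100000 and overflow it. A minimal sketch of the same computation, assuming <linux/math64.h> and <linux/time64.h>; the function name and parameters are illustrative:

static u64 example_min_prescaler(u64 period_ns, unsigned long clkrate, u32 max_arr)
{
	/* smallest prescaler such that the period still fits in max_arr + 1 ticks */
	return mul_u64_u64_div_u64(period_ns, clkrate,
				   (u64)NSEC_PER_SEC * ((u64)max_arr + 1));
}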
+ * First we need to find the minimal value for prescaler such that + * + * period_ns * clkrate + * ------------------------------ < max_arr + 1 + * NSEC_PER_SEC * (prescaler + 1) + * + * This equation is equivalent to + * + * period_ns * clkrate + * ---------------------------- < prescaler + 1 + * NSEC_PER_SEC * (max_arr + 1) + * + * Using integer division and knowing that the right hand side is + * integer, this is further equivalent to + * + * (period_ns * clkrate) // (NSEC_PER_SEC * (max_arr + 1)) ≤ prescaler + */ + prescaler = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk), + (u64)NSEC_PER_SEC * ((u64)priv->max_arr + 1)); if (prescaler > MAX_TIM_PSC) return -EINVAL; + prd = mul_u64_u64_div_u64(period_ns, clk_get_rate(priv->clk), + (u64)NSEC_PER_SEC * (prescaler + 1)); + if (!prd) + return -EINVAL; + /* * All channels share the same prescaler and counter so when two * channels are active at the same time we can't change them @@ -351,8 +365,8 @@ static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch, regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE); /* Calculate the duty cycles */ - dty = prd * duty_ns; - do_div(dty, period_ns); + dty = mul_u64_u64_div_u64(duty_ns, clk_get_rate(priv->clk), + (u64)NSEC_PER_SEC * (prescaler + 1)); regmap_write(priv->regmap, TIM_CCR1 + 4 * ch, dty); @@ -656,6 +670,18 @@ static int stm32_pwm_probe(struct platform_device *pdev) stm32_pwm_detect_complementary(priv); + ret = devm_clk_rate_exclusive_get(dev, priv->clk); + if (ret) + return dev_err_probe(dev, ret, "Failed to lock clock\n"); + + /* + * With the clk running with not more than 1 GHz the calculations in + * .apply() won't overflow. + */ + if (clk_get_rate(priv->clk) > 1000000000) + return dev_err_probe(dev, -EINVAL, "Clock freq too high (%lu)\n", + clk_get_rate(priv->clk)); + chip->ops = &stm32pwm_ops; /* Initialize clock refcount to number of enabled PWM channels. */ diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 85b27c42cf..f426b4c391 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -68,6 +68,7 @@ config RESET_BRCMSTB_RESCAL config RESET_GPIO tristate "GPIO reset controller" + depends on GPIOLIB help This enables a generic reset controller for resets attached via GPIOs. Typically for OF platforms this driver expects "reset-gpios" diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index d7569f3955..d6491fc84e 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -698,6 +698,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, dma64_t *indicatorp = NULL; int ret, i, queue_idx = 0; struct ccw1 *ccw; + dma32_t indicatorp_dma = 0; ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); if (!ccw) @@ -725,7 +726,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, */ indicatorp = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*indicatorp), - &ccw->cda); + &indicatorp_dma); if (!indicatorp) goto out; *indicatorp = indicators_dma(vcdev); @@ -735,6 +736,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, /* no error, just fall back to legacy interrupts */ vcdev->is_thinint = false; } + ccw->cda = indicatorp_dma; if (!vcdev->is_thinint) { /* Register queue indicators with host. 
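The pwm-stm32 hunk above first derives the smallest usable prescaler from the requested period and only then computes the period and duty counts with mul_u64_u64_div_u64(). As a purely illustrative, stand-alone rerun of that arithmetic (the 216 MHz clock, 16-bit auto-reload register, 1 kHz period and 25% duty below are made-up example numbers, not values from the patch; mul_div() stands in for the kernel helper and relies on the compiler's 128-bit integer support):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* stand-in for mul_u64_u64_div_u64(): (a * b) / c without intermediate overflow */
static uint64_t mul_div(uint64_t a, uint64_t b, uint64_t c)
{
	return (uint64_t)(((unsigned __int128)a * b) / c);
}

int main(void)
{
	uint64_t clkrate   = 216000000;	/* example timer clock: 216 MHz */
	uint64_t max_arr   = 0xFFFF;	/* example 16-bit auto-reload register */
	uint64_t period_ns = 1000000;	/* requested 1 kHz period */
	uint64_t duty_ns   = 250000;	/* requested 25% duty cycle */

	/* smallest prescaler that keeps the period count representable in ARR */
	uint64_t prescaler = mul_div(period_ns, clkrate,
				     NSEC_PER_SEC * (max_arr + 1));
	/* period and duty expressed in timer ticks at that prescaler */
	uint64_t prd = mul_div(period_ns, clkrate,
			       NSEC_PER_SEC * (prescaler + 1));
	uint64_t dty = mul_div(duty_ns, clkrate,
			       NSEC_PER_SEC * (prescaler + 1));

	/* with the example numbers this prints: prescaler=3 prd=54000 dty=13500 */
	printf("prescaler=%llu prd=%llu dty=%llu\n",
	       (unsigned long long)prescaler,
	       (unsigned long long)prd,
	       (unsigned long long)dty);
	return 0;
}

With these example inputs the exact-division reasoning in the comment holds: 3 is the smallest prescaler for which the 54000-tick period still fits below max_arr + 1.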
*/ *indicators(vcdev) = 0; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 12e2653846..70891a1e98 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -610,15 +610,15 @@ int sas_ata_init(struct domain_device *found_dev) rc = ata_sas_tport_add(ata_host->dev, ap); if (rc) - goto destroy_port; + goto free_port; found_dev->sata_dev.ata_host = ata_host; found_dev->sata_dev.ap = ap; return 0; -destroy_port: - kfree(ap); +free_port: + ata_port_free(ap); free_host: ata_host_put(ata_host); return rc; diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 8fb7c41c09..48d975c6db 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -301,7 +301,7 @@ void sas_free_device(struct kref *kref) if (dev_is_sata(dev) && dev->sata_dev.ap) { ata_sas_tport_delete(dev->sata_dev.ap); - kfree(dev->sata_dev.ap); + ata_port_free(dev->sata_dev.ap); ata_host_put(dev->sata_dev.ata_host); dev->sata_dev.ata_host = NULL; dev->sata_dev.ap = NULL; diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 6a1c6b34c4..88f774db92 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -16,7 +16,6 @@ #include <linux/irq.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/omap-mailbox.h> #include <linux/platform_device.h> #include <linux/remoteproc.h> #include <linux/suspend.h> @@ -314,7 +313,6 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data) static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc) { struct device *dev = m3_ipc->dev; - mbox_msg_t dummy_msg = 0; int ret; if (!m3_ipc->mbox) { @@ -330,7 +328,7 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc) * the RX callback to avoid multiple interrupts being received * by the CM3. */ - ret = mbox_send_message(m3_ipc->mbox, &dummy_msg); + ret = mbox_send_message(m3_ipc->mbox, NULL); if (ret < 0) { dev_err(dev, "%s: mbox_send_message() failed: %d\n", __func__, ret); @@ -352,7 +350,6 @@ static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc) static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc) { struct device *dev = m3_ipc->dev; - mbox_msg_t dummy_msg = 0; int ret; if (!m3_ipc->mbox) { @@ -361,7 +358,7 @@ static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc) return -EIO; } - ret = mbox_send_message(m3_ipc->mbox, &dummy_msg); + ret = mbox_send_message(m3_ipc->mbox, NULL); if (ret < 0) { dev_err(dev, "%s: mbox_send_message() failed: %d\n", __func__, ret); diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 458bb1280e..5b97e420a9 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -288,7 +288,7 @@ struct mxser_board { enum mxser_must_hwid must_hwid; speed_t max_baud; - struct mxser_port ports[] __counted_by(nports); + struct mxser_port ports[] /* __counted_by(nports) */; }; static DECLARE_BITMAP(mxser_boards, MXSER_BOARDS); diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 66901d9308..ddf23ccbc9 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -116,6 +116,10 @@ /* RX FIFO occupancy indicator */ #define UART_OMAP_RX_LVL 0x19 +/* Timeout low and High */ +#define UART_OMAP_TO_L 0x26 +#define UART_OMAP_TO_H 0x27 + /* * Copy of the genpd flags for the console. 
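The libsas hunks above replace the bare kfree(ap) calls with ata_port_free(ap), so the port's sub-allocations are released on the same error paths. The general shape is a NULL-tolerant teardown helper that every exit path can call unconditionally; a minimal userspace sketch of that pattern (all names invented for illustration):

#include <stdlib.h>

/* invented stand-in for a port object that owns several sub-allocations */
struct port {
	char *pmp_link;
	char *slave_link;
	char *sense_buf;
};

/*
 * Free the port and everything it owns.  Accepting NULL, like kfree()
 * does, lets partially constructed ports be torn down exactly the same
 * way as fully built ones.
 */
static void port_free(struct port *p)
{
	if (!p)
		return;
	free(p->pmp_link);
	free(p->slave_link);
	free(p->sense_buf);
	free(p);
}

int main(void)
{
	struct port *p = calloc(1, sizeof(*p));

	port_free(p);		/* also safe when p == NULL */
	return 0;
}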
* Only used if console suspend is disabled @@ -664,13 +668,25 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id) /* * On K3 SoCs, it is observed that RX TIMEOUT is signalled after - * FIFO has been drained, in which case a dummy read of RX FIFO - * is required to clear RX TIMEOUT condition. + * FIFO has been drained or erroneously. + * So apply solution of Errata i2310 as mentioned in + * https://www.ti.com/lit/pdf/sprz536 */ if (priv->habit & UART_RX_TIMEOUT_QUIRK && (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT && serial_port_in(port, UART_OMAP_RX_LVL) == 0) { - serial_port_in(port, UART_RX); + unsigned char efr2, timeout_h, timeout_l; + + efr2 = serial_in(up, UART_OMAP_EFR2); + timeout_h = serial_in(up, UART_OMAP_TO_H); + timeout_l = serial_in(up, UART_OMAP_TO_L); + serial_out(up, UART_OMAP_TO_H, 0xFF); + serial_out(up, UART_OMAP_TO_L, 0xFF); + serial_out(up, UART_OMAP_EFR2, UART_OMAP_EFR2_TIMEOUT_BEHAVE); + serial_in(up, UART_IIR); + serial_out(up, UART_OMAP_EFR2, efr2); + serial_out(up, UART_OMAP_TO_H, timeout_h); + serial_out(up, UART_OMAP_TO_L, timeout_l); } /* Stop processing interrupts on input overrun */ diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index e2e4f99f9d..fe0fb2b4e9 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1985,6 +1985,17 @@ enum { MOXA_SUPP_RS485 = BIT(2), }; +static unsigned short moxa_get_nports(unsigned short device) +{ + switch (device) { + case PCI_DEVICE_ID_MOXA_CP116E_A_A: + case PCI_DEVICE_ID_MOXA_CP116E_A_B: + return 8; + } + + return FIELD_GET(0x00F0, device); +} + static bool pci_moxa_is_mini_pcie(unsigned short device) { if (device == PCI_DEVICE_ID_MOXA_CP102N || @@ -2038,7 +2049,7 @@ static int pci_moxa_init(struct pci_dev *dev) { unsigned short device = dev->device; resource_size_t iobar_addr = pci_resource_start(dev, 2); - unsigned int num_ports = (device & 0x00F0) >> 4, i; + unsigned int i, num_ports = moxa_get_nports(device); u8 val, init_mode = MOXA_RS232; if (!(pci_moxa_supported_rs(dev) & MOXA_SUPP_RS232)) { diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 34801a6f30..b88cc28c94 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -308,8 +308,8 @@ static void bcm_uart_do_tx(struct uart_port *port) val = bcm_uart_readl(port, UART_MCTL_REG); val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT; - - pending = uart_port_tx_limited(port, ch, port->fifosize - val, + pending = uart_port_tx_limited_flags(port, ch, UART_TX_NOSTOP, + port->fifosize - val, true, bcm_uart_writel(port, ch, UART_FIFO_REG), ({})); @@ -320,6 +320,9 @@ static void bcm_uart_do_tx(struct uart_port *port) val = bcm_uart_readl(port, UART_IR_REG); val &= ~UART_TX_INT_MASK; bcm_uart_writel(port, val, UART_IR_REG); + + if (uart_tx_stopped(port)) + bcm_uart_stop_tx(port); } /* diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 09c1678ddf..9552228d21 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -119,6 +119,7 @@ #define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */ #define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */ #define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */ +#define UFCR_RXTL_MASK 0x3F /* Receiver trigger 6 bits wide */ #define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */ #define UFCR_RFDIV (7<<7) /* Reference freq divider mask */ #define UFCR_RFDIV_REG(x) (((x) < 7 ? 
6 - (x) : 6) << 7) @@ -1941,7 +1942,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio struct serial_rs485 *rs485conf) { struct imx_port *sport = (struct imx_port *)port; - u32 ucr2; + u32 ucr2, ufcr; if (rs485conf->flags & SER_RS485_ENABLED) { /* Enable receiver if low-active RTS signal is requested */ @@ -1960,8 +1961,13 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio /* Make sure Rx is enabled in case Tx is active with Rx disabled */ if (!(rs485conf->flags & SER_RS485_ENABLED) || - rs485conf->flags & SER_RS485_RX_DURING_TX) + rs485conf->flags & SER_RS485_RX_DURING_TX) { + /* If the receiver trigger is 0, set it to a default value */ + ufcr = imx_uart_readl(sport, UFCR); + if ((ufcr & UFCR_RXTL_MASK) == 0) + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); imx_uart_start_rx(port); + } return 0; } diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c index b0604d6da0..58858dd352 100644 --- a/drivers/tty/serial/mcf.c +++ b/drivers/tty/serial/mcf.c @@ -462,7 +462,7 @@ static const struct uart_ops mcf_uart_ops = { .verify_port = mcf_verify_port, }; -static struct mcf_uart mcf_ports[4]; +static struct mcf_uart mcf_ports[10]; #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports) diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 4ce7cba2b4..8f3b9a0a38 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -1131,6 +1131,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, struct cxacru_data *instance; struct usb_device *usb_dev = interface_to_usbdev(intf); struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; + struct usb_endpoint_descriptor *in, *out; int ret; /* instance init */ @@ -1177,6 +1178,19 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, goto fail; } + if (usb_endpoint_xfer_int(&cmd_ep->desc)) + ret = usb_find_common_endpoints(intf->cur_altsetting, + NULL, NULL, &in, &out); + else + ret = usb_find_common_endpoints(intf->cur_altsetting, + &in, &out, NULL, NULL); + + if (ret) { + usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n"); + ret = -ENODEV; + goto fail; + } + if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { usb_fill_int_urb(instance->rcv_urb, diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 100041320e..cfe754f480 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -879,12 +879,16 @@ static bool dwc3_core_is_valid(struct dwc3 *dwc) static void dwc3_core_setup_global_control(struct dwc3 *dwc) { + unsigned int power_opt; + unsigned int hw_mode; u32 reg; reg = dwc3_readl(dwc->regs, DWC3_GCTL); reg &= ~DWC3_GCTL_SCALEDOWN_MASK; + hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); + power_opt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1); - switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) { + switch (power_opt) { case DWC3_GHWPARAMS1_EN_PWROPT_CLK: /** * WORKAROUND: DWC3 revisions between 2.10a and 2.50a have an @@ -917,6 +921,20 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc) break; } + /* + * This is a workaround for STAR#4846132, which only affects + * DWC_usb31 version2.00a operating in host mode. + * + * There is a problem in DWC_usb31 version 2.00a operating + * in host mode that would cause a CSR read timeout When CSR + * read coincides with RAM Clock Gating Entry. By disable + * Clock Gating, sacrificing power consumption for normal + * operation. 
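The 8250_pci hunk above stops deriving the MOXA port count purely from bits 7:4 of the PCI device ID, because the CP116E variants carry 8 ports even though that nibble decodes differently. A stand-alone sketch of the decode follows; the two CP116E IDs are placeholders (the real values live in pci_ids.h), and 0x1042 is a made-up ID whose bits 7:4 happen to encode 4:

#include <stdint.h>
#include <stdio.h>

/* placeholder IDs for the two CP116E variants; real values are in pci_ids.h */
#define ID_CP116E_A_A	0x1160
#define ID_CP116E_A_B	0x1161

/*
 * Most MOXA device IDs encode the port count in bits 7:4, which is what
 * the driver now extracts with FIELD_GET(0x00F0, device); boards that
 * break the scheme are special-cased, as moxa_get_nports() does.
 */
static unsigned int moxa_nports(uint16_t device)
{
	switch (device) {
	case ID_CP116E_A_A:
	case ID_CP116E_A_B:
		return 8;
	}
	return (device & 0x00F0) >> 4;
}

int main(void)
{
	printf("0x1042  -> %u ports\n", moxa_nports(0x1042));		/* 4, from bits 7:4 */
	printf("CP116E  -> %u ports\n", moxa_nports(ID_CP116E_A_A));	/* 8, special-cased */
	return 0;
}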
+ */ + if (power_opt != DWC3_GHWPARAMS1_EN_PWROPT_NO && + hw_mode != DWC3_GHWPARAMS0_MODE_GADGET && DWC3_VER_IS(DWC31, 200A)) + reg |= DWC3_GCTL_DSBLCLKGTNG; + /* check if current dwc3 is on simulation board */ if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) { dev_info(dwc->dev, "Running with FPGA optimizations\n"); @@ -2084,7 +2102,6 @@ assert_reset: static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) { - unsigned long flags; u32 reg; switch (dwc->current_dr_role) { @@ -2122,9 +2139,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) break; if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { - spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_suspend(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); synchronize_irq(dwc->irq_gadget); } @@ -2141,7 +2156,6 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) { - unsigned long flags; int ret; u32 reg; @@ -2190,9 +2204,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) { dwc3_otg_host_init(dwc); } else if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { - spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_resume(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); } break; diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index ba7d180cc9..44e20c6c36 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -213,6 +213,7 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget, struct usb_endpoint_descriptor *ss) { switch (gadget->speed) { + case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: return ss; case USB_SPEED_HIGH: @@ -449,11 +450,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr) mutex_lock(&dev->lock_printer_io); spin_lock_irqsave(&dev->lock, flags); - if (dev->interface < 0) { - spin_unlock_irqrestore(&dev->lock, flags); - mutex_unlock(&dev->lock_printer_io); - return -ENODEV; - } + if (dev->interface < 0) + goto out_disabled; /* We will use this flag later to check if a printer reset happened * after we turn interrupts back on. @@ -461,6 +459,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr) dev->reset_printer = 0; setup_rx_reqs(dev); + /* this dropped the lock - need to retest */ + if (dev->interface < 0) + goto out_disabled; bytes_copied = 0; current_rx_req = dev->current_rx_req; @@ -494,6 +495,8 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr) wait_event_interruptible(dev->rx_wait, (likely(!list_empty(&dev->rx_buffers)))); spin_lock_irqsave(&dev->lock, flags); + if (dev->interface < 0) + goto out_disabled; } /* We have data to return then copy it to the caller's buffer.*/ @@ -537,6 +540,9 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr) return -EAGAIN; } + if (dev->interface < 0) + goto out_disabled; + /* If we not returning all the data left in this RX request * buffer then adjust the amount of data left in the buffer. 
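The f_printer changes above funnel every "interface went away" test through a single out_disabled label and, more importantly, repeat that test each time the code has dropped dev->lock (in setup_rx_reqs(), around the wait queues, and around the copy to user space), because the function can be unbound while the lock is not held. The same discipline in a minimal pthread sketch, with all names invented:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct printer {
	pthread_mutex_t lock;
	pthread_cond_t rx_wait;
	bool bound;		/* stands in for dev->interface >= 0 */
	int rx_available;
};

static int printer_read_one(struct printer *p)
{
	pthread_mutex_lock(&p->lock);
	if (!p->bound)
		goto out_disabled;

	while (!p->rx_available) {
		/* cond_wait drops the lock while sleeping ... */
		pthread_cond_wait(&p->rx_wait, &p->lock);
		/* ... so the device may have gone away meanwhile: re-check */
		if (!p->bound)
			goto out_disabled;
	}
	p->rx_available--;
	pthread_mutex_unlock(&p->lock);
	return 0;

out_disabled:
	pthread_mutex_unlock(&p->lock);
	return -ENODEV;
}

int main(void)
{
	struct printer p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.rx_wait = PTHREAD_COND_INITIALIZER,
		.bound = true,
		.rx_available = 1,
	};

	printf("read: %d\n", printer_read_one(&p));
	return 0;
}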
* Othewise if we are done with this RX request buffer then @@ -566,6 +572,11 @@ printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr) return bytes_copied; else return -EAGAIN; + +out_disabled: + spin_unlock_irqrestore(&dev->lock, flags); + mutex_unlock(&dev->lock_printer_io); + return -ENODEV; } static ssize_t @@ -586,11 +597,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) mutex_lock(&dev->lock_printer_io); spin_lock_irqsave(&dev->lock, flags); - if (dev->interface < 0) { - spin_unlock_irqrestore(&dev->lock, flags); - mutex_unlock(&dev->lock_printer_io); - return -ENODEV; - } + if (dev->interface < 0) + goto out_disabled; /* Check if a printer reset happens while we have interrupts on */ dev->reset_printer = 0; @@ -613,6 +621,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) wait_event_interruptible(dev->tx_wait, (likely(!list_empty(&dev->tx_reqs)))); spin_lock_irqsave(&dev->lock, flags); + if (dev->interface < 0) + goto out_disabled; } while (likely(!list_empty(&dev->tx_reqs)) && len) { @@ -662,6 +672,9 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) return -EAGAIN; } + if (dev->interface < 0) + goto out_disabled; + list_add(&req->list, &dev->tx_reqs_active); /* here, we unlock, and only unlock, to avoid deadlock. */ @@ -674,6 +687,8 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) mutex_unlock(&dev->lock_printer_io); return -EAGAIN; } + if (dev->interface < 0) + goto out_disabled; } spin_unlock_irqrestore(&dev->lock, flags); @@ -685,6 +700,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) return bytes_copied; else return -EAGAIN; + +out_disabled: + spin_unlock_irqrestore(&dev->lock, flags); + mutex_unlock(&dev->lock_printer_io); + return -ENODEV; } static int diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 444212c0b5..be3b0bad55 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -1163,8 +1163,6 @@ struct net_device *gether_connect(struct gether *link) if (netif_running(dev->net)) eth_start(dev, GFP_ATOMIC); - netif_device_attach(dev->net); - /* on error, disable any endpoints */ } else { (void) usb_ep_disable(link->out_ep); @@ -1202,7 +1200,7 @@ void gether_disconnect(struct gether *link) DBG(dev, "%s\n", __func__); - netif_device_detach(dev->net); + netif_stop_queue(dev->net); netif_carrier_off(dev->net); /* disable endpoints, forcing (synchronous) completion diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c index 3916c8e2ba..821a6ab5da 100644 --- a/drivers/usb/gadget/udc/aspeed_udc.c +++ b/drivers/usb/gadget/udc/aspeed_udc.c @@ -66,8 +66,8 @@ #define USB_UPSTREAM_EN BIT(0) /* Main config reg */ -#define UDC_CFG_SET_ADDR(x) ((x) & 0x3f) -#define UDC_CFG_ADDR_MASK (0x3f) +#define UDC_CFG_SET_ADDR(x) ((x) & UDC_CFG_ADDR_MASK) +#define UDC_CFG_ADDR_MASK GENMASK(6, 0) /* Interrupt ctrl & status reg */ #define UDC_IRQ_EP_POOL_NAK BIT(17) diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 8abf3a567e..108d9a593a 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -556,7 +556,7 @@ static int da8xx_probe(struct platform_device *pdev) ret = of_platform_populate(pdev->dev.of_node, NULL, da8xx_auxdata_lookup, &pdev->dev); if (ret) - return ret; + goto err_unregister_phy; pinfo = da8xx_dev_info; pinfo.parent = &pdev->dev; @@ -571,9 
+571,13 @@ static int da8xx_probe(struct platform_device *pdev) ret = PTR_ERR_OR_ZERO(glue->musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device: %d\n", ret); - usb_phy_generic_unregister(glue->usb_phy); + goto err_unregister_phy; } + return 0; + +err_unregister_phy: + usb_phy_generic_unregister(glue->usb_phy); return ret; } diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 7801501837..911d774c98 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -49,22 +49,16 @@ static int ucsi_read_message_in(struct ucsi *ucsi, void *buf, return ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, buf, buf_size); } -static int ucsi_acknowledge_command(struct ucsi *ucsi) +static int ucsi_acknowledge(struct ucsi *ucsi, bool conn_ack) { u64 ctrl; ctrl = UCSI_ACK_CC_CI; ctrl |= UCSI_ACK_COMMAND_COMPLETE; - - return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl)); -} - -static int ucsi_acknowledge_connector_change(struct ucsi *ucsi) -{ - u64 ctrl; - - ctrl = UCSI_ACK_CC_CI; - ctrl |= UCSI_ACK_CONNECTOR_CHANGE; + if (conn_ack) { + clear_bit(EVENT_PENDING, &ucsi->flags); + ctrl |= UCSI_ACK_CONNECTOR_CHANGE; + } return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl)); } @@ -77,7 +71,7 @@ static int ucsi_read_error(struct ucsi *ucsi) int ret; /* Acknowledge the command that failed */ - ret = ucsi_acknowledge_command(ucsi); + ret = ucsi_acknowledge(ucsi, false); if (ret) return ret; @@ -89,7 +83,7 @@ static int ucsi_read_error(struct ucsi *ucsi) if (ret) return ret; - ret = ucsi_acknowledge_command(ucsi); + ret = ucsi_acknowledge(ucsi, false); if (ret) return ret; @@ -152,28 +146,33 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) return -EIO; if (cci & UCSI_CCI_NOT_SUPPORTED) { - if (ucsi_acknowledge_command(ucsi) < 0) + if (ucsi_acknowledge(ucsi, false) < 0) dev_err(ucsi->dev, "ACK of unsupported command failed\n"); return -EOPNOTSUPP; } if (cci & UCSI_CCI_ERROR) { - if (cmd == UCSI_GET_ERROR_STATUS) + if (cmd == UCSI_GET_ERROR_STATUS) { + ret = ucsi_acknowledge(ucsi, false); + if (ret) + return ret; + return -EIO; + } return ucsi_read_error(ucsi); } if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) { - ret = ucsi_acknowledge_command(ucsi); + ret = ucsi_acknowledge(ucsi, false); return ret ? 
ret : -EBUSY; } return UCSI_CCI_LENGTH(cci); } -int ucsi_send_command(struct ucsi *ucsi, u64 command, - void *data, size_t size) +static int ucsi_send_command_common(struct ucsi *ucsi, u64 command, + void *data, size_t size, bool conn_ack) { u8 length; int ret; @@ -192,7 +191,7 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command, goto out; } - ret = ucsi_acknowledge_command(ucsi); + ret = ucsi_acknowledge(ucsi, conn_ack); if (ret) goto out; @@ -201,6 +200,12 @@ out: mutex_unlock(&ucsi->ppm_lock); return ret; } + +int ucsi_send_command(struct ucsi *ucsi, u64 command, + void *data, size_t size) +{ + return ucsi_send_command_common(ucsi, command, data, size, false); +} EXPORT_SYMBOL_GPL(ucsi_send_command); /* -------------------------------------------------------------------------- */ @@ -1157,7 +1162,9 @@ static void ucsi_handle_connector_change(struct work_struct *work) mutex_lock(&con->lock); command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num); - ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status)); + + ret = ucsi_send_command_common(ucsi, command, &con->status, + sizeof(con->status), true); if (ret < 0) { dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n", __func__, ret); @@ -1214,14 +1221,6 @@ static void ucsi_handle_connector_change(struct work_struct *work) if (con->status.change & UCSI_CONSTAT_CAM_CHANGE) ucsi_partner_task(con, ucsi_check_altmodes, 1, 0); - mutex_lock(&ucsi->ppm_lock); - clear_bit(EVENT_PENDING, &con->ucsi->flags); - ret = ucsi_acknowledge_connector_change(ucsi); - mutex_unlock(&ucsi->ppm_lock); - - if (ret) - dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret); - out_unlock: mutex_unlock(&con->lock); } diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c index 1d0e2d87e9..4763bd6e55 100644 --- a/drivers/usb/typec/ucsi/ucsi_glink.c +++ b/drivers/usb/typec/ucsi/ucsi_glink.c @@ -365,6 +365,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev, ret = fwnode_property_read_u32(fwnode, "reg", &port); if (ret < 0) { dev_err(dev, "missing reg property of %pOFn\n", fwnode); + fwnode_handle_put(fwnode); return ret; } @@ -379,9 +380,11 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev, if (!desc) continue; - if (IS_ERR(desc)) + if (IS_ERR(desc)) { + fwnode_handle_put(fwnode); return dev_err_probe(dev, PTR_ERR(desc), "unable to acquire orientation gpio\n"); + } ucsi->port_orientation[port] = desc; ucsi->port_switch[port] = fwnode_typec_switch_get(fwnode); diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c index 93d7806681..1d7ee833eb 100644 --- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c +++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c @@ -64,6 +64,7 @@ struct ucsi_stm32g0 { struct completion complete; struct device *dev; unsigned long flags; +#define ACK_PENDING 2 const char *fw_name; struct ucsi *ucsi; bool suspended; @@ -395,9 +396,13 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const size_t len) { struct ucsi_stm32g0 *g0 = ucsi_get_drvdata(ucsi); + bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI; int ret; - set_bit(COMMAND_PENDING, &g0->flags); + if (ack) + set_bit(ACK_PENDING, &g0->flags); + else + set_bit(COMMAND_PENDING, &g0->flags); ret = ucsi_stm32g0_async_write(ucsi, offset, val, len); if (ret) @@ -405,9 +410,14 @@ static int ucsi_stm32g0_sync_write(struct ucsi *ucsi, unsigned int offset, const if (!wait_for_completion_timeout(&g0->complete, msecs_to_jiffies(5000))) ret = 
-ETIMEDOUT; + else + return 0; out_clear_bit: - clear_bit(COMMAND_PENDING, &g0->flags); + if (ack) + clear_bit(ACK_PENDING, &g0->flags); + else + clear_bit(COMMAND_PENDING, &g0->flags); return ret; } @@ -428,8 +438,9 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data) if (UCSI_CCI_CONNECTOR(cci)) ucsi_connector_change(g0->ucsi, UCSI_CCI_CONNECTOR(cci)); - if (test_bit(COMMAND_PENDING, &g0->flags) && - cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE)) + if (cci & UCSI_CCI_ACK_COMPLETE && test_and_clear_bit(ACK_PENDING, &g0->flags)) + complete(&g0->complete); + if (cci & UCSI_CCI_COMMAND_COMPLETE && test_and_clear_bit(COMMAND_PENDING, &g0->flags)) complete(&g0->complete); return IRQ_HANDLED; diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index 73c89701fc..ac8b5b52e3 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -8,6 +8,7 @@ * */ +#include "linux/virtio_net.h" #include <linux/init.h> #include <linux/module.h> #include <linux/cdev.h> @@ -28,6 +29,7 @@ #include <uapi/linux/virtio_config.h> #include <uapi/linux/virtio_ids.h> #include <uapi/linux/virtio_blk.h> +#include <uapi/linux/virtio_ring.h> #include <linux/mod_devicetable.h> #include "iova_domain.h" @@ -1705,13 +1707,17 @@ static bool device_is_allowed(u32 device_id) return false; } -static bool features_is_valid(u64 features) +static bool features_is_valid(struct vduse_dev_config *config) { - if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) + if (!(config->features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM))) return false; /* Now we only support read-only configuration space */ - if (features & (1ULL << VIRTIO_BLK_F_CONFIG_WCE)) + if ((config->device_id == VIRTIO_ID_BLOCK) && + (config->features & BIT_ULL(VIRTIO_BLK_F_CONFIG_WCE))) + return false; + else if ((config->device_id == VIRTIO_ID_NET) && + (config->features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) return false; return true; @@ -1738,7 +1744,7 @@ static bool vduse_validate_config(struct vduse_dev_config *config) if (!device_is_allowed(config->device_id)) return false; - if (!features_is_valid(config->features)) + if (!features_is_valid(config)) return false; return true; |
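In the vduse hunk above, features_is_valid() now receives the whole config so the read-only-configuration restriction can be applied per device type instead of rejecting VIRTIO_BLK_F_CONFIG_WCE for every device. A stand-alone sketch of the same screening, with the feature-bit and device-ID constants taken from the virtio UAPI headers (features_ok() is an invented name):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)			(1ULL << (n))
/* values as defined in the virtio uapi headers */
#define VIRTIO_F_ACCESS_PLATFORM	33
#define VIRTIO_ID_NET			1
#define VIRTIO_ID_BLOCK			2
#define VIRTIO_BLK_F_CONFIG_WCE		11
#define VIRTIO_NET_F_CTRL_VQ		17

/* mirrors the per-device feature screening done by features_is_valid() */
static bool features_ok(uint32_t device_id, uint64_t features)
{
	/* the device must negotiate VIRTIO_F_ACCESS_PLATFORM */
	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)))
		return false;

	/* reject what the patch screens out: a writable block config
	 * space and the net control virtqueue */
	if (device_id == VIRTIO_ID_BLOCK &&
	    (features & BIT_ULL(VIRTIO_BLK_F_CONFIG_WCE)))
		return false;
	if (device_id == VIRTIO_ID_NET &&
	    (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
		return false;

	return true;
}

int main(void)
{
	uint64_t net_feats = BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) |
			     BIT_ULL(VIRTIO_NET_F_CTRL_VQ);

	/* rejected: a net device offering the control virtqueue */
	printf("net + ctrl_vq accepted? %d\n",
	       features_ok(VIRTIO_ID_NET, net_feats));
	return 0;
}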