Diffstat (limited to 'drivers')
139 files changed, 1508 insertions, 798 deletions
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index b379401ff1..44ca989f16 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -678,12 +678,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, return count; } -static const struct device_attribute alarm_attr = { +static struct device_attribute alarm_attr = { .attr = {.name = "alarm", .mode = 0644}, .show = acpi_battery_alarm_show, .store = acpi_battery_alarm_store, }; +static struct attribute *acpi_battery_attrs[] = { + &alarm_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(acpi_battery); + /* * The Battery Hooking API * @@ -823,7 +829,10 @@ static void __exit battery_hook_exit(void) static int sysfs_add_battery(struct acpi_battery *battery) { - struct power_supply_config psy_cfg = { .drv_data = battery, }; + struct power_supply_config psy_cfg = { + .drv_data = battery, + .attr_grp = acpi_battery_groups, + }; bool full_cap_broken = false; if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) && @@ -868,7 +877,7 @@ static int sysfs_add_battery(struct acpi_battery *battery) return result; } battery_hook_add_battery(battery); - return device_create_file(&battery->bat->dev, &alarm_attr); + return 0; } static void sysfs_remove_battery(struct acpi_battery *battery) @@ -879,7 +888,6 @@ static void sysfs_remove_battery(struct acpi_battery *battery) return; } battery_hook_remove_battery(battery); - device_remove_file(&battery->bat->dev, &alarm_attr); power_supply_unregister(battery->bat); battery->bat = NULL; mutex_unlock(&battery->sysfs_lock); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index b5bf8b81a0..df5d5a554b 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -525,6 +525,20 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { }, }, { + /* Asus Vivobook Pro N6506MU */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "N6506MU"), + }, + }, + { + /* Asus Vivobook Pro N6506MJ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "N6506MJ"), + }, + }, + { /* LG Electronics 17U70P */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index dc8164b182..442c5905d4 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -77,7 +77,6 @@ struct acpi_battery { u16 spec; u8 id; u8 present:1; - u8 have_sysfs_alarm:1; }; #define to_acpi_battery(x) power_supply_get_drvdata(x) @@ -462,12 +461,18 @@ static ssize_t acpi_battery_alarm_store(struct device *dev, return count; } -static const struct device_attribute alarm_attr = { +static struct device_attribute alarm_attr = { .attr = {.name = "alarm", .mode = 0644}, .show = acpi_battery_alarm_show, .store = acpi_battery_alarm_store, }; +static struct attribute *acpi_battery_attrs[] = { + &alarm_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(acpi_battery); + /* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ @@ -518,7 +523,10 @@ static int acpi_battery_read(struct acpi_battery *battery) static int acpi_battery_add(struct acpi_sbs *sbs, int id) { struct acpi_battery *battery = &sbs->battery[id]; - struct power_supply_config psy_cfg = { .drv_data = battery, }; + struct power_supply_config psy_cfg = { + .drv_data = battery, + .attr_grp = acpi_battery_groups, + }; int result; battery->id = id; @@ -548,10 +556,6 @@ static int acpi_battery_add(struct 
acpi_sbs *sbs, int id) goto end; } - result = device_create_file(&battery->bat->dev, &alarm_attr); - if (result) - goto end; - battery->have_sysfs_alarm = 1; end: pr_info("%s [%s]: Battery Slot [%s] (battery %s)\n", ACPI_SBS_DEVICE_NAME, acpi_device_bid(sbs->device), @@ -563,11 +567,8 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id) { struct acpi_battery *battery = &sbs->battery[id]; - if (battery->bat) { - if (battery->have_sysfs_alarm) - device_remove_file(&battery->bat->dev, &alarm_attr); + if (battery->bat) power_supply_unregister(battery->bat); - } } static int acpi_charger_add(struct acpi_sbs *sbs) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 076fbeadce..4e08476011 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -941,8 +941,19 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) &sense_key, &asc, &ascq); ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); } else { - /* ATA PASS-THROUGH INFORMATION AVAILABLE */ - ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D); + /* + * ATA PASS-THROUGH INFORMATION AVAILABLE + * + * Note: we are supposed to call ata_scsi_set_sense(), which + * respects the D_SENSE bit, instead of unconditionally + * generating the sense data in descriptor format. However, + * because hdparm, hddtemp, and udisks incorrectly assume sense + * data in descriptor format, without even looking at the + * RESPONSE CODE field in the returned sense data (to see which + * format the returned sense data is in), we are stuck with + * being bug compatible with older kernels. + */ + scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D); } } diff --git a/drivers/base/core.c b/drivers/base/core.c index 2b4c0624b7..b539926219 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -25,6 +25,7 @@ #include <linux/mutex.h> #include <linux/pm_runtime.h> #include <linux/netdevice.h> +#include <linux/rcupdate.h> #include <linux/sched/signal.h> #include <linux/sched/mm.h> #include <linux/string_helpers.h> @@ -2640,6 +2641,7 @@ static const char *dev_uevent_name(const struct kobject *kobj) static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) { const struct device *dev = kobj_to_dev(kobj); + struct device_driver *driver; int retval = 0; /* add device node properties if present */ @@ -2668,8 +2670,12 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) if (dev->type && dev->type->name) add_uevent_var(env, "DEVTYPE=%s", dev->type->name); - if (dev->driver) - add_uevent_var(env, "DRIVER=%s", dev->driver->name); + /* Synchronize with module_remove_driver() */ + rcu_read_lock(); + driver = READ_ONCE(dev->driver); + if (driver) + add_uevent_var(env, "DRIVER=%s", driver->name); + rcu_read_unlock(); /* Add common DT information about the device */ of_device_uevent(dev, env); @@ -2739,11 +2745,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, if (!env) return -ENOMEM; - /* Synchronize with really_probe() */ - device_lock(dev); /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); - device_unlock(dev); if (retval) goto out; diff --git a/drivers/base/module.c b/drivers/base/module.c index a1b55da071..b0b79b9c18 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -7,6 +7,7 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/rcupdate.h> #include "base.h" static char *make_driver_name(struct device_driver *drv) @@ -97,6 
+98,9 @@ void module_remove_driver(struct device_driver *drv) if (!drv) return; + /* Synchronize with dev_uevent() */ + synchronize_rcu(); + sysfs_remove_link(&drv->p->kobj, "module"); if (drv->owner) diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c index be32cd4e84..292e86f601 100644 --- a/drivers/base/regmap/regmap-kunit.c +++ b/drivers/base/regmap/regmap-kunit.c @@ -145,9 +145,9 @@ static struct regmap *gen_regmap(struct kunit *test, const struct regmap_test_param *param = test->param_value; struct regmap_test_priv *priv = test->priv; unsigned int *buf; - struct regmap *ret; + struct regmap *ret = ERR_PTR(-ENOMEM); size_t size; - int i; + int i, error; struct reg_default *defaults; config->cache_type = param->cache; @@ -172,15 +172,17 @@ static struct regmap *gen_regmap(struct kunit *test, *data = kzalloc(sizeof(**data), GFP_KERNEL); if (!(*data)) - return ERR_PTR(-ENOMEM); + goto out_free; (*data)->vals = buf; if (config->num_reg_defaults) { - defaults = kcalloc(config->num_reg_defaults, - sizeof(struct reg_default), - GFP_KERNEL); + defaults = kunit_kcalloc(test, + config->num_reg_defaults, + sizeof(struct reg_default), + GFP_KERNEL); if (!defaults) - return ERR_PTR(-ENOMEM); + goto out_free; + config->reg_defaults = defaults; for (i = 0; i < config->num_reg_defaults; i++) { @@ -190,12 +192,19 @@ static struct regmap *gen_regmap(struct kunit *test, } ret = regmap_init_ram(priv->dev, config, *data); - if (IS_ERR(ret)) { - kfree(buf); - kfree(*data); - } else { - kunit_add_action(test, regmap_exit_action, ret); - } + if (IS_ERR(ret)) + goto out_free; + + /* This calls regmap_exit() on failure, which frees buf and *data */ + error = kunit_add_action_or_reset(test, regmap_exit_action, ret); + if (error) + ret = ERR_PTR(error); + + return ret; + +out_free: + kfree(buf); + kfree(*data); return ret; } @@ -1497,9 +1506,9 @@ static struct regmap *gen_raw_regmap(struct kunit *test, struct regmap_test_priv *priv = test->priv; const struct regmap_test_param *param = test->param_value; u16 *buf; - struct regmap *ret; + struct regmap *ret = ERR_PTR(-ENOMEM); size_t size = (config->max_register + 1) * config->reg_bits / 8; - int i; + int i, error; struct reg_default *defaults; config->cache_type = param->cache; @@ -1515,15 +1524,16 @@ static struct regmap *gen_raw_regmap(struct kunit *test, *data = kzalloc(sizeof(**data), GFP_KERNEL); if (!(*data)) - return ERR_PTR(-ENOMEM); + goto out_free; (*data)->vals = (void *)buf; config->num_reg_defaults = config->max_register + 1; - defaults = kcalloc(config->num_reg_defaults, - sizeof(struct reg_default), - GFP_KERNEL); + defaults = kunit_kcalloc(test, + config->num_reg_defaults, + sizeof(struct reg_default), + GFP_KERNEL); if (!defaults) - return ERR_PTR(-ENOMEM); + goto out_free; config->reg_defaults = defaults; for (i = 0; i < config->num_reg_defaults; i++) { @@ -1536,7 +1546,8 @@ static struct regmap *gen_raw_regmap(struct kunit *test, defaults[i].def = be16_to_cpu(buf[i]); break; default: - return ERR_PTR(-EINVAL); + ret = ERR_PTR(-EINVAL); + goto out_free; } } @@ -1548,12 +1559,19 @@ static struct regmap *gen_raw_regmap(struct kunit *test, config->num_reg_defaults = 0; ret = regmap_init_raw_ram(priv->dev, config, *data); - if (IS_ERR(ret)) { - kfree(buf); - kfree(*data); - } else { - kunit_add_action(test, regmap_exit_action, ret); - } + if (IS_ERR(ret)) + goto out_free; + + /* This calls regmap_exit() on failure, which frees buf and *data */ + error = kunit_add_action_or_reset(test, regmap_exit_action, ret); + if 
(error) + ret = ERR_PTR(error); + + return ret; + +out_free: + kfree(buf); + kfree(*data); return ret; } diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 6a863328b8..d310b525fb 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -344,7 +344,7 @@ static void ps_cancel_timer(struct btnxpuart_dev *nxpdev) struct ps_data *psdata = &nxpdev->psdata; flush_work(&psdata->work); - del_timer_sync(&psdata->ps_timer); + timer_shutdown_sync(&psdata->ps_timer); } static void ps_control(struct hci_dev *hdev, u8 ps_state) diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 26919556ef..b72b36e0ab 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -528,6 +528,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta) static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) { struct sh_cmt_channel *ch = dev_id; + unsigned long flags; /* clear flags */ sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) & @@ -558,6 +559,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) ch->flags &= ~FLAG_SKIPEVENT; + raw_spin_lock_irqsave(&ch->lock, flags); + if (ch->flags & FLAG_REPROGRAM) { ch->flags &= ~FLAG_REPROGRAM; sh_cmt_clock_event_program_verify(ch, 1); @@ -570,6 +573,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) ch->flags &= ~FLAG_IRQCONTEXT; + raw_spin_unlock_irqrestore(&ch->lock, flags); + return IRQ_HANDLED; } @@ -780,12 +785,18 @@ static int sh_cmt_clock_event_next(unsigned long delta, struct clock_event_device *ced) { struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); + unsigned long flags; BUG_ON(!clockevent_state_oneshot(ced)); + + raw_spin_lock_irqsave(&ch->lock, flags); + if (likely(ch->flags & FLAG_IRQCONTEXT)) ch->next_match_value = delta - 1; else - sh_cmt_set_next(ch, delta - 1); + __sh_cmt_set_next(ch, delta - 1); + + raw_spin_unlock_irqrestore(&ch->lock, flags); return 0; } diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index a092b13ffb..67c4a6a0ef 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -304,10 +304,8 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata, int epp = -EINVAL; int ret; - if (!pref_index) { - pr_debug("EPP pref_index is invalid\n"); - return -EINVAL; - } + if (!pref_index) + epp = cpudata->epp_default; if (epp == -EINVAL) epp = epp_values[pref_index]; @@ -1439,7 +1437,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) policy->driver_data = cpudata; - cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0); + cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0); policy->min = policy->cpuinfo.min_freq; policy->max = policy->cpuinfo.max_freq; @@ -1766,8 +1764,13 @@ static int __init amd_pstate_init(void) /* check if this machine need CPPC quirks */ dmi_check_system(amd_pstate_quirks_table); - switch (cppc_state) { - case AMD_PSTATE_UNDEFINED: + /* + * determine the driver mode from the command line or kernel config. + * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED. + * command line options will override the kernel config settings. + */ + + if (cppc_state == AMD_PSTATE_UNDEFINED) { /* Disable on the following configs by default: * 1. Undefined platforms * 2. 
Server platforms @@ -1779,15 +1782,20 @@ static int __init amd_pstate_init(void) pr_info("driver load is disabled, boot with specific mode to enable this\n"); return -ENODEV; } - ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE); - if (ret) - return ret; - break; + /* get driver mode from kernel config option [1:4] */ + cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE; + } + + switch (cppc_state) { case AMD_PSTATE_DISABLE: + pr_info("driver load is disabled, boot with specific mode to enable this\n"); return -ENODEV; case AMD_PSTATE_PASSIVE: case AMD_PSTATE_ACTIVE: case AMD_PSTATE_GUIDED: + ret = amd_pstate_set_driver(cppc_state); + if (ret) + return ret; break; default: return -EINVAL; @@ -1808,7 +1816,7 @@ static int __init amd_pstate_init(void) /* enable amd pstate feature */ ret = amd_pstate_enable(true); if (ret) { - pr_err("failed to enable with return %d\n", ret); + pr_err("failed to enable driver mode(%d)\n", cppc_state); return ret; } diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h index e6a28e7f4d..f80b33fa5d 100644 --- a/drivers/cpufreq/amd-pstate.h +++ b/drivers/cpufreq/amd-pstate.h @@ -99,6 +99,7 @@ struct amd_cpudata { u32 policy; u64 cppc_cap1_cached; bool suspended; + s16 epp_default; }; #endif /* _LINUX_AMD_PSTATE_H */ diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index fa62367ee9..1a9aadd4c8 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -17,6 +17,7 @@ #include <linux/list.h> #include <linux/lockdep.h> #include <linux/module.h> +#include <linux/nospec.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/seq_file.h> @@ -198,7 +199,7 @@ gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum) if (hwnum >= gdev->ngpio) return ERR_PTR(-EINVAL); - return &gdev->descs[hwnum]; + return &gdev->descs[array_index_nospec(hwnum, gdev->ngpio)]; } EXPORT_SYMBOL_GPL(gpio_device_get_desc); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ee7df1d84e..89cf9ac6da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4048,6 +4048,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->grbm_idx_mutex); mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); + mutex_init(&adev->virt.rlcg_reg_lock); hash_init(adev->mn_hash); mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index e4742b6503..4a9cec0026 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -262,9 +262,8 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job, struct dma_fence *fence = NULL; int r; - /* Ignore soft recovered fences here */ r = drm_sched_entity_error(s_entity); - if (r && r != -ENODATA) + if (r) goto error; if (!fence && job->gang_submit) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c index ca5c86e5f7..8e8afbd237 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c @@ -334,7 +334,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size set_ta_context_funcs(psp, ta_type, &context); - if (!context->initialized) { + if (!context || !context->initialized) { dev_err(adev->dev, "TA is not initialized\n"); ret = -EINVAL; goto err_free_shared_buf; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1adc81a557..0c4ee06451 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2172,12 +2172,15 @@ static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, struct ras_dispatch_if *info) { - struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); - struct ras_ih_data *data = &obj->ih_data; + struct ras_manager *obj; + struct ras_ih_data *data; + obj = amdgpu_ras_find_obj(adev, &info->head); if (!obj) return -EINVAL; + data = &obj->ih_data; + if (data->inuse == 0) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 54ab51a4ad..972a58f0f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -980,6 +980,9 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1; scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2; scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3; + + mutex_lock(&adev->virt.rlcg_reg_lock); + if (reg_access_ctrl->spare_int) spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int; @@ -1036,6 +1039,9 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f } ret = readl(scratch_reg0); + + mutex_unlock(&adev->virt.rlcg_reg_lock); + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 642f1fd287..0ec246c745 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -272,6 +272,8 @@ struct amdgpu_virt { /* the ucode id to signal the autoload */ uint32_t autoload_ucode_id; + + struct mutex rlcg_reg_lock; }; struct amdgpu_video_codec_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 66e8a01612..9b748d7058 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -102,6 +102,11 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, if (!r) r = amdgpu_sync_push_to_job(&sync, p->job); amdgpu_sync_free(&sync); + + if (r) { + p->num_dw_left = 0; + amdgpu_job_free(p->job); + } return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 31e500859a..9248525124 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1658,7 +1658,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, start = map_start << PAGE_SHIFT; end = (map_last + 1) << PAGE_SHIFT; for (addr = start; !r && addr < end; ) { - struct hmm_range *hmm_range; + struct hmm_range *hmm_range = NULL; unsigned long map_start_vma; unsigned long map_last_vma; struct vm_area_struct *vma; @@ -1696,7 +1696,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } svm_range_lock(prange); - if (!r && amdgpu_hmm_range_get_pages_done(hmm_range)) { + + /* Free backing memory of hmm_range if it was initialized + * Overrride return value to TRY AGAIN only if prior returns + * were successful + */ + if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) { pr_debug("hmm update the range, need validate again\n"); r = -EAGAIN; } diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3cdcadd41b..836bf9ba62 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2701,7 +2701,8 @@ static int dm_suspend(void *handle) dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); - dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); + if (dm->cached_dc_state) + dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); amdgpu_dm_commit_zero_streams(dm->dc); @@ -2943,6 +2944,7 @@ static int dm_resume(void *handle) commit_params.streams = dc_state->streams; commit_params.stream_count = dc_state->stream_count; + dc_exit_ips_for_hw_access(dm->dc); WARN_ON(!dc_commit_streams(dm->dc, &commit_params)); dm_gpureset_commit_state(dm->cached_dc_state, dm); @@ -3015,7 +3017,8 @@ static int dm_resume(void *handle) emulated_link_detect(aconnector->dc_link); } else { mutex_lock(&dm->dc_lock); - dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + dc_exit_ips_for_hw_access(dm->dc); + dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4); mutex_unlock(&dm->dc_lock); } @@ -3351,6 +3354,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) enum dc_connection_type new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); + struct dc *dc = aconnector->dc_link->ctx->dc; bool ret = false; if (adev->dm.disable_hpd_irq) @@ -3385,6 +3389,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) drm_kms_helper_connector_hotplug_event(connector); } else { mutex_lock(&adev->dm.dc_lock); + dc_exit_ips_for_hw_access(dc); ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); mutex_unlock(&adev->dm.dc_lock); if (ret) { @@ -3444,6 +3449,7 @@ static void handle_hpd_rx_irq(void *param) bool has_left_work = false; int idx = dc_link->link_index; struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; + struct dc *dc = aconnector->dc_link->ctx->dc; memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); @@ -3533,6 +3539,7 @@ out: bool ret = false; mutex_lock(&adev->dm.dc_lock); + dc_exit_ips_for_hw_access(dc); ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); mutex_unlock(&adev->dm.dc_lock); @@ -4639,6 +4646,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) bool ret = false; mutex_lock(&dm->dc_lock); + dc_exit_ips_for_hw_access(dm->dc); ret = dc_link_detect(link, DETECT_REASON_BOOT); mutex_unlock(&dm->dc_lock); @@ -6788,7 +6796,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) aconnector->dc_sink = aconnector->dc_link->local_sink ? 
aconnector->dc_link->local_sink : aconnector->dc_em_sink; - dc_sink_retain(aconnector->dc_sink); + if (aconnector->dc_sink) + dc_sink_retain(aconnector->dc_sink); } } @@ -7615,7 +7624,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) drm_add_modes_noedid(connector, 1920, 1080); } else { amdgpu_dm_connector_ddc_get_modes(connector, edid); - amdgpu_dm_connector_add_common_modes(encoder, connector); + if (encoder) + amdgpu_dm_connector_add_common_modes(encoder, connector); amdgpu_dm_connector_add_freesync_modes(connector, edid); } amdgpu_dm_fbc_init(connector); @@ -8945,7 +8955,8 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, memset(&position, 0, sizeof(position)); mutex_lock(&dm->dc_lock); - dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position); + dc_exit_ips_for_hw_access(dm->dc); + dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position); mutex_unlock(&dm->dc_lock); } @@ -9014,6 +9025,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, dm_enable_per_frame_crtc_master_sync(dc_state); mutex_lock(&dm->dc_lock); + dc_exit_ips_for_hw_access(dm->dc); WARN_ON(!dc_commit_streams(dm->dc, &params)); /* Allow idle optimization when vblank count is 0 for display off */ @@ -9379,6 +9391,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) mutex_lock(&dm->dc_lock); + dc_exit_ips_for_hw_access(dm->dc); dc_update_planes_and_stream(dm->dc, dummy_updates, status->plane_count, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index a5e1a93dda..b50010ed76 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -182,6 +182,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) dc_sink_release(dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; + aconnector->dsc_aux = NULL; + port->passthrough_aux = NULL; } aconnector->mst_status = MST_STATUS_DEFAULT; @@ -494,6 +496,8 @@ dm_dp_mst_detect(struct drm_connector *connector, dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; + aconnector->dsc_aux = NULL; + port->passthrough_aux = NULL; amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD, @@ -1233,14 +1237,6 @@ static bool is_dsc_need_re_compute( if (!aconnector || !aconnector->dsc_aux) continue; - /* - * check if cached virtual MST DSC caps are available and DSC is supported - * as per specifications in their Virtual DPCD registers. 
- */ - if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported || - aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) - continue; - stream_on_link[new_stream_on_link_num] = aconnector; new_stream_on_link_num++; @@ -1268,6 +1264,9 @@ static bool is_dsc_need_re_compute( } } + if (new_stream_on_link_num == 0) + return false; + /* check current_state if there stream on link but it is not in * new request state */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 8a4c40b4c2..311c62d2d1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -1254,7 +1254,7 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, /* turn off cursor */ if (crtc_state && crtc_state->stream) { mutex_lock(&adev->dm.dc_lock); - dc_stream_set_cursor_position(crtc_state->stream, + dc_stream_program_cursor_position(crtc_state->stream, &position); mutex_unlock(&adev->dm.dc_lock); } @@ -1284,11 +1284,11 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, if (crtc_state->stream) { mutex_lock(&adev->dm.dc_lock); - if (!dc_stream_set_cursor_attributes(crtc_state->stream, + if (!dc_stream_program_cursor_attributes(crtc_state->stream, &attributes)) DRM_ERROR("DC failed to set cursor attributes\n"); - if (!dc_stream_set_cursor_position(crtc_state->stream, + if (!dc_stream_program_cursor_position(crtc_state->stream, &position)) DRM_ERROR("DC failed to set cursor position\n"); mutex_unlock(&adev->dm.dc_lock); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 15819416a2..8ed5993246 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2267,6 +2267,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state) otg_master = resource_get_otg_master_for_stream( &state->res_ctx, state->streams[stream_idx]); + + if (!otg_master) + continue; + resource_log_pipe_for_stream(dc, state, otg_master, stream_idx); } if (state->phantom_stream_count > 0) { @@ -2508,6 +2512,17 @@ static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx, } } +static int get_num_of_free_pipes(const struct resource_pool *pool, const struct dc_state *context) +{ + int i; + int count = 0; + + for (i = 0; i < pool->pipe_count; i++) + if (resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], FREE_PIPE)) + count++; + return count; +} + enum dc_status resource_add_otg_master_for_stream_output(struct dc_state *new_ctx, const struct resource_pool *pool, struct dc_stream_state *stream) @@ -2641,37 +2656,33 @@ static bool acquire_secondary_dpp_pipes_and_add_plane( struct dc_state *cur_ctx, struct resource_pool *pool) { - struct pipe_ctx *opp_head_pipe, *sec_pipe, *tail_pipe; + struct pipe_ctx *sec_pipe, *tail_pipe; + struct pipe_ctx *opp_heads[MAX_PIPES]; + int opp_head_count; + int i; if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe) { ASSERT(0); return false; } - opp_head_pipe = otg_master_pipe; - while (opp_head_pipe) { + opp_head_count = resource_get_opp_heads_for_otg_master(otg_master_pipe, + &new_ctx->res_ctx, opp_heads); + if (get_num_of_free_pipes(pool, new_ctx) < opp_head_count) + /* not enough free pipes */ + return false; + + for (i = 0; i < opp_head_count; i++) { sec_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe( 
cur_ctx, new_ctx, pool, - opp_head_pipe); - if (!sec_pipe) { - /* try tearing down MPCC combine */ - int pipe_idx = acquire_first_split_pipe( - &new_ctx->res_ctx, pool, - otg_master_pipe->stream); - - if (pipe_idx >= 0) - sec_pipe = &new_ctx->res_ctx.pipe_ctx[pipe_idx]; - } - - if (!sec_pipe) - return false; - + opp_heads[i]); + ASSERT(sec_pipe); sec_pipe->plane_state = plane_state; /* establish pipe relationship */ - tail_pipe = get_tail_pipe(opp_head_pipe); + tail_pipe = get_tail_pipe(opp_heads[i]); tail_pipe->bottom_pipe = sec_pipe; sec_pipe->top_pipe = tail_pipe; sec_pipe->bottom_pipe = NULL; @@ -2682,8 +2693,6 @@ static bool acquire_secondary_dpp_pipes_and_add_plane( } else { sec_pipe->prev_odm_pipe = NULL; } - - opp_head_pipe = opp_head_pipe->next_odm_pipe; } return true; } @@ -2696,6 +2705,7 @@ bool resource_append_dpp_pipes_for_plane_composition( struct dc_plane_state *plane_state) { bool success; + if (otg_master_pipe->plane_state == NULL) success = add_plane_to_opp_head_pipes(otg_master_pipe, plane_state, new_ctx); @@ -2703,10 +2713,15 @@ bool resource_append_dpp_pipes_for_plane_composition( success = acquire_secondary_dpp_pipes_and_add_plane( otg_master_pipe, plane_state, new_ctx, cur_ctx, pool); - if (success) + if (success) { /* when appending a plane mpc slice count changes from 0 to 1 */ success = update_pipe_params_after_mpc_slice_count_change( plane_state, new_ctx, pool); + if (!success) + resource_remove_dpp_pipes_for_plane_composition(new_ctx, + pool, plane_state); + } + return success; } @@ -2716,6 +2731,7 @@ void resource_remove_dpp_pipes_for_plane_composition( const struct dc_plane_state *plane_state) { int i; + for (i = pool->pipe_count - 1; i >= 0; i--) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 76bb05f4d6..52a1cfc5fe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -437,6 +437,19 @@ enum dc_status dc_state_remove_stream( return DC_OK; } +static void remove_mpc_combine_for_stream(const struct dc *dc, + struct dc_state *new_ctx, + const struct dc_state *cur_ctx, + struct dc_stream_status *status) +{ + int i; + + for (i = 0; i < status->plane_count; i++) + resource_update_pipes_for_plane_with_slice_count( + new_ctx, cur_ctx, dc->res_pool, + status->plane_states[i], 1); +} + bool dc_state_add_plane( const struct dc *dc, struct dc_stream_state *stream, @@ -447,8 +460,12 @@ bool dc_state_add_plane( struct pipe_ctx *otg_master_pipe; struct dc_stream_status *stream_status = NULL; bool added = false; + int odm_slice_count; + int i; stream_status = dc_state_get_stream_status(state, stream); + otg_master_pipe = resource_get_otg_master_for_stream( + &state->res_ctx, stream); if (stream_status == NULL) { dm_error("Existing stream not found; failed to attach surface!\n"); goto out; @@ -456,22 +473,39 @@ bool dc_state_add_plane( dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", plane_state, MAX_SURFACE_NUM); goto out; + } else if (!otg_master_pipe) { + goto out; } - if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) - /* ODM combine could prevent us from supporting more planes - * we will reset ODM slice count back to 1 when all planes have - * been removed to maximize the amount of planes supported when - * new planes are added. 
- */ - resource_update_pipes_for_stream_with_slice_count( - state, dc->current_state, dc->res_pool, stream, 1); + added = resource_append_dpp_pipes_for_plane_composition(state, + dc->current_state, pool, otg_master_pipe, plane_state); - otg_master_pipe = resource_get_otg_master_for_stream( - &state->res_ctx, stream); - if (otg_master_pipe) + if (!added) { + /* try to remove MPC combine to free up pipes */ + for (i = 0; i < state->stream_count; i++) + remove_mpc_combine_for_stream(dc, state, + dc->current_state, + &state->stream_status[i]); added = resource_append_dpp_pipes_for_plane_composition(state, - dc->current_state, pool, otg_master_pipe, plane_state); + dc->current_state, pool, + otg_master_pipe, plane_state); + } + + if (!added) { + /* try to decrease ODM slice count gradually to free up pipes */ + odm_slice_count = resource_get_odm_slice_count(otg_master_pipe); + for (i = odm_slice_count - 1; i > 0; i--) { + resource_update_pipes_for_stream_with_slice_count(state, + dc->current_state, dc->res_pool, stream, + i); + added = resource_append_dpp_pipes_for_plane_composition( + state, + dc->current_state, pool, + otg_master_pipe, plane_state); + if (added) + break; + } + } if (added) { stream_status->plane_states[stream_status->plane_count] = @@ -531,15 +565,6 @@ bool dc_state_remove_plane( stream_status->plane_states[stream_status->plane_count] = NULL; - if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) - /* ODM combine could prevent us from supporting more planes - * we will reset ODM slice count back to 1 when all planes have - * been removed to maximize the amount of planes supported when - * new planes are added. - */ - resource_update_pipes_for_stream_with_slice_count( - state, dc->current_state, dc->res_pool, stream, 1); - return true; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 5c7e4884ca..53bc991b6e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -266,7 +266,6 @@ bool dc_stream_set_cursor_attributes( const struct dc_cursor_attributes *attributes) { struct dc *dc; - bool reset_idle_optimizations = false; if (NULL == stream) { dm_error("DC: dc_stream is NULL!\n"); @@ -297,20 +296,36 @@ bool dc_stream_set_cursor_attributes( stream->cursor_attributes = *attributes; - dc_z10_restore(dc); - /* disable idle optimizations while updating cursor */ - if (dc->idle_optimizations_allowed) { - dc_allow_idle_optimizations(dc, false); - reset_idle_optimizations = true; - } + return true; +} - program_cursor_attributes(dc, stream, attributes); +bool dc_stream_program_cursor_attributes( + struct dc_stream_state *stream, + const struct dc_cursor_attributes *attributes) +{ + struct dc *dc; + bool reset_idle_optimizations = false; - /* re-enable idle optimizations if necessary */ - if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle) - dc_allow_idle_optimizations(dc, true); + dc = stream ? 
stream->ctx->dc : NULL; - return true; + if (dc_stream_set_cursor_attributes(stream, attributes)) { + dc_z10_restore(dc); + /* disable idle optimizations while updating cursor */ + if (dc->idle_optimizations_allowed) { + dc_allow_idle_optimizations(dc, false); + reset_idle_optimizations = true; + } + + program_cursor_attributes(dc, stream, attributes); + + /* re-enable idle optimizations if necessary */ + if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle) + dc_allow_idle_optimizations(dc, true); + + return true; + } + + return false; } static void program_cursor_position( @@ -355,9 +370,6 @@ bool dc_stream_set_cursor_position( struct dc_stream_state *stream, const struct dc_cursor_position *position) { - struct dc *dc; - bool reset_idle_optimizations = false; - if (NULL == stream) { dm_error("DC: dc_stream is NULL!\n"); return false; @@ -368,24 +380,46 @@ bool dc_stream_set_cursor_position( return false; } + stream->cursor_position = *position; + + + return true; +} + +bool dc_stream_program_cursor_position( + struct dc_stream_state *stream, + const struct dc_cursor_position *position) +{ + struct dc *dc; + bool reset_idle_optimizations = false; + const struct dc_cursor_position *old_position; + + if (!stream) + return false; + + old_position = &stream->cursor_position; dc = stream->ctx->dc; - dc_z10_restore(dc); - /* disable idle optimizations if enabling cursor */ - if (dc->idle_optimizations_allowed && (!stream->cursor_position.enable || dc->debug.exit_idle_opt_for_cursor_updates) - && position->enable) { - dc_allow_idle_optimizations(dc, false); - reset_idle_optimizations = true; - } + if (dc_stream_set_cursor_position(stream, position)) { + dc_z10_restore(dc); - stream->cursor_position = *position; + /* disable idle optimizations if enabling cursor */ + if (dc->idle_optimizations_allowed && + (!old_position->enable || dc->debug.exit_idle_opt_for_cursor_updates) && + position->enable) { + dc_allow_idle_optimizations(dc, false); + reset_idle_optimizations = true; + } - program_cursor_position(dc, stream, position); - /* re-enable idle optimizations if necessary */ - if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle) - dc_allow_idle_optimizations(dc, true); + program_cursor_position(dc, stream, position); + /* re-enable idle optimizations if necessary */ + if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle) + dc_allow_idle_optimizations(dc, true); - return true; + return true; + } + + return false; } bool dc_stream_add_writeback(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index e5dbbc6089..1039dfb0b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -470,10 +470,18 @@ bool dc_stream_set_cursor_attributes( struct dc_stream_state *stream, const struct dc_cursor_attributes *attributes); +bool dc_stream_program_cursor_attributes( + struct dc_stream_state *stream, + const struct dc_cursor_attributes *attributes); + bool dc_stream_set_cursor_position( struct dc_stream_state *stream, const struct dc_cursor_position *position); +bool dc_stream_program_cursor_position( + struct dc_stream_state *stream, + const struct dc_cursor_position *position); + bool dc_stream_adjust_vmin_vmax(struct dc *dc, struct dc_stream_state *stream, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index 4f559a025c..09cf54586f 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -84,7 +84,7 @@ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data); - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); /* Below loops 1000 x 500us = 500 ms. * Exit REPLAY may need to wait 1-2 frames to power up. Timeout after at @@ -127,7 +127,7 @@ static void dmub_replay_set_power_opt(struct dmub_replay *dmub, unsigned int pow cmd.replay_set_power_opt.replay_set_power_opt_data.power_opt = power_opt; cmd.replay_set_power_opt.replay_set_power_opt_data.panel_inst = panel_inst; - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* @@ -209,8 +209,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub, else copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0; - - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); return true; } @@ -231,7 +230,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub, cmd.replay_set_coasting_vtotal.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data); cmd.replay_set_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal; - dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } /* diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 0c4aef8ffe..3306684e80 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -288,6 +288,7 @@ static void dcn10_log_color_state(struct dc *dc, { struct dc_context *dc_ctx = dc->ctx; struct resource_pool *pool = dc->res_pool; + bool is_gamut_remap_available = false; int i; DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode" @@ -300,16 +301,15 @@ static void dcn10_log_color_state(struct dc *dc, struct dcn_dpp_state s = {0}; dpp->funcs->dpp_read_state(dpp, &s); - dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + if (dpp->funcs->dpp_get_gamut_remap) { + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); + is_gamut_remap_available = true; + } if (!s.is_enabled) continue; - DTN_INFO("[%2d]: %11xh %11s %9s %9s" - " %12s " - "%010lld %010lld %010lld %010lld " - "%010lld %010lld %010lld %010lld " - "%010lld %010lld %010lld %010lld", + DTN_INFO("[%2d]: %11xh %11s %9s %9s", dpp->inst, s.igam_input_format, (s.igam_lut_mode == 0) ? "BypassFixed" : @@ -328,22 +328,27 @@ static void dcn10_log_color_state(struct dc *dc, ((s.rgam_lut_mode == 2) ? "Ycc" : ((s.rgam_lut_mode == 3) ? "RAM" : ((s.rgam_lut_mode == 4) ? "RAM" : - "Unknown")))), - (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : - ((s.gamut_remap.gamut_adjust_type == 1) ? 
"HW" : - "SW"), - s.gamut_remap.temperature_matrix[0].value, - s.gamut_remap.temperature_matrix[1].value, - s.gamut_remap.temperature_matrix[2].value, - s.gamut_remap.temperature_matrix[3].value, - s.gamut_remap.temperature_matrix[4].value, - s.gamut_remap.temperature_matrix[5].value, - s.gamut_remap.temperature_matrix[6].value, - s.gamut_remap.temperature_matrix[7].value, - s.gamut_remap.temperature_matrix[8].value, - s.gamut_remap.temperature_matrix[9].value, - s.gamut_remap.temperature_matrix[10].value, - s.gamut_remap.temperature_matrix[11].value); + "Unknown"))))); + if (is_gamut_remap_available) + DTN_INFO(" %12s " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld " + "%010lld %010lld %010lld %010lld", + (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : + ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"), + s.gamut_remap.temperature_matrix[0].value, + s.gamut_remap.temperature_matrix[1].value, + s.gamut_remap.temperature_matrix[2].value, + s.gamut_remap.temperature_matrix[3].value, + s.gamut_remap.temperature_matrix[4].value, + s.gamut_remap.temperature_matrix[5].value, + s.gamut_remap.temperature_matrix[6].value, + s.gamut_remap.temperature_matrix[7].value, + s.gamut_remap.temperature_matrix[8].value, + s.gamut_remap.temperature_matrix[9].value, + s.gamut_remap.temperature_matrix[10].value, + s.gamut_remap.temperature_matrix[11].value); + DTN_INFO("\n"); } DTN_INFO("\n"); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index ed9141a67d..4c47061533 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -919,6 +919,9 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) stream = dc->current_state->streams[0]; plane = (stream ? 
dc->current_state->stream_status[0].plane_states[0] : NULL); + if (!stream || !plane) + return false; + if (stream && plane) { cursor_cache_enable = stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part; @@ -1038,7 +1041,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) /* Use copied cursor, and it's okay to not switch back */ cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part; - dc_stream_set_cursor_attributes(stream, &cursor_attr); + dc_stream_program_cursor_attributes(stream, &cursor_attr); } /* Enable MALL */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c index 3e6c7be7e2..5302d2c9c7 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -165,7 +165,12 @@ static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *lin link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( link_res->hpo_dp_link_enc, tp_params); } + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); + + // Give retimer extra time to lock before updating DP_TRAINING_PATTERN_SET to TPS1 + if (tp_params->dp_phy_pattern == DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE) + msleep(30); } static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index b53ad18dbf..ec9ff5f8bd 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -2313,8 +2313,6 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) dc->hwss.disable_audio_stream(pipe_ctx); - edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false); - update_psp_stream_config(pipe_ctx, true); dc->hwss.blank_stream(pipe_ctx); @@ -2368,6 +2366,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) dc->hwss.disable_stream(pipe_ctx); disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); } + edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false); if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal)) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 0fcf0b8530..564246983f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -454,7 +454,8 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link, * If we got sink count changed it means * Downstream port status changed, * then DM should call DC to do the detection. 
- * NOTE: Do not handle link loss on eDP since it is internal link*/ + * NOTE: Do not handle link loss on eDP since it is internal link + */ if ((link->connector_signal != SIGNAL_TYPE_EDP) && dp_parse_link_loss_status( link, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 0a939437e1..6b380e037e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -2193,10 +2193,11 @@ bool dcn20_get_dcc_compression_cap(const struct dc *dc, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { - return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( - dc->res_pool->hubbub, - input, - output); + if (dc->res_pool->hubbub->funcs->get_dcc_compression_cap) + return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( + dc->res_pool->hubbub, input, output); + + return false; } static void dcn20_destroy_resource_pool(struct resource_pool **pool) diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 5fb21a0508..f531ce1d2b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -929,7 +929,7 @@ static int pp_dpm_switch_power_profile(void *handle, enum PP_SMC_POWER_PROFILE type, bool en) { struct pp_hwmgr *hwmgr = handle; - long workload; + long workload[1]; uint32_t index; if (!hwmgr || !hwmgr->pm_en) @@ -947,12 +947,12 @@ static int pp_dpm_switch_power_profile(void *handle, hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]); index = fls(hwmgr->workload_mask); index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0; - workload = hwmgr->workload_setting[index]; + workload[0] = hwmgr->workload_setting[index]; } else { hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]); index = fls(hwmgr->workload_mask); index = index <= Workload_Policy_Max ? index - 1 : 0; - workload = hwmgr->workload_setting[index]; + workload[0] = hwmgr->workload_setting[index]; } if (type == PP_SMC_POWER_PROFILE_COMPUTE && @@ -962,7 +962,7 @@ static int pp_dpm_switch_power_profile(void *handle, } if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); + hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0); return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c index 1d829402cd..f4bd8e9357 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c @@ -269,7 +269,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set struct pp_power_state *new_ps) { uint32_t index; - long workload; + long workload[1]; if (hwmgr->not_vf) { if (!skip_display_settings) @@ -294,10 +294,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { index = fls(hwmgr->workload_mask); index = index > 0 && index <= Workload_Policy_Max ? 
index - 1 : 0; - workload = hwmgr->workload_setting[index]; + workload[0] = hwmgr->workload_setting[index]; - if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode) - hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); + if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode) + hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0); } return 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 1fcd445100..f1c369945a 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -2957,6 +2957,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr) static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; struct smu7_hwmgr *data; int result = 0; @@ -2993,40 +2994,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) /* Initalize Dynamic State Adjustment Rule Settings */ result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); - if (0 == result) { - struct amdgpu_device *adev = hwmgr->adev; + if (result) + goto fail; - data->is_tlu_enabled = false; + data->is_tlu_enabled = false; - hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU7_MAX_HARDWARE_POWERLEVELS; - hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; - hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - data->pcie_gen_cap = adev->pm.pcie_gen_mask; - if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - data->pcie_spc_cap = 20; - else - data->pcie_spc_cap = 16; - data->pcie_lane_cap = adev->pm.pcie_mlw_mask; - - hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ -/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ - hwmgr->platform_descriptor.clockStep.engineClock = 500; - hwmgr->platform_descriptor.clockStep.memoryClock = 500; - smu7_thermal_parameter_init(hwmgr); - } else { - /* Ignore return value in here, we are cleaning up a mess. */ - smu7_hwmgr_backend_fini(hwmgr); - } + data->pcie_gen_cap = adev->pm.pcie_gen_mask; + if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + data->pcie_spc_cap = 20; + else + data->pcie_spc_cap = 16; + data->pcie_lane_cap = adev->pm.pcie_mlw_mask; + + hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ + /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. 
*/ + hwmgr->platform_descriptor.clockStep.engineClock = 500; + hwmgr->platform_descriptor.clockStep.memoryClock = 500; + smu7_thermal_parameter_init(hwmgr); result = smu7_update_edc_leakage_table(hwmgr); - if (result) { - smu7_hwmgr_backend_fini(hwmgr); - return result; - } + if (result) + goto fail; return 0; +fail: + smu7_hwmgr_backend_fini(hwmgr); + return result; } static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) @@ -3316,8 +3314,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, const struct pp_power_state *current_ps) { struct amdgpu_device *adev = hwmgr->adev; - struct smu7_power_state *smu7_ps = - cast_phw_smu7_power_state(&request_ps->hardware); + struct smu7_power_state *smu7_ps; uint32_t sclk; uint32_t mclk; struct PP_Clocks minimum_clocks = {0}; @@ -3334,6 +3331,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, uint32_t latency; bool latency_allowed = false; + smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware); + if (!smu7_ps) + return -EINVAL; + data->battery_state = (PP_StateUILabel_Battery == request_ps->classification.ui_label); data->mclk_ignore_signal = false; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index b015a601b3..eb744401e0 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1065,16 +1065,18 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *prequest_ps, const struct pp_power_state *pcurrent_ps) { - struct smu8_power_state *smu8_ps = - cast_smu8_power_state(&prequest_ps->hardware); - - const struct smu8_power_state *smu8_current_ps = - cast_const_smu8_power_state(&pcurrent_ps->hardware); - + struct smu8_power_state *smu8_ps; + const struct smu8_power_state *smu8_current_ps; struct smu8_hwmgr *data = hwmgr->backend; struct PP_Clocks clocks = {0, 0, 0, 0}; bool force_high; + smu8_ps = cast_smu8_power_state(&prequest_ps->hardware); + smu8_current_ps = cast_const_smu8_power_state(&pcurrent_ps->hardware); + + if (!smu8_ps || !smu8_current_ps) + return -EINVAL; + smu8_ps->need_dfs_bypass = true; data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 9f5bd998c6..f4acdb2267 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -3259,8 +3259,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, const struct pp_power_state *current_ps) { struct amdgpu_device *adev = hwmgr->adev; - struct vega10_power_state *vega10_ps = - cast_phw_vega10_power_state(&request_ps->hardware); + struct vega10_power_state *vega10_ps; uint32_t sclk; uint32_t mclk; struct PP_Clocks minimum_clocks = {0}; @@ -3278,6 +3277,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; uint32_t latency; + vega10_ps = cast_phw_vega10_power_state(&request_ps->hardware); + if (!vega10_ps) + return -EINVAL; + data->battery_state = (PP_StateUILabel_Battery == request_ps->classification.ui_label); @@ -3415,13 +3418,17 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co const struct vega10_power_state *vega10_ps = cast_const_phw_vega10_power_state(states->pnew_state); struct vega10_single_dpm_table *sclk_table = 
&(data->dpm_table.gfx_table); - uint32_t sclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].gfx_clock; struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); - uint32_t mclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].mem_clock; + uint32_t sclk, mclk; uint32_t i; + if (vega10_ps == NULL) + return -EINVAL; + sclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; + mclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].mem_clock; + for (i = 0; i < sclk_table->count; i++) { if (sclk == sclk_table->dpm_levels[i].value) break; @@ -3728,6 +3735,9 @@ static int vega10_generate_dpm_level_enable_mask( cast_const_phw_vega10_power_state(states->pnew_state); int i; + if (vega10_ps == NULL) + return -EINVAL; + PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps), "Attempt to Trim DPM States Failed!", return -1); @@ -4995,6 +5005,8 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr, vega10_psa = cast_const_phw_vega10_power_state(pstate1); vega10_psb = cast_const_phw_vega10_power_state(pstate2); + if (vega10_psa == NULL || vega10_psb == NULL) + return -EINVAL; /* If the two states don't even have the same number of performance levels * they cannot be the same state. @@ -5128,6 +5140,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) return -EINVAL; vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return -EINVAL; vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].gfx_clock = @@ -5179,6 +5193,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) return -EINVAL; vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return -EINVAL; vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].mem_clock = @@ -5420,6 +5436,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr) return; vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return; + max_level = vega10_ps->performance_level_count - 1; if (vega10_ps->performance_levels[max_level].gfx_clock != @@ -5442,6 +5461,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr) ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1)); vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + if (vega10_ps == NULL) + return; + max_level = vega10_ps->performance_level_count - 1; if (vega10_ps->performance_levels[max_level].gfx_clock != @@ -5632,6 +5654,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_ return -EINVAL; vega10_ps = cast_const_phw_vega10_power_state(state); + if (vega10_ps == NULL) + return -EINVAL; i = index > vega10_ps->performance_level_count - 1 ? 
vega10_ps->performance_level_count - 1 : index; diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index e1796ecf9c..06409133b0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -2220,7 +2220,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, { int ret = 0; int index = 0; - long workload; + long workload[1]; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (!skip_display_settings) { @@ -2260,10 +2260,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; + workload[0] = smu->workload_setting[index]; - if (smu->power_profile_mode != workload) - smu_bump_power_profile_mode(smu, &workload, 0); + if (smu->power_profile_mode != workload[0]) + smu_bump_power_profile_mode(smu, workload, 0); } return ret; @@ -2313,7 +2313,7 @@ static int smu_switch_power_profile(void *handle, { struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - long workload; + long workload[1]; uint32_t index; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2326,17 +2326,17 @@ static int smu_switch_power_profile(void *handle, smu->workload_mask &= ~(1 << smu->workload_prority[type]); index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; + workload[0] = smu->workload_setting[index]; } else { smu->workload_mask |= (1 << smu->workload_prority[type]); index = fls(smu->workload_mask); index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; + workload[0] = smu->workload_setting[index]; } if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) - smu_bump_power_profile_mode(smu, &workload, 0); + smu_bump_power_profile_mode(smu, workload, 0); return 0; } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index 6a4f20fccf..7b0bc9704e 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -1027,7 +1027,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, u32 status_reg; u8 *buffer = msg->buffer; unsigned int i; - int num_transferred = 0; int ret; /* Buffer size of AUX CH is 16 bytes */ @@ -1079,7 +1078,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, reg = buffer[i]; writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + 4 * i); - num_transferred++; } } @@ -1127,7 +1125,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + 4 * i); buffer[i] = (unsigned char)reg; - num_transferred++; } } @@ -1144,7 +1141,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ) msg->reply = DP_AUX_NATIVE_REPLY_ACK; - return num_transferred > 0 ? 
num_transferred : -EBUSY; + return msg->size; aux_error: /* if aux err happen, reset aux */ diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 68831f4e50..fc2ceae61d 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -4069,6 +4069,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { const struct drm_dp_connection_status_notify *conn_stat = &up_req->msg.u.conn_stat; + bool handle_csn; drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", conn_stat->port_number, @@ -4077,6 +4078,16 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) conn_stat->message_capability_status, conn_stat->input_port, conn_stat->peer_device_type); + + mutex_lock(&mgr->probe_lock); + handle_csn = mgr->mst_primary->link_address_sent; + mutex_unlock(&mgr->probe_lock); + + if (!handle_csn) { + drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it."); + kfree(up_req); + goto out; + } } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { const struct drm_dp_resource_status_notify *res_stat = &up_req->msg.u.resource_stat; diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 02b1235c6d..106292d6ed 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -1067,23 +1067,16 @@ int drm_atomic_set_property(struct drm_atomic_state *state, } if (async_flip && - prop != config->prop_fb_id && - prop != config->prop_in_fence_fd && - prop != config->prop_fb_damage_clips) { + (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY || + (prop != config->prop_fb_id && + prop != config->prop_in_fence_fd && + prop != config->prop_fb_damage_clips))) { ret = drm_atomic_plane_get_property(plane, plane_state, prop, &old_val); ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); break; } - if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) { - drm_dbg_atomic(prop->dev, - "[OBJECT:%d] Only primary planes can be changed during async flip\n", - obj->id); - ret = -EINVAL; - break; - } - ret = drm_atomic_plane_set_property(plane, plane_state, file_priv, prop, prop_value); diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 31af5cf37a..cee5eafbfb 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -880,6 +880,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, kfree(modeset->mode); modeset->mode = drm_mode_duplicate(dev, mode); + if (!modeset->mode) { + ret = -ENOMEM; + break; + } + drm_connector_get(connector); modeset->connectors[modeset->num_connectors++] = connector; modeset->x = offset->x; diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 071668bfe5..6c33331367 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -1449,6 +1449,9 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) static int cnp_num_backlight_controllers(struct drm_i915_private *i915) { + if (INTEL_PCH_TYPE(i915) >= PCH_MTL) + return 2; + if (INTEL_PCH_TYPE(i915) >= PCH_DG1) return 1; diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 0ccbf9a859..eca07436d1 100644 --- 
a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -351,6 +351,9 @@ static int intel_num_pps(struct drm_i915_private *i915) if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) return 2; + if (INTEL_PCH_TYPE(i915) >= PCH_MTL) + return 2; + if (INTEL_PCH_TYPE(i915) >= PCH_DG1) return 1; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index a2195e28b6..cac6d41845 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -290,6 +290,41 @@ out: return i915_error_to_vmf_fault(err); } +static void set_address_limits(struct vm_area_struct *area, + struct i915_vma *vma, + unsigned long obj_offset, + unsigned long *start_vaddr, + unsigned long *end_vaddr) +{ + unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */ + long start, end; /* memory boundaries */ + + /* + * Let's move into the ">> PAGE_SHIFT" + * domain to be sure not to lose bits + */ + vm_start = area->vm_start >> PAGE_SHIFT; + vm_end = area->vm_end >> PAGE_SHIFT; + vma_size = vma->size >> PAGE_SHIFT; + + /* + * Calculate the memory boundaries by considering the offset + * provided by the user during memory mapping and the offset + * provided for the partial mapping. + */ + start = vm_start; + start -= obj_offset; + start += vma->gtt_view.partial.offset; + end = start + vma_size; + + start = max_t(long, start, vm_start); + end = min_t(long, end, vm_end); + + /* Let's move back into the "<< PAGE_SHIFT" domain */ + *start_vaddr = (unsigned long)start << PAGE_SHIFT; + *end_vaddr = (unsigned long)end << PAGE_SHIFT; +} + static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) { #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) @@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) struct i915_ggtt *ggtt = to_gt(i915)->ggtt; bool write = area->vm_flags & VM_WRITE; struct i915_gem_ww_ctx ww; + unsigned long obj_offset; + unsigned long start, end; /* memory boundaries */ intel_wakeref_t wakeref; struct i915_vma *vma; pgoff_t page_offset; + unsigned long pfn; int srcu; int ret; - /* We don't use vmf->pgoff since that has the fake offset */ + obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node); page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; + page_offset += obj_offset; trace_i915_gem_object_fault(obj, page_offset, true, write); @@ -402,12 +441,14 @@ retry: if (ret) goto err_unpin; + set_address_limits(area, vma, obj_offset, &start, &end); + + pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT; + pfn += (start - area->vm_start) >> PAGE_SHIFT; + pfn += obj_offset - vma->gtt_view.partial.offset; + /* Finally, remap it using the new GTT offset */ - ret = remap_io_mapping(area, - area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT), - (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT, - min_t(u64, vma->size, area->vm_end - area->vm_start), - &ggtt->iomap); + ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap); if (ret) goto err_fence; @@ -1084,6 +1125,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma mmo = mmap_offset_attach(obj, mmap_type, NULL); if (IS_ERR(mmo)) return PTR_ERR(mmo); + + vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node); } /* diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index e6f177183c..5c72462d1f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -165,7 +165,6 @@ 
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj, i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] : obj->mm.region, &places[0], obj->bo_offset, obj->base.size, flags); - places[0].flags |= TTM_PL_FLAG_DESIRED; /* Cache this on object? */ for (i = 0; i < num_allowed; ++i) { @@ -779,13 +778,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, .interruptible = true, .no_wait_gpu = false, }; - int real_num_busy; + struct ttm_placement initial_placement; + struct ttm_place initial_place; int ret; /* First try only the requested placement. No eviction. */ - real_num_busy = placement->num_placement; - placement->num_placement = 1; - ret = ttm_bo_validate(bo, placement, &ctx); + initial_placement.num_placement = 1; + memcpy(&initial_place, placement->placement, sizeof(struct ttm_place)); + initial_place.flags |= TTM_PL_FLAG_DESIRED; + initial_placement.placement = &initial_place; + ret = ttm_bo_validate(bo, &initial_placement, &ctx); if (ret) { ret = i915_ttm_err_to_gem(ret); /* @@ -800,7 +802,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, * If the initial attempt fails, allow all accepted placements, * evicting if necessary. */ - placement->num_placement = real_num_busy; ret = ttm_bo_validate(bo, placement, &ctx); if (ret) return i915_ttm_err_to_gem(ret); diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 739c865b55..10bce18b7c 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -501,3 +501,4 @@ module_platform_driver(lima_platform_driver); MODULE_AUTHOR("Lima Project Developers"); MODULE_DESCRIPTION("Lima DRM Driver"); MODULE_LICENSE("GPL v2"); +MODULE_SOFTDEP("pre: governor_simpleondemand"); diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c index 423eb302be..4caeb68f30 100644 --- a/drivers/gpu/drm/mgag200/mgag200_i2c.c +++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c @@ -31,6 +31,8 @@ #include <linux/i2c.h> #include <linux/pci.h> +#include <drm/drm_managed.h> + #include "mgag200_drv.h" static int mga_i2c_read_gpio(struct mga_device *mdev) @@ -86,7 +88,7 @@ static int mga_gpio_getscl(void *data) return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 
1 : 0; } -static void mgag200_i2c_release(void *res) +static void mgag200_i2c_release(struct drm_device *dev, void *res) { struct mga_i2c_chan *i2c = res; @@ -114,7 +116,7 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c) i2c->adapter.algo_data = &i2c->bit; i2c->bit.udelay = 10; - i2c->bit.timeout = 2; + i2c->bit.timeout = usecs_to_jiffies(2200); i2c->bit.data = i2c; i2c->bit.setsda = mga_gpio_setsda; i2c->bit.setscl = mga_gpio_setscl; @@ -125,5 +127,5 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c) if (ret) return ret; - return devm_add_action_or_reset(dev->dev, mgag200_i2c_release, i2c); + return drmm_add_action_or_reset(dev, mgag200_i2c_release, i2c); } diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h index b7f22597ee..969a8fb0ee 100644 --- a/drivers/gpu/drm/radeon/pptable.h +++ b/drivers/gpu/drm/radeon/pptable.h @@ -439,7 +439,7 @@ typedef struct _StateArray{ //how many states we have UCHAR ucNumEntries; - ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries); + ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */; }StateArray; diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c index 91202e40cd..60c6527827 100644 --- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c +++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c @@ -102,6 +102,17 @@ static void drm_gem_shmem_test_obj_create_private(struct kunit *test) sg_init_one(sgt->sgl, buf, TEST_SIZE); + /* + * Set the DMA mask to 64-bits and map the sgtables + * otherwise drm_gem_shmem_free will cause a warning + * on debug kernels. + */ + ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64)); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0); + KUNIT_ASSERT_EQ(test, ret, 0); + /* Init a mock DMA-BUF */ buf_mock.size = TEST_SIZE; attach_mock.dmabuf = &buf_mock; diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index af71b87d80..03c6d4d50a 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -44,9 +44,10 @@ #define GSCCS_RING_BASE 0x11a000 #define RING_TAIL(base) XE_REG((base) + 0x30) +#define TAIL_ADDR REG_GENMASK(20, 3) #define RING_HEAD(base) XE_REG((base) + 0x34) -#define HEAD_ADDR 0x001FFFFC +#define HEAD_ADDR REG_GENMASK(20, 2) #define RING_START(base) XE_REG((base) + 0x38) @@ -135,7 +136,6 @@ #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define STOP_RING REG_BIT(8) -#define TAIL_ADDR 0x001FFFF8 #define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8) #define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc) diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index e4e3658e6a..0f42971ff0 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1429,8 +1429,8 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) !xe_sched_job_completed(job)) || xe_sched_invalidate_job(job, 2)) { trace_xe_sched_job_ban(job); - xe_sched_tdr_queue_imm(&q->guc->sched); set_exec_queue_banned(q); + xe_sched_tdr_queue_imm(&q->guc->sched); } } } diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index 453e601ddd..d37f1dea9f 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -200,9 +200,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va PKG_PWR_LIM_1_EN, 0, channel); if (reg_val & 
PKG_PWR_LIM_1_EN) { + drm_warn(>_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n"); ret = -EOPNOTSUPP; - goto unlock; } + goto unlock; } /* Computation in 64-bits to avoid overflow. Round to nearest. */ diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 615bbc372a..d7bf7bc9dc 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -1354,7 +1354,10 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc) if (!snapshot) return NULL; - snapshot->context_desc = lower_32_bits(xe_lrc_ggtt_addr(lrc)); + if (lrc->bo && lrc->bo->vm) + xe_vm_get(lrc->bo->vm); + + snapshot->context_desc = xe_lrc_ggtt_addr(lrc); snapshot->head = xe_lrc_ring_head(lrc); snapshot->tail.internal = lrc->ring.tail; snapshot->tail.memory = xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL); @@ -1370,12 +1373,14 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc) void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot) { struct xe_bo *bo; + struct xe_vm *vm; struct iosys_map src; if (!snapshot) return; bo = snapshot->lrc_bo; + vm = bo->vm; snapshot->lrc_bo = NULL; snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL); @@ -1395,6 +1400,8 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot) dma_resv_unlock(bo->ttm.base.resv); put_bo: xe_bo_put(bo); + if (vm) + xe_vm_put(vm); } void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p) @@ -1440,7 +1447,13 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot) return; kvfree(snapshot->lrc_snapshot); - if (snapshot->lrc_bo) + if (snapshot->lrc_bo) { + struct xe_vm *vm; + + vm = snapshot->lrc_bo->vm; xe_bo_put(snapshot->lrc_bo); + if (vm) + xe_vm_put(vm); + } kfree(snapshot); } diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c index 7d50c6e89d..5b243b7feb 100644 --- a/drivers/gpu/drm/xe/xe_preempt_fence.c +++ b/drivers/gpu/drm/xe/xe_preempt_fence.c @@ -23,11 +23,19 @@ static void preempt_fence_work_func(struct work_struct *w) q->ops->suspend_wait(q); dma_fence_signal(&pfence->base); - dma_fence_end_signalling(cookie); - + /* + * Opt for keep everything in the fence critical section. This looks really strange since we + * have just signalled the fence, however the preempt fences are all signalled via single + * global ordered-wq, therefore anything that happens in this callback can easily block + * progress on the entire wq, which itself may prevent other published preempt fences from + * ever signalling. Therefore try to keep everything here in the callback in the fence + * critical section. For example if something below grabs a scary lock like vm->lock, + * lockdep should complain since we also hold that lock whilst waiting on preempt fences to + * complete. 
+ */ xe_vm_queue_rebind_worker(q->vm); - xe_exec_queue_put(q); + dma_fence_end_signalling(cookie); } static const char * diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index fb44cc7521..10326bd1bf 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -200,7 +200,7 @@ static void rtp_mark_active(struct xe_device *xe, if (first == last) bitmap_set(ctx->active_entries, first, 1); else - bitmap_set(ctx->active_entries, first, last - first + 2); + bitmap_set(ctx->active_entries, first, last - first + 1); } /** diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index 65f1f16282..2bfff99845 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -263,7 +263,7 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync) if (sync->fence) dma_fence_put(sync->fence); if (sync->chain_fence) - dma_fence_put(&sync->chain_fence->base); + dma_fence_chain_free(sync->chain_fence); if (sync->ufence) user_fence_put(sync->ufence); } diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c index 2c7c92272f..f8f22b8a67 100644 --- a/drivers/hwmon/corsair-psu.c +++ b/drivers/hwmon/corsair-psu.c @@ -875,15 +875,16 @@ static const struct hid_device_id corsairpsu_idtable[] = { { HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */ { HID_USB_DEVICE(0x1b1c, 0x1c05) }, /* Corsair HX750i */ { HID_USB_DEVICE(0x1b1c, 0x1c06) }, /* Corsair HX850i */ - { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Series 2022 */ - { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i */ + { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i Legacy */ + { HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i Legacy */ { HID_USB_DEVICE(0x1b1c, 0x1c09) }, /* Corsair RM550i */ { HID_USB_DEVICE(0x1b1c, 0x1c0a) }, /* Corsair RM650i */ { HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */ { HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */ { HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */ { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i Series 2023 */ - { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Series 2022 and 2023 */ + { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i Legacy and Series 2023 */ + { HID_USB_DEVICE(0x1b1c, 0x1c23) }, /* Corsair HX1200i Series 2023 */ { }, }; MODULE_DEVICE_TABLE(hid, corsairpsu_idtable); diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 0a8b95ce35..365e37bba0 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -990,8 +990,11 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev) return ret; ret = geni_se_resources_on(&gi2c->se); - if (ret) + if (ret) { + clk_disable_unprepare(gi2c->core_clk); + geni_icc_disable(&gi2c->se); return ret; + } enable_irq(gi2c->irq); gi2c->suspended = 0; diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index 97f338b123..25bc7b8d98 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -34,6 +34,7 @@ static int smbus_do_alert(struct device *dev, void *addrp) struct i2c_client *client = i2c_verify_client(dev); struct alert_data *data = addrp; struct i2c_driver *driver; + int ret; if (!client || client->addr != data->addr) return 0; @@ -47,16 +48,47 @@ static int smbus_do_alert(struct device *dev, void *addrp) device_lock(dev); if (client->dev.driver) { driver = to_i2c_driver(client->dev.driver); - if (driver->alert) + if (driver->alert) { + /* Stop iterating after we find the device */ driver->alert(client, 
data->type, data->data); - else + ret = -EBUSY; + } else { dev_warn(&client->dev, "no driver alert()!\n"); - } else + ret = -EOPNOTSUPP; + } + } else { dev_dbg(&client->dev, "alert with no driver\n"); + ret = -ENODEV; + } + device_unlock(dev); + + return ret; +} + +/* Same as above, but call back all drivers with alert handler */ + +static int smbus_do_alert_force(struct device *dev, void *addrp) +{ + struct i2c_client *client = i2c_verify_client(dev); + struct alert_data *data = addrp; + struct i2c_driver *driver; + + if (!client || (client->flags & I2C_CLIENT_TEN)) + return 0; + + /* + * Drivers should either disable alerts, or provide at least + * a minimal handler. Lock so the driver won't change. + */ + device_lock(dev); + if (client->dev.driver) { + driver = to_i2c_driver(client->dev.driver); + if (driver->alert) + driver->alert(client, data->type, data->data); + } device_unlock(dev); - /* Stop iterating after we find the device */ - return -EBUSY; + return 0; } /* @@ -67,6 +99,7 @@ static irqreturn_t smbus_alert(int irq, void *d) { struct i2c_smbus_alert *alert = d; struct i2c_client *ara; + unsigned short prev_addr = I2C_CLIENT_END; /* Not a valid address */ ara = alert->ara; @@ -94,8 +127,25 @@ static irqreturn_t smbus_alert(int irq, void *d) data.addr, data.data); /* Notify driver for the device which issued the alert */ - device_for_each_child(&ara->adapter->dev, &data, - smbus_do_alert); + status = device_for_each_child(&ara->adapter->dev, &data, + smbus_do_alert); + /* + * If we read the same address more than once, and the alert + * was not handled by a driver, it won't do any good to repeat + * the loop because it will never terminate. Try again, this + * time calling the alert handlers of all devices connected to + * the bus, and abort the loop afterwards. If this helps, we + * are all set. If it doesn't, there is nothing else we can do, + * so we might as well abort the loop. + * Note: This assumes that a driver with alert handler handles + * the alert properly and clears it if necessary. + */ + if (data.addr == prev_addr && status != -EBUSY) { + device_for_each_child(&ara->adapter->dev, &data, + smbus_do_alert_force); + break; + } + prev_addr = data.addr; } return IRQ_HANDLED; diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c index 9d8f2c4060..b35903a069 100644 --- a/drivers/irqchip/irq-loongarch-cpu.c +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -18,11 +18,13 @@ struct fwnode_handle *cpuintc_handle; static u32 lpic_gsi_to_irq(u32 gsi) { + int irq = 0; + /* Only pch irqdomain transferring is required for LoongArch. */ if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ) - return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); + irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); - return 0; + return (irq > 0) ? irq : 0; } static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi) diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 58881d3139..244a8d489c 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -64,6 +64,20 @@ struct mbigen_device { void __iomem *base; }; +static inline unsigned int get_mbigen_node_offset(unsigned int nid) +{ + unsigned int offset = nid * MBIGEN_NODE_OFFSET; + + /* + * To avoid touched clear register in unexpected way, we need to directly + * skip clear register when access to more than 10 mbigen nodes. 
+ */ + if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET)) + offset += MBIGEN_NODE_OFFSET; + + return offset; +} + static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) { unsigned int nid, pin; @@ -72,8 +86,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq) nid = hwirq / IRQS_PER_MBIGEN_NODE + 1; pin = hwirq % IRQS_PER_MBIGEN_NODE; - return pin * 4 + nid * MBIGEN_NODE_OFFSET - + REG_MBIGEN_VEC_OFFSET; + return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET; } static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, @@ -88,8 +101,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, *mask = 1 << (irq_ofst % 32); ofst = irq_ofst / 32 * 4; - *addr = ofst + nid * MBIGEN_NODE_OFFSET - + REG_MBIGEN_TYPE_OFFSET; + *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET; } static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c index 9a17919085..a4c3b57098 100644 --- a/drivers/irqchip/irq-meson-gpio.c +++ b/drivers/irqchip/irq-meson-gpio.c @@ -178,7 +178,7 @@ struct meson_gpio_irq_controller { void __iomem *base; u32 channel_irqs[MAX_NUM_CHANNEL]; DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL); - spinlock_t lock; + raw_spinlock_t lock; }; static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, @@ -187,14 +187,14 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, unsigned long flags; u32 tmp; - spin_lock_irqsave(&ctl->lock, flags); + raw_spin_lock_irqsave(&ctl->lock, flags); tmp = readl_relaxed(ctl->base + reg); tmp &= ~mask; tmp |= val; writel_relaxed(tmp, ctl->base + reg); - spin_unlock_irqrestore(&ctl->lock, flags); + raw_spin_unlock_irqrestore(&ctl->lock, flags); } static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl) @@ -244,12 +244,12 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, unsigned long flags; unsigned int idx; - spin_lock_irqsave(&ctl->lock, flags); + raw_spin_lock_irqsave(&ctl->lock, flags); /* Find a free channel */ idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels); if (idx >= ctl->params->nr_channels) { - spin_unlock_irqrestore(&ctl->lock, flags); + raw_spin_unlock_irqrestore(&ctl->lock, flags); pr_err("No channel available\n"); return -ENOSPC; } @@ -257,7 +257,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, /* Mark the channel as used */ set_bit(idx, ctl->channel_map); - spin_unlock_irqrestore(&ctl->lock, flags); + raw_spin_unlock_irqrestore(&ctl->lock, flags); /* * Setup the mux of the channel to route the signal of the pad @@ -567,7 +567,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node * if (!ctl) return -ENOMEM; - spin_lock_init(&ctl->lock); + raw_spin_lock_init(&ctl->lock); ctl->base = of_iomap(node, 0); if (!ctl->base) { diff --git a/drivers/irqchip/irq-riscv-aplic-msi.c b/drivers/irqchip/irq-riscv-aplic-msi.c index 028444af48..d7773f76e5 100644 --- a/drivers/irqchip/irq-riscv-aplic-msi.c +++ b/drivers/irqchip/irq-riscv-aplic-msi.c @@ -32,15 +32,10 @@ static void aplic_msi_irq_unmask(struct irq_data *d) aplic_irq_unmask(d); } -static void aplic_msi_irq_eoi(struct irq_data *d) +static void aplic_msi_irq_retrigger_level(struct irq_data *d) { struct aplic_priv *priv = irq_data_get_irq_chip_data(d); - /* - * EOI handling is required only for level-triggered interrupts - * when APLIC is in MSI mode. 
- */ - switch (irqd_get_trigger_type(d)) { case IRQ_TYPE_LEVEL_LOW: case IRQ_TYPE_LEVEL_HIGH: @@ -59,6 +54,29 @@ static void aplic_msi_irq_eoi(struct irq_data *d) } } +static void aplic_msi_irq_eoi(struct irq_data *d) +{ + /* + * EOI handling is required only for level-triggered interrupts + * when APLIC is in MSI mode. + */ + aplic_msi_irq_retrigger_level(d); +} + +static int aplic_msi_irq_set_type(struct irq_data *d, unsigned int type) +{ + int rc = aplic_irq_set_type(d, type); + + if (rc) + return rc; + /* + * Updating sourcecfg register for level-triggered interrupts + * requires interrupt retriggering when APLIC is in MSI mode. + */ + aplic_msi_irq_retrigger_level(d); + return 0; +} + static void aplic_msi_write_msg(struct irq_data *d, struct msi_msg *msg) { unsigned int group_index, hart_index, guest_index, val; @@ -130,7 +148,7 @@ static const struct msi_domain_template aplic_msi_template = { .name = "APLIC-MSI", .irq_mask = aplic_msi_irq_mask, .irq_unmask = aplic_msi_irq_unmask, - .irq_set_type = aplic_irq_set_type, + .irq_set_type = aplic_msi_irq_set_type, .irq_eoi = aplic_msi_irq_eoi, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c index 238d3d3449..7e08714d50 100644 --- a/drivers/irqchip/irq-xilinx-intc.c +++ b/drivers/irqchip/irq-xilinx-intc.c @@ -189,7 +189,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc, irqc->intr_mask = 0; } - if (irqc->intr_mask >> irqc->nr_irq) + if ((u64)irqc->intr_mask >> irqc->nr_irq) pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n", diff --git a/drivers/md/md.c b/drivers/md/md.c index 9c5be016e5..a5b5801baa 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -479,7 +479,6 @@ int mddev_suspend(struct mddev *mddev, bool interruptible) */ WRITE_ONCE(mddev->suspended, mddev->suspended + 1); - del_timer_sync(&mddev->safemode_timer); /* restrict memory reclaim I/O during raid array is suspend */ mddev->noio_flag = memalloc_noio_save(); @@ -8639,12 +8638,12 @@ EXPORT_SYMBOL(md_done_sync); * A return value of 'false' means that the write wasn't recorded * and cannot proceed as the array is being suspend. 
*/ -bool md_write_start(struct mddev *mddev, struct bio *bi) +void md_write_start(struct mddev *mddev, struct bio *bi) { int did_change = 0; if (bio_data_dir(bi) != WRITE) - return true; + return; BUG_ON(mddev->ro == MD_RDONLY); if (mddev->ro == MD_AUTO_READ) { @@ -8677,15 +8676,9 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); if (!mddev->has_superblocks) - return true; + return; wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || - is_md_suspended(mddev)); - if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { - percpu_ref_put(&mddev->writes_pending); - return false; - } - return true; + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); } EXPORT_SYMBOL(md_write_start); diff --git a/drivers/md/md.h b/drivers/md/md.h index ca085ecad5..487582058f 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -785,7 +785,7 @@ extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **t extern void md_wakeup_thread(struct md_thread __rcu *thread); extern void md_check_recovery(struct mddev *mddev); extern void md_reap_sync_thread(struct mddev *mddev); -extern bool md_write_start(struct mddev *mddev, struct bio *bi); +extern void md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_inc(struct mddev *mddev, struct bio *bi); extern void md_write_end(struct mddev *mddev); extern void md_done_sync(struct mddev *mddev, int blocks, int ok); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 22bbd06ba6..5ea57b6748 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1688,8 +1688,7 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio) if (bio_data_dir(bio) == READ) raid1_read_request(mddev, bio, sectors, NULL); else { - if (!md_write_start(mddev,bio)) - return false; + md_write_start(mddev,bio); raid1_write_request(mddev, bio, sectors); } return true; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index a4556d2e46..f8d7c02c6e 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1836,8 +1836,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio) && md_flush_request(mddev, bio)) return true; - if (!md_write_start(mddev, bio)) - return false; + md_write_start(mddev, bio); if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) if (!raid10_handle_discard(mddev, bio)) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 1c6b58adec..ff9f4751c0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6097,8 +6097,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) ctx.do_flush = bi->bi_opf & REQ_PREFLUSH; } - if (!md_write_start(mddev, bi)) - return false; + md_write_start(mddev, bi); /* * If array is degraded, better not do chunk aligned read because * later we might have to read it again in order to reconstruct @@ -6273,7 +6272,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk safepos = conf->reshape_safe; sector_div(safepos, data_disks); if (mddev->reshape_backwards) { - BUG_ON(writepos < reshape_sectors); + if (WARN_ON(writepos < reshape_sectors)) + return MaxSector; + writepos -= reshape_sectors; readpos += reshape_sectors; safepos += reshape_sectors; @@ -6291,14 +6292,18 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk * to set 'stripe_addr' which is where we will write to. 
*/ if (mddev->reshape_backwards) { - BUG_ON(conf->reshape_progress == 0); + if (WARN_ON(conf->reshape_progress == 0)) + return MaxSector; + stripe_addr = writepos; - BUG_ON((mddev->dev_sectors & - ~((sector_t)reshape_sectors - 1)) - - reshape_sectors - stripe_addr - != sector_nr); + if (WARN_ON((mddev->dev_sectors & + ~((sector_t)reshape_sectors - 1)) - + reshape_sectors - stripe_addr != sector_nr)) + return MaxSector; } else { - BUG_ON(writepos != sector_nr + reshape_sectors); + if (WARN_ON(writepos != sector_nr + reshape_sectors)) + return MaxSector; + stripe_addr = sector_nr; } diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c index 7e1ecdf248..0fb4d7bff9 100644 --- a/drivers/media/i2c/ov5647.c +++ b/drivers/media/i2c/ov5647.c @@ -1360,24 +1360,21 @@ static int ov5647_parse_dt(struct ov5647 *sensor, struct device_node *np) struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY, }; - struct device_node *ep; + struct device_node *ep __free(device_node) = + of_graph_get_endpoint_by_regs(np, 0, -1); int ret; - ep = of_graph_get_endpoint_by_regs(np, 0, -1); if (!ep) return -EINVAL; ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &bus_cfg); if (ret) - goto out; + return ret; sensor->clock_ncont = bus_cfg.bus.mipi_csi2.flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK; -out: - of_node_put(ep); - - return ret; + return 0; } static int ov5647_probe(struct i2c_client *client) diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig index 154343080c..40e20f0aa5 100644 --- a/drivers/media/pci/intel/ipu6/Kconfig +++ b/drivers/media/pci/intel/ipu6/Kconfig @@ -3,13 +3,14 @@ config VIDEO_INTEL_IPU6 depends on ACPI || COMPILE_TEST depends on VIDEO_DEV depends on X86 && X86_64 && HAS_DMA + depends on IPU_BRIDGE || !IPU_BRIDGE + select AUXILIARY_BUS select DMA_OPS select IOMMU_IOVA select VIDEO_V4L2_SUBDEV_API select MEDIA_CONTROLLER select VIDEOBUF2_DMA_CONTIG select V4L2_FWNODE - select IPU_BRIDGE help This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs and used for capturing images and video from camera sensors. 
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c index a57f9f4f3b..6a38a0fa0e 100644 --- a/drivers/media/platform/amphion/vdec.c +++ b/drivers/media/platform/amphion/vdec.c @@ -195,7 +195,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl) struct vdec_t *vdec = inst->priv; int ret = 0; - vpu_inst_lock(inst); switch (ctrl->id) { case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: vdec->params.display_delay_enable = ctrl->val; @@ -207,7 +206,6 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl) ret = -EINVAL; break; } - vpu_inst_unlock(inst); return ret; } diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c index 4eb57d793a..16ed4d2151 100644 --- a/drivers/media/platform/amphion/venc.c +++ b/drivers/media/platform/amphion/venc.c @@ -518,7 +518,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) struct venc_t *venc = inst->priv; int ret = 0; - vpu_inst_lock(inst); switch (ctrl->id) { case V4L2_CID_MPEG_VIDEO_H264_PROFILE: venc->params.profile = ctrl->val; @@ -579,7 +578,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) ret = -EINVAL; break; } - vpu_inst_unlock(inst); return ret; } diff --git a/drivers/media/tuners/xc2028.c b/drivers/media/tuners/xc2028.c index 5a967edcec..352b8a3679 100644 --- a/drivers/media/tuners/xc2028.c +++ b/drivers/media/tuners/xc2028.c @@ -1361,9 +1361,16 @@ static void load_firmware_cb(const struct firmware *fw, void *context) { struct dvb_frontend *fe = context; - struct xc2028_data *priv = fe->tuner_priv; + struct xc2028_data *priv; int rc; + if (!fe) { + pr_warn("xc2028: No frontend in %s\n", __func__); + return; + } + + priv = fe->tuner_priv; + tuner_dbg("request_firmware_nowait(): %s\n", fw ? "OK" : "error"); if (!fw) { tuner_err("Could not load firmware %s.\n", priv->fname); diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index 22d83ac18e..fbf58012be 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c @@ -23,40 +23,11 @@ static int dvb_usb_force_pid_filter_usage; module_param_named(force_pid_filter_usage, dvb_usb_force_pid_filter_usage, int, 0444); MODULE_PARM_DESC(force_pid_filter_usage, "force all dvb-usb-devices to use a PID filter, if any (default: 0)."); -static int dvb_usb_check_bulk_endpoint(struct dvb_usb_device *d, u8 endpoint) -{ - if (endpoint) { - int ret; - - ret = usb_pipe_type_check(d->udev, usb_sndbulkpipe(d->udev, endpoint)); - if (ret) - return ret; - ret = usb_pipe_type_check(d->udev, usb_rcvbulkpipe(d->udev, endpoint)); - if (ret) - return ret; - } - return 0; -} - -static void dvb_usb_clear_halt(struct dvb_usb_device *d, u8 endpoint) -{ - if (endpoint) { - usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, endpoint)); - usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, endpoint)); - } -} - static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) { struct dvb_usb_adapter *adap; int ret, n, o; - ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint); - if (ret) - return ret; - ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint_response); - if (ret) - return ret; for (n = 0; n < d->props.num_adapters; n++) { adap = &d->adapter[n]; adap->dev = d; @@ -132,8 +103,10 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) * when reloading the driver w/o replugging the device * sometimes a timeout occurs, this helps */ - dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint); 
- dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint_response); + if (d->props.generic_bulk_ctrl_endpoint != 0) { + usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); + usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); + } return 0; diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 51f4f653b9..5bebe1460a 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -214,13 +214,13 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, * Compute a bandwidth estimation by multiplying the frame * size by the number of video frames per second, divide the * result by the number of USB frames (or micro-frames for - * high-speed devices) per second and add the UVC header size - * (assumed to be 12 bytes long). + * high- and super-speed devices) per second and add the UVC + * header size (assumed to be 12 bytes long). */ bandwidth = frame->wWidth * frame->wHeight / 8 * format->bpp; bandwidth *= 10000000 / interval + 1; bandwidth /= 1000; - if (stream->dev->udev->speed == USB_SPEED_HIGH) + if (stream->dev->udev->speed >= USB_SPEED_HIGH) bandwidth /= 8; bandwidth += 12; @@ -478,6 +478,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, ktime_t time; u16 host_sof; u16 dev_sof; + u32 dev_stc; switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) { case UVC_STREAM_PTS | UVC_STREAM_SCR: @@ -526,6 +527,34 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, if (dev_sof == stream->clock.last_sof) return; + dev_stc = get_unaligned_le32(&data[header_size - 6]); + + /* + * STC (Source Time Clock) is the clock used by the camera. The UVC 1.5 + * standard states that it "must be captured when the first video data + * of a video frame is put on the USB bus". This is generally understood + * as requiring devices to clear the payload header's SCR bit before + * the first packet containing video data. + * + * Most vendors follow that interpretation, but some (namely SunplusIT + * on some devices) always set the `UVC_STREAM_SCR` bit, fill the SCR + * field with 0's,and expect that the driver only processes the SCR if + * there is data in the packet. + * + * Ignore all the hardware timestamp information if we haven't received + * any data for this frame yet, the packet contains no data, and both + * STC and SOF are zero. This heuristics should be safe on compliant + * devices. This should be safe with compliant devices, as in the very + * unlikely case where a UVC 1.1 device would send timing information + * only before the first packet containing data, and both STC and SOF + * happen to be zero for a particular frame, we would only miss one + * clock sample from many and the clock recovery algorithm wouldn't + * suffer from this condition. 
+ */ + if (buf && buf->bytesused == 0 && len == header_size && + dev_stc == 0 && dev_sof == 0) + return; + stream->clock.last_sof = dev_sof; host_sof = usb_get_current_frame_number(stream->dev->udev); @@ -575,7 +604,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, spin_lock_irqsave(&stream->clock.lock, flags); sample = &stream->clock.samples[stream->clock.head]; - sample->dev_stc = get_unaligned_le32(&data[header_size - 6]); + sample->dev_stc = dev_stc; sample->dev_sof = dev_sof; sample->host_sof = host_sof; sample->host_time = time; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c index bfe4caa0c9..4cb79a4f24 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -485,6 +485,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags); } + tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) - + ilog2(tx_ring->obj_num); tx_ring->obj_size = tx_obj_size; rem = priv->rx_obj_num; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c index e5bd57b65a..5b0c7890d4 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c @@ -2,7 +2,7 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, // Marc Kleine-Budde <kernel@pengutronix.de> // // Based on: @@ -16,6 +16,11 @@ #include "mcp251xfd.h" +static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta) +{ + return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); +} + static inline int mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, u8 *tef_tail) @@ -56,55 +61,38 @@ static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv) } static int -mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq) -{ - const struct mcp251xfd_tx_ring *tx_ring = priv->tx; - u32 tef_sta; - int err; - - err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta); - if (err) - return err; - - if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) { - netdev_err(priv->ndev, - "Transmit Event FIFO buffer overflow.\n"); - return -ENOBUFS; - } - - netdev_info(priv->ndev, - "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n", - tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ? - "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ? - "not empty" : "empty", - seq, priv->tef->tail, priv->tef->head, tx_ring->head); - - /* The Sequence Number in the TEF doesn't match our tef_tail. */ - return -EAGAIN; -} - -static int mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, const struct mcp251xfd_hw_tef_obj *hw_tef_obj, unsigned int *frame_len_ptr) { struct net_device_stats *stats = &priv->ndev->stats; + u32 seq, tef_tail_masked, tef_tail; struct sk_buff *skb; - u32 seq, seq_masked, tef_tail_masked, tef_tail; - seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, + /* Use the MCP2517FD mask on the MCP2518FD, too. We only + * compare 7 bits, this is enough to detect old TEF objects. + */ + seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK, hw_tef_obj->flags); - - /* Use the MCP2517FD mask on the MCP2518FD, too. We only - * compare 7 bits, this should be enough to detect - * net-yet-completed, i.e. old TEF objects. 
- */ - seq_masked = seq & - field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); tef_tail_masked = priv->tef->tail & field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); - if (seq_masked != tef_tail_masked) - return mcp251xfd_handle_tefif_recover(priv, seq); + + /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI + * bits of a FIFOSTA register, here the TX FIFO tail index + * might be corrupted and we might process past the TEF FIFO's + * head into old CAN frames. + * + * Compare the sequence number of the currently processed CAN + * frame with the expected sequence number. Abort with + * -EBADMSG if an old CAN frame is detected. + */ + if (seq != tef_tail_masked) { + netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__, + seq, tef_tail_masked); + stats->tx_fifo_errors++; + + return -EBADMSG; + } tef_tail = mcp251xfd_get_tef_tail(priv); skb = priv->can.echo_skb[tef_tail]; @@ -120,28 +108,44 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, return 0; } -static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv) +static int +mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p) { const struct mcp251xfd_tx_ring *tx_ring = priv->tx; - unsigned int new_head; - u8 chip_tx_tail; + const u8 shift = tx_ring->obj_num_shift_to_u8; + u8 chip_tx_tail, tail, len; + u32 fifo_sta; int err; - err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail); + err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr), + &fifo_sta); if (err) return err; - /* chip_tx_tail, is the next TX-Object send by the HW. - * The new TEF head must be >= the old head, ... + if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) { + *len_p = tx_ring->obj_num; + return 0; + } + + chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); + + err = mcp251xfd_check_tef_tail(priv); + if (err) + return err; + tail = mcp251xfd_get_tef_tail(priv); + + /* First shift to full u8. The subtraction works on signed + * values, that keeps the difference steady around the u8 + * overflow. The right shift acts on len, which is an u8. */ - new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail; - if (new_head <= priv->tef->head) - new_head += tx_ring->obj_num; + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail)); + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail)); + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len)); - /* ... but it cannot exceed the TX head. */ - priv->tef->head = min(new_head, tx_ring->head); + len = (chip_tx_tail << shift) - (tail << shift); + *len_p = len >> shift; - return mcp251xfd_check_tef_tail(priv); + return 0; } static inline int @@ -182,13 +186,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) u8 tef_tail, len, l; int err, i; - err = mcp251xfd_tef_ring_update(priv); + err = mcp251xfd_get_tef_len(priv, &len); if (err) return err; tef_tail = mcp251xfd_get_tef_tail(priv); - len = mcp251xfd_get_tef_len(priv); - l = mcp251xfd_get_tef_linear_len(priv); + l = mcp251xfd_get_tef_linear_len(priv, len); err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l); if (err) return err; @@ -203,12 +206,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) unsigned int frame_len = 0; err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len); - /* -EAGAIN means the Sequence Number in the TEF - * doesn't match our tef_tail. This can happen if we - * read the TEF objects too early. Leave loop let the - * interrupt handler call us again. 
+ /* -EBADMSG means we're affected by mcp2518fd erratum + * DS80000789E 6., i.e. the Sequence Number in the TEF + * doesn't match our tef_tail. Don't process any + * further and mark processed frames as good. */ - if (err == -EAGAIN) + if (err == -EBADMSG) goto out_netif_wake_queue; if (err) return err; @@ -223,6 +226,8 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) struct mcp251xfd_tx_ring *tx_ring = priv->tx; int offset; + ring->head += len; + /* Increment the TEF FIFO tail pointer 'len' times in * a single SPI message. * diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index b35bfebd23..4628bf847b 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -524,6 +524,7 @@ struct mcp251xfd_tef_ring { /* u8 obj_num equals tx_ring->obj_num */ /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */ + /* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */ union mcp251xfd_write_reg_buf irq_enable_buf; struct spi_transfer irq_enable_xfer; @@ -542,6 +543,7 @@ struct mcp251xfd_tx_ring { u8 nr; u8 fifo_nr; u8 obj_num; + u8 obj_num_shift_to_u8; u8 obj_size; struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX]; @@ -861,17 +863,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv) return priv->tef->tail & (priv->tx->obj_num - 1); } -static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv) +static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len) { - return priv->tef->head - priv->tef->tail; -} - -static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv) -{ - u8 len; - - len = mcp251xfd_get_tef_len(priv); - return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv)); } diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index ed1e6560df..0e663ec0c1 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -675,8 +675,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) of_remove_property(child, prop); phydev = of_phy_find_device(child); - if (phydev) + if (phydev) { phy_device_remove(phydev); + phy_device_free(phydev); + } } err = mdiobus_register(priv->user_mii_bus); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index baa1eeb9a1..3103e1b32d 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -2578,7 +2578,11 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port) if (!port) return MICREL_KSZ8_P1_ERRATA; break; + case KSZ8567_CHIP_ID: case KSZ9477_CHIP_ID: + case KSZ9567_CHIP_ID: + case KSZ9896_CHIP_ID: + case KSZ9897_CHIP_ID: /* KSZ9477 Errata DS80000754C * * Module 4: Energy Efficient Ethernet (EEE) feature select must @@ -2588,6 +2592,13 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port) * controls. If not disabled, the PHY ports can auto-negotiate * to enable EEE, and this feature can cause link drops when * linked to another device supporting EEE. + * + * The same item appears in the errata for the KSZ9567, KSZ9896, + * and KSZ9897. + * + * A similar item appears in the errata for the KSZ8567, but + * provides an alternative workaround. For now, use the simple + * workaround of disabling the EEE feature for this device too. 
*/ return MICREL_NO_EEE; } @@ -3763,6 +3774,11 @@ static int ksz_port_set_mac_address(struct dsa_switch *ds, int port, return -EBUSY; } + /* Need to initialize variable as the code to fill in settings may + * not be executed. + */ + wol.wolopts = 0; + ksz_get_wol(ds, dp->index, &wol); if (wol.wolopts & WAKE_MAGIC) { dev_err(ds->dev, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 23627c973e..a2d672a698 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7433,19 +7433,20 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) int rx = bp->rx_nr_rings, stat; int vnic, grp = rx; - if (hw_resc->resv_tx_rings != bp->tx_nr_rings && - bp->hwrm_spec_code >= 0x10601) - return true; - /* Old firmware does not need RX ring reservations but we still * need to setup a default RSS map when needed. With new firmware * we go through RX ring reservations first and then set up the * RSS map for the successfully reserved RX rings when needed. */ - if (!BNXT_NEW_RM(bp)) { + if (!BNXT_NEW_RM(bp)) bnxt_check_rss_tbl_no_rmgr(bp); + + if (hw_resc->resv_tx_rings != bp->tx_nr_rings && + bp->hwrm_spec_code >= 0x10601) + return true; + + if (!BNXT_NEW_RM(bp)) return false; - } vnic = bnxt_get_total_vnics(bp, rx); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 1248792d7f..0715ea5bf1 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -42,19 +42,15 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; - if (dev->phydev) { + if (dev->phydev) phy_ethtool_get_wol(dev->phydev, wol); - if (wol->supported) - return; - } - if (!device_can_wakeup(kdev)) { - wol->supported = 0; - wol->wolopts = 0; + /* MAC is not wake-up capable, return what the PHY does */ + if (!device_can_wakeup(kdev)) return; - } - wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; + /* Overlay MAC capabilities with that of the PHY queried before */ + wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; memset(wol->sopass, 0, sizeof(wol->sopass)); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index e32f6724f5..2e4f3e1782 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -775,6 +775,9 @@ void fec_ptp_stop(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); + if (fep->pps_enable) + fec_ptp_enable_pps(fep, 0); + cancel_delayed_work_sync(&fep->time_keep); hrtimer_cancel(&fep->perout_timer); if (fep->ptp_clock) diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index fe1741d482..cf816ede05 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -492,7 +492,7 @@ static int gve_set_channels(struct net_device *netdev, return -EINVAL; } - if (!netif_carrier_ok(netdev)) { + if (!netif_running(netdev)) { priv->tx_cfg.num_queues = new_tx; priv->rx_cfg.num_queues = new_rx; return 0; diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index cabf7d4bce..8b14efd14a 100644 --- 
a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1511,7 +1511,7 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, u32 status; old_prog = READ_ONCE(priv->xdp_prog); - if (!netif_carrier_ok(priv->dev)) { + if (!netif_running(priv->dev)) { WRITE_ONCE(priv->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); @@ -1784,7 +1784,7 @@ int gve_adjust_queues(struct gve_priv *priv, rx_alloc_cfg.qcfg = &new_rx_config; tx_alloc_cfg.num_rings = new_tx_config.num_queues; - if (netif_carrier_ok(priv->dev)) { + if (netif_running(priv->dev)) { err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); return err; } @@ -2001,7 +2001,7 @@ static int gve_set_features(struct net_device *netdev, if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) { netdev->features ^= NETIF_F_LRO; - if (netif_carrier_ok(netdev)) { + if (netif_running(netdev)) { err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); if (err) { /* Revert the change on error. */ @@ -2290,7 +2290,7 @@ err: int gve_reset(struct gve_priv *priv, bool attempt_teardown) { - bool was_up = netif_carrier_ok(priv->dev); + bool was_up = netif_running(priv->dev); int err; dev_info(&priv->pdev->dev, "Performing reset\n"); @@ -2631,7 +2631,7 @@ static void gve_shutdown(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct gve_priv *priv = netdev_priv(netdev); - bool was_up = netif_carrier_ok(priv->dev); + bool was_up = netif_running(priv->dev); rtnl_lock(); if (was_up && gve_close(priv->dev)) { @@ -2649,7 +2649,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct gve_priv *priv = netdev_priv(netdev); - bool was_up = netif_carrier_ok(priv->dev); + bool was_up = netif_running(priv->dev); priv->suspend_cnt++; rtnl_lock(); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 9b075dd488..f16d13e9ff 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -560,6 +560,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) return; + synchronize_irq(pf->oicr_irq.virq); + ice_unplug_aux_dev(pf); /* Notify VFs of impending reset */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index f1ee5584e8..3ac9d7ab83 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -905,8 +905,8 @@ static void idpf_vport_stop(struct idpf_vport *vport) vport->link_up = false; idpf_vport_intr_deinit(vport); - idpf_vport_intr_rel(vport); idpf_vport_queues_rel(vport); + idpf_vport_intr_rel(vport); np->state = __IDPF_VPORT_DOWN; } @@ -1337,9 +1337,8 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport) /** * idpf_vport_open - Bring up a vport * @vport: vport to bring up - * @alloc_res: allocate queue resources */ -static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) +static int idpf_vport_open(struct idpf_vport *vport) { struct idpf_netdev_priv *np = netdev_priv(vport->netdev); struct idpf_adapter *adapter = vport->adapter; @@ -1352,45 +1351,43 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) /* we do not allow interface up just yet */ netif_carrier_off(vport->netdev); - if (alloc_res) { - err = idpf_vport_queues_alloc(vport); - if (err) - return err; - } - err = 
idpf_vport_intr_alloc(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n", vport->vport_id, err); - goto queues_rel; + return err; } + err = idpf_vport_queues_alloc(vport); + if (err) + goto intr_rel; + err = idpf_vport_queue_ids_init(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n", vport->vport_id, err); - goto intr_rel; + goto queues_rel; } err = idpf_vport_intr_init(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n", vport->vport_id, err); - goto intr_rel; + goto queues_rel; } err = idpf_rx_bufs_init_all(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n", vport->vport_id, err); - goto intr_rel; + goto queues_rel; } err = idpf_queue_reg_init(vport); if (err) { dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n", vport->vport_id, err); - goto intr_rel; + goto queues_rel; } idpf_rx_init_buf_tail(vport); @@ -1457,10 +1454,10 @@ unmap_queue_vectors: idpf_send_map_unmap_queue_vector_msg(vport, false); intr_deinit: idpf_vport_intr_deinit(vport); -intr_rel: - idpf_vport_intr_rel(vport); queues_rel: idpf_vport_queues_rel(vport); +intr_rel: + idpf_vport_intr_rel(vport); return err; } @@ -1541,7 +1538,7 @@ void idpf_init_task(struct work_struct *work) np = netdev_priv(vport->netdev); np->state = __IDPF_VPORT_DOWN; if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags)) - idpf_vport_open(vport, true); + idpf_vport_open(vport); /* Spawn and return 'idpf_init_task' work queue until all the * default vports are created @@ -1900,9 +1897,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, goto free_vport; } - err = idpf_vport_queues_alloc(new_vport); - if (err) - goto free_vport; if (current_state <= __IDPF_VPORT_DOWN) { idpf_send_delete_queues_msg(vport); } else { @@ -1974,17 +1968,23 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, err = idpf_set_real_num_queues(vport); if (err) - goto err_reset; + goto err_open; if (current_state == __IDPF_VPORT_UP) - err = idpf_vport_open(vport, false); + err = idpf_vport_open(vport); kfree(new_vport); return err; err_reset: - idpf_vport_queues_rel(new_vport); + idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq, + vport->num_rxq, vport->num_bufq); + +err_open: + if (current_state == __IDPF_VPORT_UP) + idpf_vport_open(vport); + free_vport: kfree(new_vport); @@ -2213,7 +2213,7 @@ static int idpf_open(struct net_device *netdev) idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); - err = idpf_vport_open(vport, true); + err = idpf_vport_open(vport); idpf_vport_ctrl_unlock(netdev); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index b023704bbb..20ca04320d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3436,9 +3436,7 @@ static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport) */ void idpf_vport_intr_rel(struct idpf_vport *vport) { - int i, j, v_idx; - - for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { + for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; kfree(q_vector->bufq); @@ -3449,26 +3447,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport) q_vector->rx = NULL; } - /* Clean up the mapping of queues to vectors */ - for (i = 0; i < vport->num_rxq_grp; i++) { - struct 
idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; - - if (idpf_is_queue_model_split(vport->rxq_model)) - for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++) - rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL; - else - for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) - rx_qgrp->singleq.rxqs[j]->q_vector = NULL; - } - - if (idpf_is_queue_model_split(vport->txq_model)) - for (i = 0; i < vport->num_txq_grp; i++) - vport->txq_grps[i].complq->q_vector = NULL; - else - for (i = 0; i < vport->num_txq_grp; i++) - for (j = 0; j < vport->txq_grps[i].num_txq; j++) - vport->txq_grps[i].txqs[j]->q_vector = NULL; - kfree(vport->q_vectors); vport->q_vectors = NULL; } @@ -3636,13 +3614,15 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector) /** * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport * @vport: main vport structure - * @basename: name for the vector */ -static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) +static int idpf_vport_intr_req_irq(struct idpf_vport *vport) { struct idpf_adapter *adapter = vport->adapter; + const char *drv_name, *if_name, *vec_name; int vector, err, irq_num, vidx; - const char *vec_name; + + drv_name = dev_driver_string(&adapter->pdev->dev); + if_name = netdev_name(vport->netdev); for (vector = 0; vector < vport->num_q_vectors; vector++) { struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; @@ -3659,8 +3639,8 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) else continue; - q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", - basename, vec_name, vidx); + q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, + if_name, vec_name, vidx); err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, q_vector->name, q_vector); @@ -4170,7 +4150,6 @@ error: */ int idpf_vport_intr_init(struct idpf_vport *vport) { - char *int_name; int err; err = idpf_vport_intr_init_vec_idx(vport); @@ -4184,11 +4163,7 @@ int idpf_vport_intr_init(struct idpf_vport *vport) if (err) goto unroll_vectors_alloc; - int_name = kasprintf(GFP_KERNEL, "%s-%s", - dev_driver_string(&vport->adapter->pdev->dev), - vport->netdev->name); - - err = idpf_vport_intr_req_irq(vport, int_name); + err = idpf_vport_intr_req_irq(vport); if (err) goto unroll_vectors_alloc; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index b5333da20e..cdc84a27a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -2374,6 +2374,9 @@ mpwrq_cqe_out: if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) return; + if (unlikely(!cstrides)) + return; + wq = &rq->mpwqe.wq; wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c0ced4d315..d92f640bae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1599,6 +1599,7 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, { struct pci_dev *pdev = mlxsw_pci->pdev; char mrsr_pl[MLXSW_REG_MRSR_LEN]; + struct pci_dev *bridge; int err; if (!pci_reset_sbr_supported) { @@ -1615,6 +1616,9 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, sbr: device_lock_assert(&pdev->dev); + bridge = pci_upstream_bridge(pdev); + if (bridge) + pci_cfg_access_lock(bridge); pci_cfg_access_lock(pdev); pci_save_state(pdev); @@ -1624,6 +1628,8 @@ 
sbr: pci_restore_state(pdev); pci_cfg_access_unlock(pdev); + if (bridge) + pci_cfg_access_unlock(bridge); return err; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 466c4002f0..3a7f3a8b06 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -21,6 +21,7 @@ #define RGMII_IO_MACRO_CONFIG2 0x1C #define RGMII_IO_MACRO_DEBUG1 0x20 #define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28 +#define EMAC_WRAPPER_SGMII_PHY_CNTRL1 0xf4 /* RGMII_IO_MACRO_CONFIG fields */ #define RGMII_CONFIG_FUNC_CLK_EN BIT(30) @@ -79,6 +80,9 @@ #define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14) #define ETHQOS_MAC_CTRL_PORT_SEL BIT(15) +/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */ +#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3) + #define SGMII_10M_RX_CLK_DVDR 0x31 struct ethqos_emac_por { @@ -95,6 +99,7 @@ struct ethqos_emac_driver_data { bool has_integrated_pcs; u32 dma_addr_width; struct dwmac4_addrs dwmac4_addrs; + bool needs_sgmii_loopback; }; struct qcom_ethqos { @@ -114,6 +119,7 @@ struct qcom_ethqos { unsigned int num_por; bool rgmii_config_loopback_en; bool has_emac_ge_3; + bool needs_sgmii_loopback; }; static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset) @@ -191,8 +197,22 @@ ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed) clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate); } +static void +qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable) +{ + if (!ethqos->needs_sgmii_loopback || + ethqos->phy_mode != PHY_INTERFACE_MODE_2500BASEX) + return; + + rgmii_updatel(ethqos, + SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN, + enable ? SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN : 0, + EMAC_WRAPPER_SGMII_PHY_CNTRL1); +} + static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos) { + qcom_ethqos_set_sgmii_loopback(ethqos, true); rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN, RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG); } @@ -277,6 +297,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = { .has_emac_ge_3 = true, .link_clk_name = "phyaux", .has_integrated_pcs = true, + .needs_sgmii_loopback = true, .dma_addr_width = 36, .dwmac4_addrs = { .dma_chan = 0x00008100, @@ -674,6 +695,7 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mo { struct qcom_ethqos *ethqos = priv; + qcom_ethqos_set_sgmii_loopback(ethqos, false); ethqos->speed = speed; ethqos_update_link_clk(ethqos, speed); ethqos_configure(ethqos); @@ -809,6 +831,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ethqos->num_por = data->num_por; ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en; ethqos->has_emac_ge_3 = data->has_emac_ge_3; + ethqos->needs_sgmii_loopback = data->needs_sgmii_loopback; ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii"); if (IS_ERR(ethqos->link_clk)) diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c index 98ffbb1bbf..2d1c2e5706 100644 --- a/drivers/net/pse-pd/tps23881.c +++ b/drivers/net/pse-pd/tps23881.c @@ -5,6 +5,7 @@ * Copyright (c) 2023 Bootlin, Kory Maincent <kory.maincent@bootlin.com> */ +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/i2c.h> @@ -29,6 +30,8 @@ #define TPS23881_REG_TPON BIT(0) #define TPS23881_REG_FWREV 0x41 #define TPS23881_REG_DEVID 0x43 +#define TPS23881_REG_DEVID_MASK 0xF0 +#define TPS23881_DEVICE_ID 0x02 #define 
TPS23881_REG_SRAM_CTRL 0x60 #define TPS23881_REG_SRAM_DATA 0x61 @@ -750,7 +753,7 @@ static int tps23881_i2c_probe(struct i2c_client *client) if (ret < 0) return ret; - if (ret != 0x22) { + if (FIELD_GET(TPS23881_REG_DEVID_MASK, ret) != TPS23881_DEVICE_ID) { dev_err(dev, "Wrong device ID\n"); return -ENXIO; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 386d62769d..cfda32047c 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -201,6 +201,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) break; default: /* not ip - do not know what to do */ + kfree_skb(skbn); goto skip; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5161e7efda..f32e017b62 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3257,7 +3257,11 @@ static int virtnet_set_ringparam(struct net_device *dev, err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i, vi->intr_coal_tx.max_usecs, vi->intr_coal_tx.max_packets); - if (err) + + /* Don't break the tx resize action if the vq coalescing is not + * supported. The same is true for rx resize below. + */ + if (err && err != -EOPNOTSUPP) return err; } @@ -3272,7 +3276,7 @@ static int virtnet_set_ringparam(struct net_device *dev, vi->intr_coal_rx.max_usecs, vi->intr_coal_rx.max_packets); mutex_unlock(&vi->rq[i].dim_lock); - if (err) + if (err && err != -EOPNOTSUPP) return err; } } diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 121f27284b..1d287ed25a 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -2793,6 +2793,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev peer = ath12k_peer_find(ab, vdev_id, peer_mac); if (!peer) { spin_unlock_bh(&ab->base_lock); + crypto_free_shash(tfm); ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); return -ENOENT; } diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 55fde0d331..f92b4ce49d 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -1091,14 +1091,14 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) { int i; - set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); - for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; napi_enable(&irq_grp->napi); ath12k_pci_ext_grp_enable(irq_grp); } + + set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); } void ath12k_pci_ext_irq_disable(struct ath12k_base *ab) diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 2ea72d9e39..4d2931e544 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -23,6 +23,8 @@ MODULE_DESCRIPTION("USB basic driver for rtlwifi"); #define MAX_USBCTRL_VENDORREQ_TIMES 10 +static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw); + static void _usbctrl_vendorreq_sync(struct usb_device *udev, u8 reqtype, u16 value, void *pdata, u16 len) { @@ -285,9 +287,23 @@ static int _rtl_usb_init(struct ieee80211_hw *hw) } /* usb endpoint mapping */ err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw); - rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq; - _rtl_usb_init_tx(hw); - _rtl_usb_init_rx(hw); + if (err) + return err; + + rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq; + + err = _rtl_usb_init_tx(hw); + if (err) + 
return err; + + err = _rtl_usb_init_rx(hw); + if (err) + goto err_out; + + return 0; + +err_out: + _rtl_usb_cleanup_tx(hw); return err; } @@ -691,17 +707,13 @@ static int rtl_usb_start(struct ieee80211_hw *hw) } /*======================= tx =========================================*/ -static void rtl_usb_cleanup(struct ieee80211_hw *hw) +static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw) { u32 i; struct sk_buff *_skb; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; - /* clean up rx stuff. */ - _rtl_usb_cleanup_rx(hw); - - /* clean up tx stuff */ for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) { rtlusb->usb_tx_cleanup(hw, _skb); @@ -715,6 +727,12 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw) usb_kill_anchored_urbs(&rtlusb->tx_submitted); } +static void rtl_usb_cleanup(struct ieee80211_hw *hw) +{ + _rtl_usb_cleanup_rx(hw); + _rtl_usb_cleanup_tx(hw); +} + /* We may add some struct into struct rtl_usb later. Do deinit here. */ static void rtl_usb_deinit(struct ieee80211_hw *hw) { diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index b36aa9a6bb..312b57d7da 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -183,14 +183,17 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, struct sk_buff *skb) { - struct rtw89_pci_rxbd_info *rxbd_info; struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); + struct rtw89_pci_rxbd_info *rxbd_info; + __le32 info; rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; - rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS); - rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS); - rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE); - rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG); + info = rxbd_info->dword; + + rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS); + rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS); + rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE); + rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG); } static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev, diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 0cfa39361d..25ecc1a005 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -1388,7 +1388,7 @@ static void devm_apple_nvme_mempool_destroy(void *data) mempool_destroy(data); } -static int apple_nvme_probe(struct platform_device *pdev) +static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct apple_nvme *anv; @@ -1396,7 +1396,7 @@ static int apple_nvme_probe(struct platform_device *pdev) anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL); if (!anv) - return -ENOMEM; + return ERR_PTR(-ENOMEM); anv->dev = get_device(dev); anv->adminq.is_adminq = true; @@ -1516,10 +1516,26 @@ static int apple_nvme_probe(struct platform_device *pdev) goto put_dev; } + return anv; +put_dev: + put_device(anv->dev); + return ERR_PTR(ret); +} + +static int apple_nvme_probe(struct platform_device *pdev) +{ + struct apple_nvme *anv; + int ret; + + anv = apple_nvme_alloc(pdev); + if (IS_ERR(anv)) + return PTR_ERR(anv); + anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL); if (IS_ERR(anv->ctrl.admin_q)) { ret = -ENOMEM; - goto put_dev; + anv->ctrl.admin_q = NULL; + goto 
out_uninit_ctrl; } nvme_reset_ctrl(&anv->ctrl); @@ -1527,8 +1543,9 @@ static int apple_nvme_probe(struct platform_device *pdev) return 0; -put_dev: - put_device(anv->dev); +out_uninit_ctrl: + nvme_uninit_ctrl(&anv->ctrl); + nvme_put_ctrl(&anv->ctrl); return ret; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5a93f021ca..a823330567 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -826,9 +826,9 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, struct nvme_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct bio_vec bv = rq_integrity_vec(req); - iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), - rq_dma_dir(req), 0); + iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0); if (dma_mapping_error(dev->dev, iod->meta_dma)) return BLK_STS_IOERR; cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); @@ -968,7 +968,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req) struct nvme_iod *iod = blk_mq_rq_to_pdu(req); dma_unmap_page(dev->dev, iod->meta_dma, - rq_integrity_vec(req)->bv_len, rq_dma_dir(req)); + rq_integrity_vec(req).bv_len, rq_dma_dir(req)); } if (blk_rq_nr_phys_segments(req)) @@ -2933,6 +2933,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; } + /* + * NVMe SSD drops off the PCIe bus after system idle + * for 10 hours on a Lenovo N60z board. + */ + if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6")) + return NVME_QUIRK_NO_APST; + return 0; } diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index ddfbfec44f..43e0914256 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -39,6 +39,11 @@ static bool cros_ec_lpc_acpi_device_found; * be used as the base port for EC mapped memory. */ #define CROS_EC_LPC_QUIRK_REMAP_MEMORY BIT(0) +/* + * Indicates that lpc_driver_data.quirk_acpi_id should be used to find + * the ACPI device. + */ +#define CROS_EC_LPC_QUIRK_ACPI_ID BIT(1) /** * struct lpc_driver_data - driver data attached to a DMI device ID to indicate @@ -46,10 +51,12 @@ static bool cros_ec_lpc_acpi_device_found; * @quirks: a bitfield composed of quirks from CROS_EC_LPC_QUIRK_* * @quirk_mmio_memory_base: The first I/O port addressing EC mapped memory (used * when quirk ...REMAP_MEMORY is set.) + * @quirk_acpi_id: An ACPI HID to be used to find the ACPI device. 
*/ struct lpc_driver_data { u32 quirks; u16 quirk_mmio_memory_base; + const char *quirk_acpi_id; }; /** @@ -374,6 +381,26 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data) pm_system_wakeup(); } +static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level, + void *context, void **retval) +{ + *(struct acpi_device **)context = acpi_fetch_acpi_dev(handle); + return AE_CTRL_TERMINATE; +} + +static struct acpi_device *cros_ec_lpc_get_device(const char *id) +{ + struct acpi_device *adev = NULL; + acpi_status status = acpi_get_devices(id, cros_ec_lpc_parse_device, + &adev, NULL); + if (ACPI_FAILURE(status)) { + pr_warn(DRV_NAME ": Looking for %s failed\n", id); + return NULL; + } + + return adev; +} + static int cros_ec_lpc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -401,6 +428,16 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) if (quirks & CROS_EC_LPC_QUIRK_REMAP_MEMORY) ec_lpc->mmio_memory_base = driver_data->quirk_mmio_memory_base; + + if (quirks & CROS_EC_LPC_QUIRK_ACPI_ID) { + adev = cros_ec_lpc_get_device(driver_data->quirk_acpi_id); + if (!adev) { + dev_err(dev, "failed to get ACPI device '%s'", + driver_data->quirk_acpi_id); + return -ENODEV; + } + ACPI_COMPANION_SET(dev, adev); + } } /* @@ -661,23 +698,12 @@ static struct platform_device cros_ec_lpc_device = { .name = DRV_NAME }; -static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level, - void *context, void **retval) -{ - *(bool *)context = true; - return AE_CTRL_TERMINATE; -} - static int __init cros_ec_lpc_init(void) { int ret; - acpi_status status; const struct dmi_system_id *dmi_match; - status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device, - &cros_ec_lpc_acpi_device_found, NULL); - if (ACPI_FAILURE(status)) - pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME); + cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME); dmi_match = dmi_first_match(cros_ec_lpc_dmi_table); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 665fa95249..ddfccc2267 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -477,6 +477,7 @@ config LENOVO_YMC tristate "Lenovo Yoga Tablet Mode Control" depends on ACPI_WMI depends on INPUT + depends on IDEAPAD_LAPTOP select INPUT_SPARSEKMAP help This driver maps the Tablet Mode Control switch to SW_TABLET_MODE input diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c index a3dec14c30..3c153fb142 100644 --- a/drivers/platform/x86/amd/pmf/spc.c +++ b/drivers/platform/x86/amd/pmf/spc.c @@ -150,36 +150,26 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_ return 0; } -static int amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) +static void amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) { struct amd_sfh_info sfh_info; - int ret; + + /* Get the latest information from SFH */ + in->ev_info.user_present = false; /* Get ALS data */ - ret = amd_get_sfh_info(&sfh_info, MT_ALS); - if (!ret) + if (!amd_get_sfh_info(&sfh_info, MT_ALS)) in->ev_info.ambient_light = sfh_info.ambient_light; else - return ret; + dev_dbg(dev->dev, "ALS is not enabled/detected\n"); /* get HPD data */ - ret = amd_get_sfh_info(&sfh_info, MT_HPD); - if (ret) - return ret; - - switch (sfh_info.user_present) { - case SFH_NOT_DETECTED: - in->ev_info.user_present = 0xff; /* assume no sensors connected */ - break; - case 
SFH_USER_PRESENT: - in->ev_info.user_present = 1; - break; - case SFH_USER_AWAY: - in->ev_info.user_present = 0; - break; + if (!amd_get_sfh_info(&sfh_info, MT_HPD)) { + if (sfh_info.user_present == SFH_USER_PRESENT) + in->ev_info.user_present = true; + } else { + dev_dbg(dev->dev, "HPD is not enabled/detected\n"); } - - return 0; } void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index fcf13d88fd..490815917a 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -125,6 +125,7 @@ struct ideapad_rfk_priv { struct ideapad_private { struct acpi_device *adev; + struct mutex vpc_mutex; /* protects the VPC calls */ struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM]; struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM]; struct platform_device *platform_device; @@ -145,6 +146,7 @@ struct ideapad_private { bool touchpad_ctrl_via_ec : 1; bool ctrl_ps2_aux_port : 1; bool usb_charging : 1; + bool ymc_ec_trigger : 1; } features; struct { bool initialized; @@ -193,6 +195,12 @@ MODULE_PARM_DESC(touchpad_ctrl_via_ec, "Enable registering a 'touchpad' sysfs-attribute which can be used to manually " "tell the EC to enable/disable the touchpad. This may not work on all models."); +static bool ymc_ec_trigger __read_mostly; +module_param(ymc_ec_trigger, bool, 0444); +MODULE_PARM_DESC(ymc_ec_trigger, + "Enable EC triggering work-around to force emitting tablet mode events. " + "If you need this please report this to: platform-driver-x86@vger.kernel.org"); + /* * shared data */ @@ -297,6 +305,8 @@ static int debugfs_status_show(struct seq_file *s, void *data) struct ideapad_private *priv = s->private; unsigned long value; + guard(mutex)(&priv->vpc_mutex); + if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value)) seq_printf(s, "Backlight max: %lu\n", value); if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value)) @@ -415,7 +425,8 @@ static ssize_t camera_power_show(struct device *dev, unsigned long result; int err; - err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result); + scoped_guard(mutex, &priv->vpc_mutex) + err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result); if (err) return err; @@ -434,7 +445,8 @@ static ssize_t camera_power_store(struct device *dev, if (err) return err; - err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state); + scoped_guard(mutex, &priv->vpc_mutex) + err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state); if (err) return err; @@ -487,7 +499,8 @@ static ssize_t fan_mode_show(struct device *dev, unsigned long result; int err; - err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result); + scoped_guard(mutex, &priv->vpc_mutex) + err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result); if (err) return err; @@ -509,7 +522,8 @@ static ssize_t fan_mode_store(struct device *dev, if (state > 4 || state == 3) return -EINVAL; - err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state); + scoped_guard(mutex, &priv->vpc_mutex) + err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state); if (err) return err; @@ -594,7 +608,8 @@ static ssize_t touchpad_show(struct device *dev, unsigned long result; int err; - err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result); + scoped_guard(mutex, &priv->vpc_mutex) + err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result); if (err) return err; @@ -615,7 +630,8 @@ static ssize_t touchpad_store(struct device *dev, 
if (err) return err; - err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state); + scoped_guard(mutex, &priv->vpc_mutex) + err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state); if (err) return err; @@ -1012,6 +1028,8 @@ static int ideapad_rfk_set(void *data, bool blocked) struct ideapad_rfk_priv *priv = data; int opcode = ideapad_rfk_data[priv->dev].opcode; + guard(mutex)(&priv->priv->vpc_mutex); + return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked); } @@ -1025,6 +1043,8 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv) int i; if (priv->features.hw_rfkill_switch) { + guard(mutex)(&priv->vpc_mutex); + if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked)) return; hw_blocked = !hw_blocked; @@ -1198,8 +1218,9 @@ static void ideapad_input_novokey(struct ideapad_private *priv) { unsigned long long_pressed; - if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed)) - return; + scoped_guard(mutex, &priv->vpc_mutex) + if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed)) + return; if (long_pressed) ideapad_input_report(priv, 17); @@ -1211,8 +1232,9 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) { unsigned long bit, value; - if (read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value)) - return; + scoped_guard(mutex, &priv->vpc_mutex) + if (read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value)) + return; for_each_set_bit (bit, &value, 16) { switch (bit) { @@ -1245,6 +1267,8 @@ static int ideapad_backlight_get_brightness(struct backlight_device *blightdev) unsigned long now; int err; + guard(mutex)(&priv->vpc_mutex); + err = read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now); if (err) return err; @@ -1257,6 +1281,8 @@ static int ideapad_backlight_update_status(struct backlight_device *blightdev) struct ideapad_private *priv = bl_get_data(blightdev); int err; + guard(mutex)(&priv->vpc_mutex); + err = write_ec_cmd(priv->adev->handle, VPCCMD_W_BL, blightdev->props.brightness); if (err) @@ -1334,6 +1360,8 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv) if (!blightdev) return; + guard(mutex)(&priv->vpc_mutex); + if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power)) return; @@ -1346,7 +1374,8 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv) /* if we control brightness via acpi video driver */ if (!priv->blightdev) - read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now); + scoped_guard(mutex, &priv->vpc_mutex) + read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now); else backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY); } @@ -1571,7 +1600,8 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_ int ret; /* Without reading from EC touchpad LED doesn't switch state */ - ret = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value); + scoped_guard(mutex, &priv->vpc_mutex) + ret = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value); if (ret) return; @@ -1599,16 +1629,92 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_ priv->r_touchpad_val = value; } +static const struct dmi_system_id ymc_ec_trigger_quirk_dmi_table[] = { + { + /* Lenovo Yoga 7 14ARB7 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82QF"), + }, + }, + { + /* Lenovo Yoga 7 14ACN6 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82N7"), + }, + }, + { } +}; + +static void 
ideapad_laptop_trigger_ec(void) +{ + struct ideapad_private *priv; + int ret; + + guard(mutex)(&ideapad_shared_mutex); + + priv = ideapad_shared; + if (!priv) + return; + + if (!priv->features.ymc_ec_trigger) + return; + + scoped_guard(mutex, &priv->vpc_mutex) + ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_YMC, 1); + if (ret) + dev_warn(&priv->platform_device->dev, "Could not write YMC: %d\n", ret); +} + +static int ideapad_laptop_nb_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + switch (action) { + case IDEAPAD_LAPTOP_YMC_EVENT: + ideapad_laptop_trigger_ec(); + break; + } + + return 0; +} + +static struct notifier_block ideapad_laptop_notifier = { + .notifier_call = ideapad_laptop_nb_notify, +}; + +static BLOCKING_NOTIFIER_HEAD(ideapad_laptop_chain_head); + +int ideapad_laptop_register_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&ideapad_laptop_chain_head, nb); +} +EXPORT_SYMBOL_NS_GPL(ideapad_laptop_register_notifier, IDEAPAD_LAPTOP); + +int ideapad_laptop_unregister_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&ideapad_laptop_chain_head, nb); +} +EXPORT_SYMBOL_NS_GPL(ideapad_laptop_unregister_notifier, IDEAPAD_LAPTOP); + +void ideapad_laptop_call_notifier(unsigned long action, void *data) +{ + blocking_notifier_call_chain(&ideapad_laptop_chain_head, action, data); +} +EXPORT_SYMBOL_NS_GPL(ideapad_laptop_call_notifier, IDEAPAD_LAPTOP); + static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) { struct ideapad_private *priv = data; unsigned long vpc1, vpc2, bit; - if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1)) - return; + scoped_guard(mutex, &priv->vpc_mutex) { + if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1)) + return; - if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2)) - return; + if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2)) + return; + } vpc1 = (vpc2 << 8) | vpc1; @@ -1735,6 +1841,8 @@ static void ideapad_check_features(struct ideapad_private *priv) priv->features.ctrl_ps2_aux_port = ctrl_ps2_aux_port || dmi_check_system(ctrl_ps2_aux_port_list); priv->features.touchpad_ctrl_via_ec = touchpad_ctrl_via_ec; + priv->features.ymc_ec_trigger = + ymc_ec_trigger || dmi_check_system(ymc_ec_trigger_quirk_dmi_table); if (!read_ec_data(handle, VPCCMD_R_FAN, &val)) priv->features.fan_mode = true; @@ -1915,6 +2023,10 @@ static int ideapad_acpi_add(struct platform_device *pdev) priv->adev = adev; priv->platform_device = pdev; + err = devm_mutex_init(&pdev->dev, &priv->vpc_mutex); + if (err) + return err; + ideapad_check_features(priv); err = ideapad_sysfs_init(priv); @@ -1983,6 +2095,8 @@ static int ideapad_acpi_add(struct platform_device *pdev) if (err) goto shared_init_failed; + ideapad_laptop_register_notifier(&ideapad_laptop_notifier); + return 0; shared_init_failed: @@ -2015,6 +2129,8 @@ static void ideapad_acpi_remove(struct platform_device *pdev) struct ideapad_private *priv = dev_get_drvdata(&pdev->dev); int i; + ideapad_laptop_unregister_notifier(&ideapad_laptop_notifier); + ideapad_shared_exit(priv); acpi_remove_notify_handler(priv->adev->handle, diff --git a/drivers/platform/x86/ideapad-laptop.h b/drivers/platform/x86/ideapad-laptop.h index 4498a96de5..948cc61800 100644 --- a/drivers/platform/x86/ideapad-laptop.h +++ b/drivers/platform/x86/ideapad-laptop.h @@ -12,6 +12,15 @@ #include <linux/acpi.h> #include <linux/jiffies.h> #include <linux/errno.h> +#include <linux/notifier.h> + +enum ideapad_laptop_notifier_actions { + IDEAPAD_LAPTOP_YMC_EVENT, +}; + +int 
ideapad_laptop_register_notifier(struct notifier_block *nb); +int ideapad_laptop_unregister_notifier(struct notifier_block *nb); +void ideapad_laptop_call_notifier(unsigned long action, void *data); enum { VPCCMD_R_VPC1 = 0x10, diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 282e4bfe30..be3d51ed0e 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -221,8 +221,8 @@ static int doscan(void *data) */ static void ifs_test_core(int cpu, struct device *dev) { + union ifs_status status = {}; union ifs_scan activate; - union ifs_status status; unsigned long timeout; struct ifs_data *ifsd; int to_start, to_stop; diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c index 9b7ce03ba0..a353e830b6 100644 --- a/drivers/platform/x86/intel/vbtn.c +++ b/drivers/platform/x86/intel/vbtn.c @@ -7,11 +7,13 @@ */ #include <linux/acpi.h> +#include <linux/cleanup.h> #include <linux/dmi.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/suspend.h> #include "../dual_accel_detect.h" @@ -66,6 +68,7 @@ static const struct key_entry intel_vbtn_switchmap[] = { }; struct intel_vbtn_priv { + struct mutex mutex; /* Avoid notify_handler() racing with itself */ struct input_dev *buttons_dev; struct input_dev *switches_dev; bool dual_accel; @@ -155,6 +158,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) bool autorelease; int ret; + guard(mutex)(&priv->mutex); + if ((ke = sparse_keymap_entry_from_scancode(priv->buttons_dev, event))) { if (!priv->has_buttons) { dev_warn(&device->dev, "Warning: received 0x%02x button event on a device without buttons, please report this.\n", @@ -290,6 +295,10 @@ static int intel_vbtn_probe(struct platform_device *device) return -ENOMEM; dev_set_drvdata(&device->dev, priv); + err = devm_mutex_init(&device->dev, &priv->mutex); + if (err) + return err; + priv->dual_accel = dual_accel; priv->has_buttons = has_buttons; priv->has_switches = has_switches; diff --git a/drivers/platform/x86/lenovo-ymc.c b/drivers/platform/x86/lenovo-ymc.c index e1fbc35504..e0bbd6a14a 100644 --- a/drivers/platform/x86/lenovo-ymc.c +++ b/drivers/platform/x86/lenovo-ymc.c @@ -20,32 +20,10 @@ #define LENOVO_YMC_QUERY_INSTANCE 0 #define LENOVO_YMC_QUERY_METHOD 0x01 -static bool ec_trigger __read_mostly; -module_param(ec_trigger, bool, 0444); -MODULE_PARM_DESC(ec_trigger, "Enable EC triggering work-around to force emitting tablet mode events"); - static bool force; module_param(force, bool, 0444); MODULE_PARM_DESC(force, "Force loading on boards without a convertible DMI chassis-type"); -static const struct dmi_system_id ec_trigger_quirk_dmi_table[] = { - { - /* Lenovo Yoga 7 14ARB7 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "82QF"), - }, - }, - { - /* Lenovo Yoga 7 14ACN6 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "82N7"), - }, - }, - { } -}; - static const struct dmi_system_id allowed_chasis_types_dmi_table[] = { { .matches = { @@ -62,21 +40,8 @@ static const struct dmi_system_id allowed_chasis_types_dmi_table[] = { struct lenovo_ymc_private { struct input_dev *input_dev; - struct acpi_device *ec_acpi_dev; }; -static void lenovo_ymc_trigger_ec(struct wmi_device *wdev, struct lenovo_ymc_private *priv) -{ - int err; - - if 
(!priv->ec_acpi_dev) - return; - - err = write_ec_cmd(priv->ec_acpi_dev->handle, VPCCMD_W_YMC, 1); - if (err) - dev_warn(&wdev->dev, "Could not write YMC: %d\n", err); -} - static const struct key_entry lenovo_ymc_keymap[] = { /* Laptop */ { KE_SW, 0x01, { .sw = { SW_TABLET_MODE, 0 } } }, @@ -125,11 +90,9 @@ static void lenovo_ymc_notify(struct wmi_device *wdev, union acpi_object *data) free_obj: kfree(obj); - lenovo_ymc_trigger_ec(wdev, priv); + ideapad_laptop_call_notifier(IDEAPAD_LAPTOP_YMC_EVENT, &code); } -static void acpi_dev_put_helper(void *p) { acpi_dev_put(p); } - static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx) { struct lenovo_ymc_private *priv; @@ -143,29 +106,10 @@ static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx) return -ENODEV; } - ec_trigger |= dmi_check_system(ec_trigger_quirk_dmi_table); - priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - if (ec_trigger) { - pr_debug("Lenovo YMC enable EC triggering.\n"); - priv->ec_acpi_dev = acpi_dev_get_first_match_dev("VPC2004", NULL, -1); - - if (!priv->ec_acpi_dev) { - dev_err(&wdev->dev, "Could not find EC ACPI device.\n"); - return -ENODEV; - } - err = devm_add_action_or_reset(&wdev->dev, - acpi_dev_put_helper, priv->ec_acpi_dev); - if (err) { - dev_err(&wdev->dev, - "Could not clean up EC ACPI device: %d\n", err); - return err; - } - } - input_dev = devm_input_allocate_device(&wdev->dev); if (!input_dev) return -ENOMEM; @@ -192,7 +136,6 @@ static int lenovo_ymc_probe(struct wmi_device *wdev, const void *ctx) dev_set_drvdata(&wdev->dev, priv); /* Report the state for the first time on probe */ - lenovo_ymc_trigger_ec(wdev, priv); lenovo_ymc_notify(wdev, NULL); return 0; } @@ -217,3 +160,4 @@ module_wmi_driver(lenovo_ymc_driver); MODULE_AUTHOR("Gergo Koteles <soyer@irl.hu>"); MODULE_DESCRIPTION("Lenovo Yoga Mode Control driver"); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IDEAPAD_LAPTOP); diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index b5903193e2..ac05942e4e 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -178,18 +178,18 @@ static inline int axp288_charger_set_cv(struct axp288_chrg_info *info, int cv) u8 reg_val; int ret; - if (cv <= CV_4100MV) { - reg_val = CHRG_CCCV_CV_4100MV; - cv = CV_4100MV; - } else if (cv <= CV_4150MV) { - reg_val = CHRG_CCCV_CV_4150MV; - cv = CV_4150MV; - } else if (cv <= CV_4200MV) { + if (cv >= CV_4350MV) { + reg_val = CHRG_CCCV_CV_4350MV; + cv = CV_4350MV; + } else if (cv >= CV_4200MV) { reg_val = CHRG_CCCV_CV_4200MV; cv = CV_4200MV; + } else if (cv >= CV_4150MV) { + reg_val = CHRG_CCCV_CV_4150MV; + cv = CV_4150MV; } else { - reg_val = CHRG_CCCV_CV_4350MV; - cv = CV_4350MV; + reg_val = CHRG_CCCV_CV_4100MV; + cv = CV_4100MV; } reg_val = reg_val << CHRG_CCCV_CV_BIT_POS; @@ -337,8 +337,8 @@ static int axp288_charger_usb_set_property(struct power_supply *psy, } break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: - scaled_val = min(val->intval, info->max_cv); - scaled_val = DIV_ROUND_CLOSEST(scaled_val, 1000); + scaled_val = DIV_ROUND_CLOSEST(val->intval, 1000); + scaled_val = min(scaled_val, info->max_cv); ret = axp288_charger_set_cv(info, scaled_val); if (ret < 0) { dev_warn(&info->pdev->dev, "set charge voltage failed\n"); diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c index ec163d1bcd..44c6301f5f 100644 --- a/drivers/power/supply/qcom_battmgr.c +++ b/drivers/power/supply/qcom_battmgr.c @@ 
-486,7 +486,7 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy, int ret; if (!battmgr->service_up) - return -ENODEV; + return -EAGAIN; if (battmgr->variant == QCOM_BATTMGR_SC8280XP) ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); @@ -683,7 +683,7 @@ static int qcom_battmgr_ac_get_property(struct power_supply *psy, int ret; if (!battmgr->service_up) - return -ENODEV; + return -EAGAIN; ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); if (ret) @@ -748,7 +748,7 @@ static int qcom_battmgr_usb_get_property(struct power_supply *psy, int ret; if (!battmgr->service_up) - return -ENODEV; + return -EAGAIN; if (battmgr->variant == QCOM_BATTMGR_SC8280XP) ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); @@ -867,7 +867,7 @@ static int qcom_battmgr_wls_get_property(struct power_supply *psy, int ret; if (!battmgr->service_up) - return -ENODEV; + return -EAGAIN; if (battmgr->variant == QCOM_BATTMGR_SC8280XP) ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp); diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c index 32eafe2c00..7a27b262fb 100644 --- a/drivers/power/supply/rt5033_battery.c +++ b/drivers/power/supply/rt5033_battery.c @@ -159,6 +159,7 @@ static int rt5033_battery_probe(struct i2c_client *client) return -EINVAL; } + i2c_set_clientdata(client, battery); psy_cfg.of_node = client->dev.of_node; psy_cfg.drv_data = battery; diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c index f9e164be75..944e75beb1 100644 --- a/drivers/s390/char/sclp_sd.c +++ b/drivers/s390/char/sclp_sd.c @@ -320,8 +320,14 @@ static int sclp_sd_store_data(struct sclp_sd_data *result, u8 di) &esize); if (rc) { /* Cancel running request if interrupted */ - if (rc == -ERESTARTSYS) - sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL); + if (rc == -ERESTARTSYS) { + if (sclp_sd_sync(page, SD_EQ_HALT, di, 0, 0, NULL, NULL)) { + pr_warn("Could not stop Store Data request - leaking at least %zu bytes\n", + (size_t)dsize * PAGE_SIZE); + data = NULL; + asce = 0; + } + } vfree(data); goto out; } diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index bce639a6cc..c051692395 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -3453,6 +3453,17 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, scmd->sc_data_direction); priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ } else { + /* + * Some firmware versions byte-swap the REPORT ZONES command + * reply from ATA-ZAC devices by directly accessing in the host + * buffer. This does not respect the default command DMA + * direction and causes IOMMU page faults on some architectures + * with an IOMMU enforcing write mappings (e.g. AMD hosts). + * Avoid such issue by making the REPORT ZONES buffer mapping + * bi-directional. 
+ */ + if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES) + scmd->sc_data_direction = DMA_BIDIRECTIONAL; sg_scmd = scsi_sglist(scmd); sges_left = scsi_dma_map(scmd); } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index b2bcf4a27d..b785a7e88b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2671,6 +2671,22 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); } +static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd) +{ + /* + * Some firmware versions byte-swap the REPORT ZONES command reply from + * ATA-ZAC devices by directly accessing in the host buffer. This does + * not respect the default command DMA direction and causes IOMMU page + * faults on some architectures with an IOMMU enforcing write mappings + * (e.g. AMD hosts). Avoid such issue by making the report zones buffer + * mapping bi-directional. + */ + if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES) + cmd->sc_data_direction = DMA_BIDIRECTIONAL; + + return scsi_dma_map(cmd); +} + /** * _base_build_sg_scmd - main sg creation routine * pcie_device is unused here! @@ -2717,7 +2733,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; sg_scmd = scsi_sglist(scmd); - sges_left = scsi_dma_map(scmd); + sges_left = _base_scsi_dma_map(scmd); if (sges_left < 0) return -ENOMEM; @@ -2861,7 +2877,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, } sg_scmd = scsi_sglist(scmd); - sges_left = scsi_dma_map(scmd); + sges_left = _base_scsi_dma_map(scmd); if (sges_left < 0) return -ENOMEM; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 1b7561abe0..6b64af7d49 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -4119,6 +4119,8 @@ static int sd_resume(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); + sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); + if (opal_unlock_from_suspend(sdkp->opal_dev)) { sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n"); return -EIO; @@ -4135,13 +4137,12 @@ static int sd_resume_common(struct device *dev, bool runtime) if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ return 0; - sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); - if (!sd_do_start_stop(sdkp->device, runtime)) { sdkp->suspended = false; return 0; } + sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); ret = sd_start_stop_device(sdkp, 1); if (!ret) { sd_resume(dev); diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c index ecddb60bd6..e785197408 100644 --- a/drivers/soc/qcom/icc-bwmon.c +++ b/drivers/soc/qcom/icc-bwmon.c @@ -783,9 +783,14 @@ static int bwmon_probe(struct platform_device *pdev) bwmon->dev = dev; bwmon_disable(bwmon); - ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr, - bwmon_intr_thread, - IRQF_ONESHOT, dev_name(dev), bwmon); + + /* + * SoCs with multiple cpu-bwmon instances can end up using a shared interrupt + * line. 
Using the devm_ variant might result in the IRQ handler being executed + * after bwmon_disable in bwmon_remove() + */ + ret = request_threaded_irq(bwmon->irq, bwmon_intr, bwmon_intr_thread, + IRQF_ONESHOT | IRQF_SHARED, dev_name(dev), bwmon); if (ret) return dev_err_probe(dev, ret, "failed to request IRQ\n"); @@ -800,6 +805,7 @@ static void bwmon_remove(struct platform_device *pdev) struct icc_bwmon *bwmon = platform_get_drvdata(pdev); bwmon_disable(bwmon); + free_irq(bwmon->irq, bwmon); } static const struct icc_bwmon_data msm8998_bwmon_data = { diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index aa5ed254be..f2d7eedd32 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -296,7 +296,7 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi) static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) { struct lpspi_config config = fsl_lpspi->config; - unsigned int perclk_rate, scldiv; + unsigned int perclk_rate, scldiv, div; u8 prescale; perclk_rate = clk_get_rate(fsl_lpspi->clk_per); @@ -313,8 +313,10 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi) return -EINVAL; } + div = DIV_ROUND_UP(perclk_rate, config.speed_hz); + for (prescale = 0; prescale < 8; prescale++) { - scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2; + scldiv = div / (1 << prescale) - 2; if (scldiv < 256) { fsl_lpspi->config.prescale = prescale; break; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 05e6d007f9..5304728c68 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -700,6 +700,7 @@ static const struct class spidev_class = { }; static const struct spi_device_id spidev_spi_ids[] = { + { .name = "bh2228fv" }, { .name = "dh2228fv" }, { .name = "ltc2488" }, { .name = "sx1301" }, diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 791cdc160c..f36f57bde0 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -398,7 +398,7 @@ static int pmic_arb_fmt_read_cmd(struct spmi_pmic_arb_bus *bus, u8 opc, u8 sid, *offset = rc; if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { - dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", + dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested\n", PMIC_ARB_MAX_TRANS_BYTES, len); return -EINVAL; } @@ -477,7 +477,7 @@ static int pmic_arb_fmt_write_cmd(struct spmi_pmic_arb_bus *bus, u8 opc, *offset = rc; if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { - dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", + dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested\n", PMIC_ARB_MAX_TRANS_BYTES, len); return -EINVAL; } @@ -1702,7 +1702,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev, index = of_property_match_string(node, "reg-names", "cnfg"); if (index < 0) { - dev_err(dev, "cnfg reg region missing"); + dev_err(dev, "cnfg reg region missing\n"); return -EINVAL; } @@ -1712,7 +1712,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev, index = of_property_match_string(node, "reg-names", "intr"); if (index < 0) { - dev_err(dev, "intr reg region missing"); + dev_err(dev, "intr reg region missing\n"); return -EINVAL; } @@ -1737,8 +1737,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev, dev_dbg(&pdev->dev, "adding irq domain for bus %d\n", bus_index); - bus->domain = irq_domain_add_tree(dev->of_node, - &pmic_arb_irq_domain_ops, bus); + bus->domain = 
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 05e6d007f9..5304728c68 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -700,6 +700,7 @@ static const struct class spidev_class = {
 };
 static const struct spi_device_id spidev_spi_ids[] = {
+    { .name = "bh2228fv" },
     { .name = "dh2228fv" },
     { .name = "ltc2488" },
     { .name = "sx1301" },
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 791cdc160c..f36f57bde0 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -398,7 +398,7 @@ static int pmic_arb_fmt_read_cmd(struct spmi_pmic_arb_bus *bus, u8 opc, u8 sid,
     *offset = rc;
     if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
-        dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
+        dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested\n",
             PMIC_ARB_MAX_TRANS_BYTES, len);
         return -EINVAL;
     }
@@ -477,7 +477,7 @@ static int pmic_arb_fmt_write_cmd(struct spmi_pmic_arb_bus *bus, u8 opc,
     *offset = rc;
     if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
-        dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
+        dev_err(&bus->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested\n",
             PMIC_ARB_MAX_TRANS_BYTES, len);
         return -EINVAL;
     }
@@ -1702,7 +1702,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev,
     index = of_property_match_string(node, "reg-names", "cnfg");
     if (index < 0) {
-        dev_err(dev, "cnfg reg region missing");
+        dev_err(dev, "cnfg reg region missing\n");
         return -EINVAL;
     }
@@ -1712,7 +1712,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev,
     index = of_property_match_string(node, "reg-names", "intr");
     if (index < 0) {
-        dev_err(dev, "intr reg region missing");
+        dev_err(dev, "intr reg region missing\n");
         return -EINVAL;
     }
@@ -1737,8 +1737,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev,
     dev_dbg(&pdev->dev, "adding irq domain for bus %d\n", bus_index);
-    bus->domain = irq_domain_add_tree(dev->of_node,
-                                      &pmic_arb_irq_domain_ops, bus);
+    bus->domain = irq_domain_add_tree(node, &pmic_arb_irq_domain_ops, bus);
     if (!bus->domain) {
         dev_err(&pdev->dev, "unable to create irq_domain\n");
         return -ENOMEM;
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index a180a98bb9..5b18a46a10 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -401,10 +401,10 @@ static void hfi_disable(void)
  * intel_hfi_online() - Enable HFI on @cpu
  * @cpu: CPU in which the HFI will be enabled
  *
- * Enable the HFI to be used in @cpu. The HFI is enabled at the die/package
- * level. The first CPU in the die/package to come online does the full HFI
+ * Enable the HFI to be used in @cpu. The HFI is enabled at the package
+ * level. The first CPU in the package to come online does the full HFI
  * initialization. Subsequent CPUs will just link themselves to the HFI
- * instance of their die/package.
+ * instance of their package.
  *
  * This function is called before enabling the thermal vector in the local APIC
  * in order to ensure that @cpu has an associated HFI instance when it receives
@@ -414,31 +414,31 @@ void intel_hfi_online(unsigned int cpu)
 {
     struct hfi_instance *hfi_instance;
     struct hfi_cpu_info *info;
-    u16 die_id;
+    u16 pkg_id;
     /* Nothing to do if hfi_instances are missing. */
     if (!hfi_instances)
         return;
     /*
-     * Link @cpu to the HFI instance of its package/die. It does not
+     * Link @cpu to the HFI instance of its package. It does not
      * matter whether the instance has been initialized.
      */
     info = &per_cpu(hfi_cpu_info, cpu);
-    die_id = topology_logical_die_id(cpu);
+    pkg_id = topology_logical_package_id(cpu);
     hfi_instance = info->hfi_instance;
     if (!hfi_instance) {
-        if (die_id >= max_hfi_instances)
+        if (pkg_id >= max_hfi_instances)
             return;
-        hfi_instance = &hfi_instances[die_id];
+        hfi_instance = &hfi_instances[pkg_id];
         info->hfi_instance = hfi_instance;
     }
     init_hfi_cpu_index(info);
     /*
-     * Now check if the HFI instance of the package/die of @cpu has been
+     * Now check if the HFI instance of the package of @cpu has been
      * initialized (by checking its header). In such case, all we have to
      * do is to add @cpu to this instance's cpumask and enable the instance
      * if needed.
@@ -504,7 +504,7 @@ free_hw_table:
  *
  * On some processors, hardware remembers previous programming settings even
  * after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the
- * die/package of @cpu are offline. See note in intel_hfi_online().
+ * package of @cpu are offline. See note in intel_hfi_online().
  */
 void intel_hfi_offline(unsigned int cpu)
 {
@@ -674,9 +674,13 @@ void __init intel_hfi_init(void)
     if (hfi_parse_features())
         return;
-    /* There is one HFI instance per die/package. */
-    max_hfi_instances = topology_max_packages() *
-                        topology_max_dies_per_package();
+    /*
+     * Note: HFI resources are managed at the physical package scope.
+     * There could be platforms that enumerate packages as Linux dies.
+     * Special handling would be needed if this happens on an HFI-capable
+     * platform.
+     */
+    max_hfi_instances = topology_max_packages();
     /*
      * This allocation may fail. CPU hotplug callbacks must check
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index bf0065d1c8..9547aa8f45 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -326,6 +326,7 @@ struct sc16is7xx_one {
     struct kthread_work reg_work;
     struct kthread_delayed_work ms_work;
     struct sc16is7xx_one_config config;
+    unsigned char buf[SC16IS7XX_FIFO_SIZE]; /* Rx buffer. */
     unsigned int old_mctrl;
     u8 old_lcr; /* Value before EFR access. */
     bool irda_mode;
@@ -339,7 +340,6 @@ struct sc16is7xx_port {
     unsigned long gpio_valid_mask;
 #endif
     u8 mctrl_mask;
-    unsigned char buf[SC16IS7XX_FIFO_SIZE];
     struct kthread_worker kworker;
     struct task_struct *kworker_task;
     struct sc16is7xx_one p[];
@@ -591,6 +591,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
                           SC16IS7XX_MCR_CLKSEL_BIT,
                           prescaler == 1 ? 0 : SC16IS7XX_MCR_CLKSEL_BIT);
+    mutex_lock(&one->efr_lock);
+
     /* Backup LCR and access special register set (DLL/DLH) */
     lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
     sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
@@ -605,24 +607,26 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
     /* Restore LCR and access to general register set */
     sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+    mutex_unlock(&one->efr_lock);
+
     return DIV_ROUND_CLOSEST((clk / prescaler) / 16, div);
 }
 static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
                                 unsigned int iir)
 {
-    struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+    struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
     unsigned int lsr = 0, bytes_read, i;
     bool read_lsr = (iir == SC16IS7XX_IIR_RLSE_SRC) ? true : false;
     u8 ch, flag;
-    if (unlikely(rxlen >= sizeof(s->buf))) {
+    if (unlikely(rxlen >= sizeof(one->buf))) {
         dev_warn_ratelimited(port->dev,
                              "ttySC%i: Possible RX FIFO overrun: %d\n",
                              port->line, rxlen);
         port->icount.buf_overrun++;
         /* Ensure sanity of RX level */
-        rxlen = sizeof(s->buf);
+        rxlen = sizeof(one->buf);
     }
     while (rxlen) {
@@ -635,10 +639,10 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
         lsr = 0;
         if (read_lsr) {
-            s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
+            one->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
             bytes_read = 1;
         } else {
-            sc16is7xx_fifo_read(port, s->buf, rxlen);
+            sc16is7xx_fifo_read(port, one->buf, rxlen);
             bytes_read = rxlen;
         }
@@ -671,7 +675,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
         }
         for (i = 0; i < bytes_read; ++i) {
-            ch = s->buf[i];
+            ch = one->buf[i];
             if (uart_handle_sysrq_char(port, ch))
                 continue;
@@ -689,10 +693,10 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
 static void sc16is7xx_handle_tx(struct uart_port *port)
 {
-    struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
     struct tty_port *tport = &port->state->port;
     unsigned long flags;
     unsigned int txlen;
+    unsigned char *tail;
     if (unlikely(port->x_char)) {
         sc16is7xx_port_write(port, SC16IS7XX_THR_REG, port->x_char);
@@ -717,8 +721,9 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
         txlen = 0;
     }
-    txlen = uart_fifo_out(port, s->buf, txlen);
-    sc16is7xx_fifo_write(port, s->buf, txlen);
+    txlen = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, txlen);
+    sc16is7xx_fifo_write(port, tail, txlen);
+    uart_xmit_advance(port, txlen);
     uart_port_lock_irqsave(port, &flags);
     if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 2a8006e3d6..9967444eae 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -881,6 +881,14 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
     new_flags = (__force upf_t)new_info->flags;
     old_custom_divisor = uport->custom_divisor;
+    if (!(uport->flags & UPF_FIXED_PORT)) {
+        unsigned int uartclk = new_info->baud_base * 16;
+        /* check needs to be done here before other settings made */
+        if (uartclk == 0) {
+            retval = -EINVAL;
+            goto exit;
+        }
+    }
     if (!capable(CAP_SYS_ADMIN)) {
         retval = -EPERM;
         if (change_irq || change_port ||
diff --git a/drivers/tty/vt/conmakehash.c b/drivers/tty/vt/conmakehash.c
index dc2177fec7..82d9db68b2 100644
--- a/drivers/tty/vt/conmakehash.c
+++ b/drivers/tty/vt/conmakehash.c
@@ -11,6 +11,8 @@
  * Copyright (C) 1995-1997 H. Peter Anvin
  */
+#include <libgen.h>
+#include <linux/limits.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sysexits.h>
@@ -76,8 +78,8 @@ static void addpair(int fp, int un)
 int main(int argc, char *argv[])
 {
     FILE *ctbl;
-    const char *tblname, *rel_tblname;
-    const char *abs_srctree;
+    const char *tblname;
+    char base_tblname[PATH_MAX];
     char buffer[65536];
     int fontlen;
     int i, nuni, nent;
@@ -102,16 +104,6 @@ int main(int argc, char *argv[])
         }
     }
-    abs_srctree = getenv("abs_srctree");
-    if (abs_srctree && !strncmp(abs_srctree, tblname, strlen(abs_srctree)))
-    {
-        rel_tblname = tblname + strlen(abs_srctree);
-        while (*rel_tblname == '/')
-            ++rel_tblname;
-    }
-    else
-        rel_tblname = tblname;
-
     /* For now we assume the default font is always 256 characters. */
     fontlen = 256;
@@ -253,6 +245,8 @@ int main(int argc, char *argv[])
     for ( i = 0 ; i < fontlen ; i++ )
         nuni += unicount[i];
+    strncpy(base_tblname, tblname, PATH_MAX);
+    base_tblname[PATH_MAX - 1] = 0;
     printf("\
 /*\n\
  * Do not edit this file; it was automatically generated by\n\
@@ -264,7 +258,7 @@ int main(int argc, char *argv[])
 #include <linux/types.h>\n\
 \n\
 u8 dfont_unicount[%d] = \n\
-{\n\t", rel_tblname, fontlen);
+{\n\t", basename(base_tblname), fontlen);
     for ( i = 0 ; i < fontlen ; i++ )
     {
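Note on the conmakehash change above: POSIX basename() may modify the string it is given, so the tool now works on a bounded copy of the path rather than the original argument. A minimal userspace illustration of the same pattern (using <limits.h> for PATH_MAX; the default path below is only an example):

/* Minimal illustration of the basename()-on-a-copy pattern used above. */
#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char *argv[])
{
    const char *tblname = argc > 1 ? argv[1] : "drivers/tty/vt/cp437.uni";
    char base_tblname[PATH_MAX];

    /* basename() may modify its argument, so never pass the original string. */
    strncpy(base_tblname, tblname, PATH_MAX);
    base_tblname[PATH_MAX - 1] = 0;
    printf("%s\n", basename(base_tblname));
    return 0;
}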
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index f42d99ce5b..81d6f0cfb1 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -329,6 +329,11 @@ static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
     return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev);
 }
+static inline int ufshcd_rpm_get_if_active(struct ufs_hba *hba)
+{
+    return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev);
+}
+
 static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
 {
     return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 46433ecf0c..5864d65448 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -4086,11 +4086,16 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
             min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US - delta;
         else
-            return; /* no more delay required */
+            min_sleep_time_us = 0; /* no more delay required */
     }
-    /* allow sleep for extra 50us if needed */
-    usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+    if (min_sleep_time_us > 0) {
+        /* allow sleep for extra 50us if needed */
+        usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+    }
+
+    /* update the last_dme_cmd_tstamp */
+    hba->last_dme_cmd_tstamp = ktime_get();
 }
 /**
@@ -8171,7 +8176,10 @@ static void ufshcd_update_rtc(struct ufs_hba *hba)
      */
     val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
-    ufshcd_rpm_get_sync(hba);
+    /* Skip update RTC if RPM state is not RPM_ACTIVE */
+    if (ufshcd_rpm_get_if_active(hba) <= 0)
+        return;
+
     err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED, 0, 0, &val);
     ufshcd_rpm_put_sync(hba);
@@ -10220,9 +10228,6 @@ int ufshcd_system_restore(struct device *dev)
      */
     ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
-    /* Resuming from hibernate, assume that link was OFF */
-    ufshcd_set_link_off(hba);
-
     return 0;
 }
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 1f21459b11..a7bc715bca 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3731,10 +3731,10 @@ static int ffs_func_set_alt(struct usb_function *f,
     struct ffs_data *ffs = func->ffs;
     int ret = 0, intf;
-    if (alt > MAX_ALT_SETTINGS)
-        return -EINVAL;
-
     if (alt != (unsigned)-1) {
+        if (alt > MAX_ALT_SETTINGS)
+            return -EINVAL;
+
         intf = ffs_func_revmap_intf(func, interface);
         if (intf < 0)
             return intf;
diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
index 0e38bb145e..6908fdd4a8 100644
--- a/drivers/usb/gadget/function/f_midi2.c
+++ b/drivers/usb/gadget/function/f_midi2.c
@@ -642,12 +642,21 @@ static void process_ump_stream_msg(struct f_midi2_ep *ep, const u32 *data)
         if (format)
             return; // invalid
         blk = (*data >> 8) & 0xff;
-        if (blk >= ep->num_blks)
-            return;
-        if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
-            reply_ump_stream_fb_info(ep, blk);
-        if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
-            reply_ump_stream_fb_name(ep, blk);
+        if (blk == 0xff) {
+            /* inquiry for all blocks */
+            for (blk = 0; blk < ep->num_blks; blk++) {
+                if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
+                    reply_ump_stream_fb_info(ep, blk);
+                if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
+                    reply_ump_stream_fb_name(ep, blk);
+            }
+        } else if (blk < ep->num_blks) {
+            /* only the specified block */
+            if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
+                reply_ump_stream_fb_info(ep, blk);
+            if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
+                reply_ump_stream_fb_name(ep, blk);
+        }
         return;
     }
 }
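Note on the f_midi2 change above: a function-block number of 0xff in a UMP stream discovery request now means "answer for every block" instead of being rejected as out of range. A small standalone sketch of that dispatch shape; the request bit values and reply functions below are stand-ins, not the driver's real UMP_STREAM_MSG_* constants:

/* Sketch of the 0xff = "all blocks" dispatch; reply functions are stubs. */
#include <stdio.h>

#define REQUEST_FB_INFO  (1u << 0)   /* stand-ins for the UMP request bits */
#define REQUEST_FB_NAME  (1u << 1)

static void reply_fb_info(unsigned int blk) { printf("info for block %u\n", blk); }
static void reply_fb_name(unsigned int blk) { printf("name for block %u\n", blk); }

static void handle_fb_discovery(unsigned int blk, unsigned int flags,
                                unsigned int num_blks)
{
    if (blk == 0xff) {
        /* inquiry for all blocks */
        for (blk = 0; blk < num_blks; blk++) {
            if (flags & REQUEST_FB_INFO)
                reply_fb_info(blk);
            if (flags & REQUEST_FB_NAME)
                reply_fb_name(blk);
        }
    } else if (blk < num_blks) {
        /* only the specified block */
        if (flags & REQUEST_FB_INFO)
            reply_fb_info(blk);
        if (flags & REQUEST_FB_NAME)
            reply_fb_name(blk);
    }
    /* out-of-range block numbers are silently ignored */
}

int main(void)
{
    handle_fb_discovery(0xff, REQUEST_FB_INFO | REQUEST_FB_NAME, 2);
    handle_fb_discovery(5, REQUEST_FB_INFO, 2);   /* ignored: only 2 blocks */
    return 0;
}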
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 89af0feb75..2429957697 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -592,16 +592,25 @@ int u_audio_start_capture(struct g_audio *audio_dev)
     struct usb_ep *ep, *ep_fback;
     struct uac_rtd_params *prm;
     struct uac_params *params = &audio_dev->params;
-    int req_len, i;
+    int req_len, i, ret;
     prm = &uac->c_prm;
     dev_dbg(dev, "start capture with rate %d\n", prm->srate);
     ep = audio_dev->out_ep;
-    config_ep_by_speed(gadget, &audio_dev->func, ep);
+    ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
+    if (ret < 0) {
+        dev_err(dev, "config_ep_by_speed for out_ep failed (%d)\n", ret);
+        return ret;
+    }
+
     req_len = ep->maxpacket;
     prm->ep_enabled = true;
-    usb_ep_enable(ep);
+    ret = usb_ep_enable(ep);
+    if (ret < 0) {
+        dev_err(dev, "usb_ep_enable failed for out_ep (%d)\n", ret);
+        return ret;
+    }
     for (i = 0; i < params->req_number; i++) {
         if (!prm->reqs[i]) {
@@ -629,9 +638,18 @@ int u_audio_start_capture(struct g_audio *audio_dev)
         return 0;
     /* Setup feedback endpoint */
-    config_ep_by_speed(gadget, &audio_dev->func, ep_fback);
+    ret = config_ep_by_speed(gadget, &audio_dev->func, ep_fback);
+    if (ret < 0) {
+        dev_err(dev, "config_ep_by_speed in_ep_fback failed (%d)\n", ret);
+        return ret; // TODO: Clean up out_ep
+    }
+
     prm->fb_ep_enabled = true;
-    usb_ep_enable(ep_fback);
+    ret = usb_ep_enable(ep_fback);
+    if (ret < 0) {
+        dev_err(dev, "usb_ep_enable failed for in_ep_fback (%d)\n", ret);
+        return ret; // TODO: Clean up out_ep
+    }
     req_len = ep_fback->maxpacket;
     req_fback = usb_ep_alloc_request(ep_fback, GFP_ATOMIC);
@@ -687,13 +705,17 @@ int u_audio_start_playback(struct g_audio *audio_dev)
     struct uac_params *params = &audio_dev->params;
     unsigned int factor;
     const struct usb_endpoint_descriptor *ep_desc;
-    int req_len, i;
+    int req_len, i, ret;
     unsigned int p_pktsize;
     prm = &uac->p_prm;
     dev_dbg(dev, "start playback with rate %d\n", prm->srate);
     ep = audio_dev->in_ep;
-    config_ep_by_speed(gadget, &audio_dev->func, ep);
+    ret = config_ep_by_speed(gadget, &audio_dev->func, ep);
+    if (ret < 0) {
+        dev_err(dev, "config_ep_by_speed for in_ep failed (%d)\n", ret);
+        return ret;
+    }
     ep_desc = ep->desc;
     /*
@@ -720,7 +742,11 @@ int u_audio_start_playback(struct g_audio *audio_dev)
     uac->p_residue_mil = 0;
     prm->ep_enabled = true;
-    usb_ep_enable(ep);
+    ret = usb_ep_enable(ep);
+    if (ret < 0) {
+        dev_err(dev, "usb_ep_enable failed for in_ep (%d)\n", ret);
+        return ret;
+    }
     for (i = 0; i < params->req_number; i++) {
         if (!prm->reqs[i]) {
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index a92eb6d909..8962f96ae7 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1441,6 +1441,7 @@ void gserial_suspend(struct gserial *gser)
         spin_lock(&port->port_lock);
         spin_unlock(&serial_port_lock);
     port->suspended = true;
+    port->start_delayed = true;
     spin_unlock_irqrestore(&port->port_lock, flags);
 }
 EXPORT_SYMBOL_GPL(gserial_suspend);
"NULL descriptor" : "maxpacket 0"); + ret = -EINVAL; goto out; } diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c index 6934970f18..5a8869cd95 100644 --- a/drivers/usb/serial/usb_debug.c +++ b/drivers/usb/serial/usb_debug.c @@ -76,6 +76,11 @@ static void usb_debug_process_read_urb(struct urb *urb) usb_serial_generic_process_read_urb(urb); } +static void usb_debug_init_termios(struct tty_struct *tty) +{ + tty->termios.c_lflag &= ~(ECHO | ECHONL); +} + static struct usb_serial_driver debug_device = { .driver = { .owner = THIS_MODULE, @@ -85,6 +90,7 @@ static struct usb_serial_driver debug_device = { .num_ports = 1, .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE, .break_ctl = usb_debug_break_ctl, + .init_termios = usb_debug_init_termios, .process_read_urb = usb_debug_process_read_urb, }; @@ -96,6 +102,7 @@ static struct usb_serial_driver dbc_device = { .id_table = dbc_id_table, .num_ports = 1, .break_ctl = usb_debug_break_ctl, + .init_termios = usb_debug_init_termios, .process_read_urb = usb_debug_process_read_urb, }; diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c index cb7cdf90cb..cd23533983 100644 --- a/drivers/usb/typec/mux/fsa4480.c +++ b/drivers/usb/typec/mux/fsa4480.c @@ -13,6 +13,10 @@ #include <linux/usb/typec_dp.h> #include <linux/usb/typec_mux.h> +#define FSA4480_DEVICE_ID 0x00 + #define FSA4480_DEVICE_ID_VENDOR_ID GENMASK(7, 6) + #define FSA4480_DEVICE_ID_VERSION_ID GENMASK(5, 3) + #define FSA4480_DEVICE_ID_REV_ID GENMASK(2, 0) #define FSA4480_SWITCH_ENABLE 0x04 #define FSA4480_SWITCH_SELECT 0x05 #define FSA4480_SWITCH_STATUS1 0x07 @@ -251,6 +255,7 @@ static int fsa4480_probe(struct i2c_client *client) struct typec_switch_desc sw_desc = { }; struct typec_mux_desc mux_desc = { }; struct fsa4480 *fsa; + int val = 0; int ret; fsa = devm_kzalloc(dev, sizeof(*fsa), GFP_KERNEL); @@ -268,6 +273,15 @@ static int fsa4480_probe(struct i2c_client *client) if (IS_ERR(fsa->regmap)) return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n"); + ret = regmap_read(fsa->regmap, FSA4480_DEVICE_ID, &val); + if (ret || !val) + return dev_err_probe(dev, -ENODEV, "FSA4480 not found\n"); + + dev_dbg(dev, "Found FSA4480 v%lu.%lu (Vendor ID = %lu)\n", + FIELD_GET(FSA4480_DEVICE_ID_VERSION_ID, val), + FIELD_GET(FSA4480_DEVICE_ID_REV_ID, val), + FIELD_GET(FSA4480_DEVICE_ID_VENDOR_ID, val)); + /* Safe mode */ fsa->cur_enable = FSA4480_ENABLE_DEVICE | FSA4480_ENABLE_USB; fsa->mode = TYPEC_STATE_SAFE; diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 82650c11e4..302a89aeb2 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -745,6 +745,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag * */ if (usb_pipedevice(urb->pipe) == 0) { + struct usb_device *old; __u8 type = usb_pipetype(urb->pipe); struct usb_ctrlrequest *ctrlreq = (struct usb_ctrlrequest *) urb->setup_packet; @@ -755,14 +756,15 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag goto no_need_xmit; } + old = vdev->udev; switch (ctrlreq->bRequest) { case USB_REQ_SET_ADDRESS: /* set_address may come when a device is reset */ dev_info(dev, "SetAddress Request (%d) to port %d\n", ctrlreq->wValue, vdev->rhport); - usb_put_dev(vdev->udev); vdev->udev = usb_get_dev(urb->dev); + usb_put_dev(old); spin_lock(&vdev->ud.lock); vdev->ud.status = VDEV_ST_USED; @@ -781,8 +783,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag 
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 82650c11e4..302a89aeb2 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -745,6 +745,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
      *
      */
     if (usb_pipedevice(urb->pipe) == 0) {
+        struct usb_device *old;
         __u8 type = usb_pipetype(urb->pipe);
         struct usb_ctrlrequest *ctrlreq =
             (struct usb_ctrlrequest *) urb->setup_packet;
@@ -755,14 +756,15 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
             goto no_need_xmit;
         }
+        old = vdev->udev;
         switch (ctrlreq->bRequest) {
         case USB_REQ_SET_ADDRESS:
             /* set_address may come when a device is reset */
             dev_info(dev, "SetAddress Request (%d) to port %d\n",
                      ctrlreq->wValue, vdev->rhport);
-            usb_put_dev(vdev->udev);
             vdev->udev = usb_get_dev(urb->dev);
+            usb_put_dev(old);
             spin_lock(&vdev->ud.lock);
             vdev->ud.status = VDEV_ST_USED;
@@ -781,8 +783,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
             usbip_dbg_vhci_hc(
                 "Not yet?:Get_Descriptor to device 0 (get max pipe size)\n");
-            usb_put_dev(vdev->udev);
             vdev->udev = usb_get_dev(urb->dev);
+            usb_put_dev(old);
             goto out;
         default:
@@ -1067,6 +1069,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
 static void vhci_device_reset(struct usbip_device *ud)
 {
     struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
+    struct usb_device *old = vdev->udev;
     unsigned long flags;
     spin_lock_irqsave(&ud->lock, flags);
@@ -1074,8 +1077,8 @@ static void vhci_device_reset(struct usbip_device *ud)
     vdev->speed = 0;
     vdev->devid = 0;
-    usb_put_dev(vdev->udev);
     vdev->udev = NULL;
+    usb_put_dev(old);
     if (ud->tcp_socket) {
         sockfd_put(ud->tcp_socket);
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 63a53680a8..6b9c12acf4 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -1483,13 +1483,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
     notify = ops->get_vq_notification(vdpa, index);
-    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-    if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
-                        PFN_DOWN(notify.addr), PAGE_SIZE,
-                        vma->vm_page_prot))
-        return VM_FAULT_SIGBUS;
-
-    return VM_FAULT_NOPAGE;
+    return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr));
 }
 static const struct vm_operations_struct vhost_vdpa_vm_ops = {
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 67dfa47788..c9c620e32f 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -845,7 +845,7 @@ out:
 #ifdef CONFIG_XEN_PRIVCMD_EVENTFD
 /* Irqfd support */
 static struct workqueue_struct *irqfd_cleanup_wq;
-static DEFINE_MUTEX(irqfds_lock);
+static DEFINE_SPINLOCK(irqfds_lock);
 static LIST_HEAD(irqfds_list);
 struct privcmd_kernel_irqfd {
@@ -909,9 +909,11 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
         irqfd_inject(kirqfd);
     if (flags & EPOLLHUP) {
-        mutex_lock(&irqfds_lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&irqfds_lock, flags);
         irqfd_deactivate(kirqfd);
-        mutex_unlock(&irqfds_lock);
+        spin_unlock_irqrestore(&irqfds_lock, flags);
     }
     return 0;
@@ -929,6 +931,7 @@ irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
 static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
 {
     struct privcmd_kernel_irqfd *kirqfd, *tmp;
+    unsigned long flags;
     __poll_t events;
     struct fd f;
     void *dm_op;
@@ -968,18 +971,18 @@ static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
     init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
     init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
-    mutex_lock(&irqfds_lock);
+    spin_lock_irqsave(&irqfds_lock, flags);
     list_for_each_entry(tmp, &irqfds_list, list) {
         if (kirqfd->eventfd == tmp->eventfd) {
             ret = -EBUSY;
-            mutex_unlock(&irqfds_lock);
+            spin_unlock_irqrestore(&irqfds_lock, flags);
             goto error_eventfd;
         }
     }
     list_add_tail(&kirqfd->list, &irqfds_list);
-    mutex_unlock(&irqfds_lock);
+    spin_unlock_irqrestore(&irqfds_lock, flags);
     /*
      * Check if there was an event already pending on the eventfd before we
@@ -1011,12 +1014,13 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
 {
     struct privcmd_kernel_irqfd *kirqfd;
     struct eventfd_ctx *eventfd;
+    unsigned long flags;
     eventfd = eventfd_ctx_fdget(irqfd->fd);
     if (IS_ERR(eventfd))
         return PTR_ERR(eventfd);
-    mutex_lock(&irqfds_lock);
+    spin_lock_irqsave(&irqfds_lock, flags);
     list_for_each_entry(kirqfd, &irqfds_list, list) {
         if (kirqfd->eventfd == eventfd) {
@@ -1025,7 +1029,7 @@ static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
         }
     }
-    mutex_unlock(&irqfds_lock);
+    spin_unlock_irqrestore(&irqfds_lock, flags);
     eventfd_ctx_put(eventfd);
@@ -1073,13 +1077,14 @@ static int privcmd_irqfd_init(void)
 static void privcmd_irqfd_exit(void)
 {
     struct privcmd_kernel_irqfd *kirqfd, *tmp;
+    unsigned long flags;
-    mutex_lock(&irqfds_lock);
+    spin_lock_irqsave(&irqfds_lock, flags);
     list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
         irqfd_deactivate(kirqfd);
-    mutex_unlock(&irqfds_lock);
+    spin_unlock_irqrestore(&irqfds_lock, flags);
     destroy_workqueue(irqfd_cleanup_wq);
 }