author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-07-01 17:14:06 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-07-01 17:14:06 +0000
commit     e616d72f8b12df275e5afd05b0f5ed251f6d56a1 (patch)
tree       cfbdd162315fce0266b5aa12c070ff08a6d8f4a7 /drivers
parent     Releasing progress-linux version 6.9.2-1~exp1~progress7.99u1. (diff)
download   linux-e616d72f8b12df275e5afd05b0f5ed251f6d56a1.tar.xz
           linux-e616d72f8b12df275e5afd05b0f5ed251f6d56a1.zip
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
724 files changed, 7869 insertions, 4092 deletions
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c index 736c2eb8c0..f677ad2177 100644 --- a/drivers/accessibility/speakup/main.c +++ b/drivers/accessibility/speakup/main.c @@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc) } attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr); buf[cnt++] = attr_ch; - while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) { + while (tmpx < vc->vc_cols - 1 && cnt < ARRAY_SIZE(buf) - 1) { tmp_pos += 2; tmpx++; ch = get_char(vc, (u_short *)tmp_pos, &temp); diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 04e273167e..8e01792228 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -325,6 +325,7 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = { static const struct property_entry bsw_spi_properties[] = { PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP), + PROPERTY_ENTRY_U32("num-cs", 2), { } }; diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 30f3fc13c2..8d18af396d 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -5,6 +5,7 @@ ccflags-y := -D_LINUX -DBUILDING_ACPICA ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT +CFLAGS_tbfind.o += $(call cc-disable-warning, stringop-truncation) # use acpi.o to put all files here into acpi.o modparam namespace obj-y += acpi.o diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index ddd072cbc7..2133085ded 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -191,6 +191,10 @@ void acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, acpi_adr_space_type space_id, u32 function); +void +acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node, + acpi_adr_space_type space_id); + acpi_status acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function); diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 18fdf2bc2d..dc6004daf6 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -20,10 +20,6 @@ extern u8 acpi_gbl_default_address_spaces[]; /* Local prototypes */ -static void -acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node, - acpi_adr_space_type space_id); - static acpi_status acpi_ev_reg_run(acpi_handle obj_handle, u32 level, void *context, void **return_value); @@ -818,7 +814,7 @@ acpi_ev_reg_run(acpi_handle obj_handle, * ******************************************************************************/ -static void +void acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node, acpi_adr_space_type space_id) { diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 3197e6303c..624361a5f3 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -306,3 +306,57 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id) } ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods) + +/******************************************************************************* + * + * FUNCTION: acpi_execute_orphan_reg_method + * + * PARAMETERS: device - Handle for the device + * space_id - The address space ID + * + * RETURN: Status + * + * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI + * device. This is a _REG method that has no corresponding region + * within the device's scope. 
+ * + ******************************************************************************/ +acpi_status +acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id) +{ + struct acpi_namespace_node *node; + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method); + + /* Parameter validation */ + + if (!device) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Convert and validate the device handle */ + + node = acpi_ns_validate_handle(device); + if (node) { + + /* + * If an "orphan" _REG method is present in the device's scope + * for the given address space ID, run it. + */ + + acpi_ev_execute_orphan_reg_method(node, space_id); + } else { + status = AE_BAD_PARAMETER; + } + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method) diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 8907b8bf42..c49b9f8de7 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function, struct acpi_mem_mapping *mm = mem_info->cur_mm; u32 length; acpi_size map_length; - acpi_size page_boundary_map_length; #ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED u32 remainder; #endif @@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function, map_length = (acpi_size) ((mem_info->address + mem_info->length) - address); - /* - * If mapping the entire remaining portion of the region will cross - * a page boundary, just map up to the page boundary, do not cross. - * On some systems, crossing a page boundary while mapping regions - * can cause warnings if the pages have different attributes - * due to resource management. - * - * This has the added benefit of constraining a single mapping to - * one page, which is similar to the original code that used a 4k - * maximum window. 
- */ - page_boundary_map_length = (acpi_size) - (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address); - if (page_boundary_map_length == 0) { - page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE; - } - - if (map_length > page_boundary_map_length) { - map_length = page_boundary_map_length; - } + if (map_length > ACPI_DEFAULT_PAGE_SIZE) + map_length = ACPI_DEFAULT_PAGE_SIZE; /* Create a new mapping starting at the address given */ diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c index 01faca3a23..bb9f8475ce 100644 --- a/drivers/acpi/apei/einj-core.c +++ b/drivers/acpi/apei/einj-core.c @@ -903,7 +903,7 @@ static void __exit einj_exit(void) if (einj_initialized) platform_driver_unregister(&einj_driver); - platform_device_del(einj_dev); + platform_device_unregister(einj_dev); } module_init(einj_init); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d9fa730416..a87b10eef7 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -316,9 +316,14 @@ static void acpi_bus_osc_negotiate_platform_control(void) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_PROCESSOR)) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; + if (IS_ENABLED(CONFIG_ACPI_THERMAL)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT; + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_OVER_16_PSTATES_SUPPORT; + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GED_SUPPORT; + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_PRMT)) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_FFH)) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 02255795b8..1cec29ab64 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1482,13 +1482,14 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec) static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, bool call_reg) { + acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle; acpi_status status; acpi_ec_start(ec, false); if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { acpi_ec_enter_noirq(ec); - status = acpi_install_address_space_handler_no_reg(ec->handle, + status = acpi_install_address_space_handler_no_reg(scope_handle, ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, NULL, ec); @@ -1497,11 +1498,13 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, return -ENODEV; } set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); - ec->address_space_handler_holder = ec->handle; } if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) { - acpi_execute_reg_methods(ec->handle, ACPI_ADR_SPACE_EC); + acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC); + if (scope_handle != ec->handle) + acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC); + set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags); } @@ -1553,10 +1556,13 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, static void ec_remove_handlers(struct acpi_ec *ec) { + acpi_handle scope_handle = ec == first_ec ? 
ACPI_ROOT_OBJECT : ec->handle; + if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { if (ACPI_FAILURE(acpi_remove_address_space_handler( - ec->address_space_handler_holder, - ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) + scope_handle, + ACPI_ADR_SPACE_EC, + &acpi_ec_space_handler))) pr_err("failed to remove space handler\n"); clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); } @@ -1595,14 +1601,18 @@ static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool ca { int ret; - ret = ec_install_handlers(ec, device, call_reg); - if (ret) - return ret; - /* First EC capable of handling transactions */ if (!first_ec) first_ec = ec; + ret = ec_install_handlers(ec, device, call_reg); + if (ret) { + if (ec == first_ec) + first_ec = NULL; + + return ret; + } + pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr, ec->data_addr); diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index ca72a0dc57..a96d1bc662 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -185,7 +185,6 @@ enum acpi_ec_event_state { struct acpi_ec { acpi_handle handle; - acpi_handle address_space_handler_holder; int gpe; int irq; unsigned long command_addr; @@ -302,6 +301,10 @@ void acpi_mipi_check_crs_csi2(acpi_handle handle); void acpi_mipi_scan_crs_csi2(void); void acpi_mipi_init_crs_csi2_swnodes(void); void acpi_mipi_crs_csi2_cleanup(void); +#ifdef CONFIG_X86 bool acpi_graph_ignore_port(acpi_handle handle); +#else +static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; } +#endif #endif /* _ACPI_INTERNAL_H_ */ diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c index d05413a067..0ab13751f0 100644 --- a/drivers/acpi/mipi-disco-img.c +++ b/drivers/acpi/mipi-disco-img.c @@ -725,14 +725,20 @@ void acpi_mipi_crs_csi2_cleanup(void) acpi_mipi_del_crs_csi2(csi2); } -static const struct dmi_system_id dmi_ignore_port_nodes[] = { - { - .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"), - }, - }, - { } +#ifdef CONFIG_X86 +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + +/* CPU matches for Dell generations with broken ACPI MIPI DISCO info */ +static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = { + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL), + {} }; static const char *strnext(const char *s1, const char *s2) @@ -761,7 +767,10 @@ bool acpi_graph_ignore_port(acpi_handle handle) static bool dmi_tested, ignore_port; if (!dmi_tested) { - ignore_port = dmi_first_match(dmi_ignore_port_nodes); + if (dmi_name_in_vendors("Dell Inc.") && + x86_match_cpu(dell_broken_mipi_disco_cpu_gens)) + ignore_port = true; + dmi_tested = true; } @@ -794,3 +803,4 @@ out_free: kfree(orig_path); return false; } +#endif diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index e45e64993c..3b09fd39ee 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -208,6 +208,11 @@ int __init srat_disabled(void) return acpi_numa < 0; } +__weak int __init numa_fill_memblks(u64 start, u64 end) +{ + return NUMA_NO_MEMBLK; +} + #if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) /* * Callback for SLIT parsing. 
pxm_to_node() returns NUMA_NO_NODE for diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 59423fe9d0..b5bf8b81a0 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -518,6 +518,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { }, }, { + /* Asus Vivobook Pro N6506MV */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "N6506MV"), + }, + }, + { /* LG Electronics 17U70P */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), @@ -534,6 +541,12 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { */ static const struct dmi_system_id irq1_edge_low_force_override[] = { { + /* XMG APEX 17 (M23) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxBGxx"), + }, + }, + { /* TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), @@ -630,6 +643,18 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = { DMI_MATCH(DMI_BOARD_NAME, "X565"), }, }, + { + /* TongFang GXxHRXx/TUXEDO InfinityBook Pro Gen9 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"), + }, + }, + { + /* TongFang GMxHGxx/TUXEDO Stellaris Slim Gen1 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"), + }, + }, { } }; diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index d67881b50b..a0cfc857fb 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz) static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k) { + int temp; + if (temp_deci_k == THERMAL_TEMP_INVALID) return THERMAL_TEMP_INVALID; - return deci_kelvin_to_millicelsius_with_offset(temp_deci_k, + temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k, tz->kelvin_offset); + if (temp <= 0) + return THERMAL_TEMP_INVALID; + + return temp; } static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip) diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 9fdcc620c6..2cc3821b2b 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -499,6 +499,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_native, + /* Lenovo Slim 7 16ARH7 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82UX"), + }, + }, + { + .callback = video_detect_force_native, /* Lenovo ThinkPad X131e (3371 AMD version) */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 90c3d2eab9..448e0d14fd 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -197,16 +197,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s } /* - * AMD systems from Renoir and Lucienne *require* that the NVME controller + * AMD systems from Renoir onwards *require* that the NVME controller * is put into D3 over a Modern Standby / suspend-to-idle cycle. * * This is "typically" accomplished using the `StorageD3Enable` * property in the _DSD that is checked via the `acpi_storage_d3` function - * but this property was introduced after many of these systems launched - * and most OEM systems don't have it in their BIOS. + * but some OEM systems still don't have it in their BIOS. * * The Microsoft documentation for StorageD3Enable mentioned that Windows has - * a hardcoded allowlist for D3 support, which was used for these platforms. 
+ * a hardcoded allowlist for D3 support as well as a registry key to override + * the BIOS, which has been used for these cases. * * This allows quirking on Linux in a similar fashion. * @@ -219,19 +219,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s * https://bugzilla.kernel.org/show_bug.cgi?id=216773 * https://bugzilla.kernel.org/show_bug.cgi?id=217003 * 2) On at least one HP system StorageD3Enable is missing on the second NVME - disk in the system. + * disk in the system. + * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only + * NVME device. */ -static const struct x86_cpu_id storage_d3_cpu_ids[] = { - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */ - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */ - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */ - X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */ - {} -}; - bool force_storage_d3(void) { - return x86_match_cpu(storage_d3_cpu_ids); + if (!cpu_feature_enabled(X86_FEATURE_ZEN)) + return false; + return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0; } /* @@ -260,9 +256,10 @@ bool force_storage_d3(void) #define ACPI_QUIRK_SKIP_I2C_CLIENTS BIT(0) #define ACPI_QUIRK_UART1_SKIP BIT(1) #define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(2) -#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(3) -#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(4) -#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(5) +#define ACPI_QUIRK_PNP_UART1_SKIP BIT(3) +#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(4) +#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(5) +#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(6) static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { /* @@ -342,6 +339,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_PNP_UART1_SKIP | ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), }, { @@ -440,14 +438,18 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo if (ret) return 0; - /* to not match on PNP enumerated debug UARTs */ - if (!dev_is_platform(controller_parent)) - return 0; - dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids); if (dmi_id) quirks = (unsigned long)dmi_id->driver_data; + if (!dev_is_platform(controller_parent)) { + /* PNP enumerated UARTs */ + if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1) + *skip = true; + + return 0; + } + if ((quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1) *skip = true; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 6548f10e61..5eb38fbbbe 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -429,7 +429,6 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */ /* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */ { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */ - { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_pcs_quirk }, /* Alder Lake-P AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -1736,6 +1735,14 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap) if (ap->pflags & ATA_PFLAG_EXTERNAL) return; + /* If no LPM states are supported by the HBA, do not bother with LPM */ + if ((ap->host->flags & ATA_HOST_NO_PART) && + (ap->host->flags & ATA_HOST_NO_SSC) && + (ap->host->flags & ATA_HOST_NO_DEVSLP)) { + 
ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n"); + return; + } + /* user modified policy via module param */ if (mobile_lpm_policy != -1) { policy = mobile_lpm_policy; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index c449d60d9b..28caed151e 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4180,8 +4180,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM }, { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM }, - /* Crucial BX100 SSD 500GB has broken LPM support */ + /* Crucial devices with broken LPM support */ { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, + { "CT240BX500SSD1", NULL, ATA_HORKAGE_NOLPM }, /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */ { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | @@ -4199,6 +4200,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM | ATA_HORKAGE_NOLPM }, + /* Apacer models with LPM issues */ + { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM }, + + /* AMD Radeon devices with broken LPM support */ + { "R3SL240G", NULL, ATA_HORKAGE_NOLPM }, + /* These specific Samsung models/firmware-revs do not handle LPM well */ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM }, { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM }, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e954976891..9c3daa7d19 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1828,11 +1828,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) 2 }; - /* set scsi removable (RMB) bit per ata bit, or if the - * AHCI port says it's external (Hotplug-capable, eSATA). + /* + * Set the SCSI Removable Media Bit (RMB) if the ATA removable media + * device bit (obsolete since ATA-8 ACS) is set. 
*/ - if (ata_id_removable(args->id) || - (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL)) + if (ata_id_removable(args->id)) hdr[1] |= (1 << 7); if (args->dev->class == ATA_DEV_ZAC) { diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index 448a511cbc..e7ac142c24 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -173,8 +173,6 @@ static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 }; static struct legacy_probe probe_list[NR_HOST]; static struct legacy_data legacy_data[NR_HOST]; static struct ata_host *legacy_host[NR_HOST]; -static int nr_legacy_host; - /** * legacy_probe_add - Add interface to probe list @@ -1276,9 +1274,11 @@ static __exit void legacy_exit(void) { int i; - for (i = 0; i < nr_legacy_host; i++) { + for (i = 0; i < NR_HOST; i++) { struct legacy_data *ld = &legacy_data[i]; - ata_host_detach(legacy_host[i]); + + if (legacy_host[i]) + ata_host_detach(legacy_host[i]); platform_device_unregister(ld->platform_dev); } } diff --git a/drivers/base/base.h b/drivers/base/base.h index 0738ccad08..db4f910e8e 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -192,11 +192,14 @@ extern struct kset *devices_kset; void devices_kset_move_last(struct device *dev); #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS) -void module_add_driver(struct module *mod, struct device_driver *drv); +int module_add_driver(struct module *mod, struct device_driver *drv); void module_remove_driver(struct device_driver *drv); #else -static inline void module_add_driver(struct module *mod, - struct device_driver *drv) { } +static inline int module_add_driver(struct module *mod, + struct device_driver *drv) +{ + return 0; +} static inline void module_remove_driver(struct device_driver *drv) { } #endif diff --git a/drivers/base/bus.c b/drivers/base/bus.c index daee55c9b2..ffea0728b8 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -674,7 +674,12 @@ int bus_add_driver(struct device_driver *drv) if (error) goto out_del_list; } - module_add_driver(drv->owner, drv); + error = module_add_driver(drv->owner, drv); + if (error) { + printk(KERN_ERR "%s: failed to create module links for %s\n", + __func__, drv->name); + goto out_detach; + } error = driver_create_file(drv, &driver_attr_uevent); if (error) { @@ -699,6 +704,8 @@ int bus_add_driver(struct device_driver *drv) return 0; +out_detach: + driver_detach(drv); out_del_list: klist_del(&priv->knode_bus); out_unregister: diff --git a/drivers/base/core.c b/drivers/base/core.c index 5f4e03336e..8e3bd230b1 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2738,8 +2738,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, if (!env) return -ENOMEM; + /* Synchronize with really_probe() */ + device_lock(dev); /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); + device_unlock(dev); if (retval) goto out; diff --git a/drivers/base/module.c b/drivers/base/module.c index 46ad4d6367..a1b55da071 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -30,14 +30,14 @@ static void module_create_drivers_dir(struct module_kobject *mk) mutex_unlock(&drivers_dir_mutex); } -void module_add_driver(struct module *mod, struct device_driver *drv) +int module_add_driver(struct module *mod, struct device_driver *drv) { char *driver_name; - int no_warn; struct module_kobject *mk = NULL; + int ret; if (!drv) - return; + return 0; if (mod) mk = &mod->mkobj; @@ -56,17 +56,37 @@ void module_add_driver(struct 
module *mod, struct device_driver *drv) } if (!mk) - return; + return 0; + + ret = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module"); + if (ret) + return ret; - /* Don't check return codes; these calls are idempotent */ - no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module"); driver_name = make_driver_name(drv); - if (driver_name) { - module_create_drivers_dir(mk); - no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, - driver_name); - kfree(driver_name); + if (!driver_name) { + ret = -ENOMEM; + goto out; + } + + module_create_drivers_dir(mk); + if (!mk->drivers_dir) { + ret = -EINVAL; + goto out; } + + ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name); + if (ret) + goto out; + + kfree(driver_name); + + return 0; +out: + sysfs_remove_link(&drv->p->kobj, "module"); + sysfs_remove_link(mk->drivers_dir, driver_name); + kfree(driver_name); + + return ret; } void module_remove_driver(struct device_driver *drv) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9d4ec9273b..1ddd3e5497 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -588,7 +588,10 @@ static inline int was_interrupted(int result) return result == -ERESTARTSYS || result == -EINTR; } -/* always call with the tx_lock held */ +/* + * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns + * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed. + */ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) { struct request *req = blk_mq_rq_from_pdu(cmd); @@ -605,6 +608,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) u32 nbd_cmd_flags = 0; int sent = nsock->sent, skip = 0; + lockdep_assert_held(&cmd->lock); + lockdep_assert_held(&nsock->tx_lock); + iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request)); type = req_to_nbd_cmd_type(req); @@ -669,7 +675,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) nsock->sent = sent; } set_bit(NBD_CMD_REQUEUED, &cmd->flags); - return BLK_STS_RESOURCE; + return (__force int)BLK_STS_RESOURCE; } dev_err_ratelimited(disk_to_dev(nbd->disk), "Send control failed (result %d)\n", result); @@ -710,7 +716,7 @@ send_pages: nsock->pending = req; nsock->sent = sent; set_bit(NBD_CMD_REQUEUED, &cmd->flags); - return BLK_STS_RESOURCE; + return (__force int)BLK_STS_RESOURCE; } dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", @@ -1007,7 +1013,7 @@ static int wait_for_reconnect(struct nbd_device *nbd) return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags); } -static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) +static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index) { struct request *req = blk_mq_rq_from_pdu(cmd); struct nbd_device *nbd = cmd->nbd; @@ -1015,18 +1021,20 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) struct nbd_sock *nsock; int ret; + lockdep_assert_held(&cmd->lock); + config = nbd_get_config_unlocked(nbd); if (!config) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Socks array is empty\n"); - return -EINVAL; + return BLK_STS_IOERR; } if (index >= config->num_connections) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Attempted send on invalid socket\n"); nbd_config_put(nbd); - return -EINVAL; + return BLK_STS_IOERR; } cmd->status = BLK_STS_OK; again: @@ -1049,7 +1057,7 @@ again: */ sock_shutdown(nbd); nbd_config_put(nbd); - return -EIO; + return BLK_STS_IOERR; } goto again; } @@ -1062,7 +1070,7 @@ again: blk_mq_start_request(req); if 
(unlikely(nsock->pending && nsock->pending != req)) { nbd_requeue_cmd(cmd); - ret = 0; + ret = BLK_STS_OK; goto out; } /* @@ -1081,19 +1089,19 @@ again: "Request send failed, requeueing\n"); nbd_mark_nsock_dead(nbd, nsock, 1); nbd_requeue_cmd(cmd); - ret = 0; + ret = BLK_STS_OK; } out: mutex_unlock(&nsock->tx_lock); nbd_config_put(nbd); - return ret; + return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret; } static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); - int ret; + blk_status_t ret; /* * Since we look at the bio's to send the request over the network we @@ -1113,10 +1121,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, * appropriate. */ ret = nbd_handle_cmd(cmd, hctx->queue_num); - if (ret < 0) - ret = BLK_STS_IOERR; - else if (!ret) - ret = BLK_STS_OK; mutex_unlock(&cmd->lock); return ret; diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index ed33cf7192..620679a0ac 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -404,13 +404,25 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev, static int nullb_apply_submit_queues(struct nullb_device *dev, unsigned int submit_queues) { - return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues); + int ret; + + mutex_lock(&lock); + ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues); + mutex_unlock(&lock); + + return ret; } static int nullb_apply_poll_queues(struct nullb_device *dev, unsigned int poll_queues) { - return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues); + int ret; + + mutex_lock(&lock); + ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues); + mutex_unlock(&lock); + + return ret; } NULLB_DEVICE_ATTR(size, ulong, NULL); @@ -457,28 +469,32 @@ static ssize_t nullb_device_power_store(struct config_item *item, if (ret < 0) return ret; + ret = count; + mutex_lock(&lock); if (!dev->power && newp) { if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) - return count; + goto out; + ret = null_add_dev(dev); if (ret) { clear_bit(NULLB_DEV_FL_UP, &dev->flags); - return ret; + goto out; } set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); dev->power = newp; + ret = count; } else if (dev->power && !newp) { if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { - mutex_lock(&lock); dev->power = newp; null_del_dev(dev->nullb); - mutex_unlock(&lock); } clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); } - return count; +out: + mutex_unlock(&lock); + return ret; } CONFIGFS_ATTR(nullb_device_, power); @@ -1918,15 +1934,12 @@ static int null_add_dev(struct nullb_device *dev) nullb->q->queuedata = nullb; blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); - mutex_lock(&lock); rv = ida_alloc(&nullb_indexes, GFP_KERNEL); - if (rv < 0) { - mutex_unlock(&lock); + if (rv < 0) goto out_cleanup_disk; - } + nullb->index = rv; dev->index = rv; - mutex_unlock(&lock); if (config_item_name(&dev->group.cg_item)) { /* Use configfs dir name as the device name */ @@ -1955,9 +1968,7 @@ static int null_add_dev(struct nullb_device *dev) if (rv) goto out_ida_free; - mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); - mutex_unlock(&lock); pr_info("disk %s created\n", nullb->disk_name); @@ -2006,7 +2017,9 @@ static int null_create_dev(void) if (!dev) return -ENOMEM; + mutex_lock(&lock); ret = null_add_dev(dev); + mutex_unlock(&lock); if (ret) { null_free_dev(dev); return ret; @@ -2113,10 +2126,13 @@ static void __exit 
null_exit(void) if (tag_set.ops) blk_mq_free_tag_set(&tag_set); + + mutex_destroy(&lock); } module_init(null_init); module_exit(null_exit); MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>"); +MODULE_DESCRIPTION("multi queue aware block test driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index 1689e25841..27928deccc 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -113,7 +113,7 @@ int null_init_zoned_dev(struct nullb_device *dev, if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) { dev->zone_max_open = dev->zone_max_active; pr_info("changed the maximum number of open zones to %u\n", - dev->nr_zones); + dev->zone_max_open); } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) { dev->zone_max_open = 0; pr_info("zone_max_open limit disabled, limit >= zone count\n"); diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 88262d3a93..ce97b336fb 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -3,7 +3,6 @@ * Copyright (c) 2008-2009 Atheros Communications Inc. */ - #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -128,7 +127,6 @@ MODULE_DEVICE_TABLE(usb, ath3k_table); * for AR3012 */ static const struct usb_device_id ath3k_blist_tbl[] = { - /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, @@ -202,7 +200,7 @@ static inline void ath3k_log_failed_loading(int err, int len, int size, #define TIMEGAP_USEC_MAX 100 static int ath3k_load_firmware(struct usb_device *udev, - const struct firmware *firmware) + const struct firmware *firmware) { u8 *send_buf; int len = 0; @@ -237,9 +235,9 @@ static int ath3k_load_firmware(struct usb_device *udev, memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, - &len, 3000); + &len, 3000); - if (err || (len != size)) { + if (err || len != size) { ath3k_log_failed_loading(err, len, size, count); goto error; } @@ -262,7 +260,7 @@ static int ath3k_get_state(struct usb_device *udev, unsigned char *state) } static int ath3k_get_version(struct usb_device *udev, - struct ath3k_version *version) + struct ath3k_version *version) { return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, @@ -271,7 +269,7 @@ static int ath3k_get_version(struct usb_device *udev, } static int ath3k_load_fwfile(struct usb_device *udev, - const struct firmware *firmware) + const struct firmware *firmware) { u8 *send_buf; int len = 0; @@ -310,8 +308,8 @@ static int ath3k_load_fwfile(struct usb_device *udev, memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, - &len, 3000); - if (err || (len != size)) { + &len, 3000); + if (err || len != size) { ath3k_log_failed_loading(err, len, size, count); kfree(send_buf); return err; @@ -425,7 +423,6 @@ static int ath3k_load_syscfg(struct usb_device *udev) } switch (fw_version.ref_clock) { - case ATH3K_XTAL_FREQ_26M: clk_value = 26; break; @@ -441,7 +438,7 @@ static int ath3k_load_syscfg(struct usb_device *udev) } snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s", - le32_to_cpu(fw_version.rom_version), clk_value, ".dfu"); + le32_to_cpu(fw_version.rom_version), clk_value, ".dfu"); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { @@ -456,7 +453,7 @@ static int ath3k_load_syscfg(struct usb_device *udev) } 
static int ath3k_probe(struct usb_interface *intf, - const struct usb_device_id *id) + const struct usb_device_id *id) { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); @@ -505,10 +502,10 @@ static int ath3k_probe(struct usb_interface *intf, if (ret < 0) { if (ret == -ENOENT) BT_ERR("Firmware file \"%s\" not found", - ATH3K_FIRMWARE); + ATH3K_FIRMWARE); else BT_ERR("Firmware file \"%s\" request failed (err=%d)", - ATH3K_FIRMWARE, ret); + ATH3K_FIRMWARE, ret); return ret; } diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index 9658b33c82..18f34998a1 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -121,13 +121,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb) ((event->data[2] == MODULE_BROUGHT_UP) || (event->data[2] == MODULE_ALREADY_UP)) ? "Bring-up succeed" : "Bring-up failed"); - - if (event->length > 3 && event->data[3]) - priv->btmrvl_dev.dev_type = HCI_AMP; - else - priv->btmrvl_dev.dev_type = HCI_PRIMARY; - - BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type); } else if (priv->btmrvl_dev.sendcmdflag && event->data[1] == MODULE_SHUTDOWN_REQ) { BT_DBG("EVENT:%s", (event->data[2]) ? @@ -686,8 +679,6 @@ int btmrvl_register_hdev(struct btmrvl_private *priv) hdev->wakeup = btmrvl_wakeup; SET_HCIDEV_DEV(hdev, &card->func->dev); - hdev->dev_type = priv->btmrvl_dev.dev_type; - ret = hci_register_dev(hdev); if (ret < 0) { BT_ERR("Can not register HCI device"); diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 638074992c..35fb26cbf2 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -148,8 +148,10 @@ static int qca_read_fw_build_info(struct hci_dev *hdev) } build_label = kstrndup(&edl->data[1], build_lbl_len, GFP_KERNEL); - if (!build_label) + if (!build_label) { + err = -ENOMEM; goto out; + } hci_set_fw_info(hdev, "%s", build_label); diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c index 634cf8f5ed..0c91d7635a 100644 --- a/drivers/bluetooth/btrsi.c +++ b/drivers/bluetooth/btrsi.c @@ -134,7 +134,6 @@ static int rsi_hci_attach(void *priv, struct rsi_proto_ops *ops) hdev->bus = HCI_USB; hci_set_drvdata(hdev, h_adapter); - hdev->dev_type = HCI_PRIMARY; hdev->open = rsi_hci_open; hdev->close = rsi_hci_close; hdev->flush = rsi_hci_flush; diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index f19d31ee37..fdcfe9c503 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -32,9 +32,6 @@ static const struct sdio_device_id btsdio_table[] = { /* Generic Bluetooth Type-B SDIO device */ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) }, - /* Generic Bluetooth AMP controller */ - { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) }, - { } /* Terminating entry */ }; @@ -319,11 +316,6 @@ static int btsdio_probe(struct sdio_func *func, hdev->bus = HCI_SDIO; hci_set_drvdata(hdev, data); - if (id->class == SDIO_CLASS_BT_AMP) - hdev->dev_type = HCI_AMP; - else - hdev->dev_type = HCI_PRIMARY; - data->hdev = hdev; SET_HCIDEV_DEV(hdev, &func->dev); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 01b99471d1..fb716849b6 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -4332,11 +4332,6 @@ static int btusb_probe(struct usb_interface *intf, hdev->bus = HCI_USB; hci_set_drvdata(hdev, data); - if (id->driver_info & BTUSB_AMP) - hdev->dev_type = HCI_AMP; - else - hdev->dev_type = HCI_PRIMARY; - data->hdev = hdev; SET_HCIDEV_DEV(hdev, &intf->dev); diff --git 
a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c index 9a7243d5db..0c2f15235b 100644 --- a/drivers/bluetooth/hci_bcm4377.c +++ b/drivers/bluetooth/hci_bcm4377.c @@ -2361,7 +2361,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id) bcm4377->hdev = hdev; hdev->bus = HCI_PCI; - hdev->dev_type = HCI_PRIMARY; hdev->open = bcm4377_hci_open; hdev->close = bcm4377_hci_close; hdev->send = bcm4377_hci_send_frame; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index a26367e9fb..17a2f158a0 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -667,11 +667,6 @@ static int hci_uart_register_dev(struct hci_uart *hu) if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags)) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); - if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) - hdev->dev_type = HCI_AMP; - else - hdev->dev_type = HCI_PRIMARY; - /* Only call open() for the protocol after hdev is fully initialized as * open() (or a timer/workqueue it starts) may attempt to reference it. */ @@ -722,7 +717,6 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags) { unsigned long valid_flags = BIT(HCI_UART_RAW_DEVICE) | BIT(HCI_UART_RESET_ON_INIT) | - BIT(HCI_UART_CREATE_AMP) | BIT(HCI_UART_INIT_PENDING) | BIT(HCI_UART_EXT_CONFIG) | BIT(HCI_UART_VND_DETECT); diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index 85c0d9b68f..89a22e9b32 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -366,11 +366,6 @@ int hci_uart_register_device_priv(struct hci_uart *hu, if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags)) set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks); - if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) - hdev->dev_type = HCI_AMP; - else - hdev->dev_type = HCI_PRIMARY; - if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) return 0; diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index 68c8c7e95d..00bf7ae82c 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -37,7 +37,6 @@ #define HCI_UART_RAW_DEVICE 0 #define HCI_UART_RESET_ON_INIT 1 -#define HCI_UART_CREATE_AMP 2 #define HCI_UART_INIT_PENDING 3 #define HCI_UART_EXT_CONFIG 4 #define HCI_UART_VND_DETECT 5 diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 572d68d529..28750a40f0 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -384,17 +384,10 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) { struct hci_dev *hdev; struct sk_buff *skb; - __u8 dev_type; if (data->hdev) return -EBADFD; - /* bits 0-1 are dev_type (Primary or AMP) */ - dev_type = opcode & 0x03; - - if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP) - return -EINVAL; - /* bits 2-5 are reserved (must be zero) */ if (opcode & 0x3c) return -EINVAL; @@ -412,7 +405,6 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) data->hdev = hdev; hdev->bus = HCI_VIRTUAL; - hdev->dev_type = dev_type; hci_set_drvdata(hdev, data); hdev->open = vhci_open_dev; @@ -634,7 +626,7 @@ static void vhci_open_timeout(struct work_struct *work) struct vhci_data *data = container_of(work, struct vhci_data, open_timeout.work); - vhci_create_device(data, amp ? 
HCI_AMP : HCI_PRIMARY); + vhci_create_device(data, 0x00); } static int vhci_open(struct inode *inode, struct file *file) diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c index 2ac70b560c..18208e152a 100644 --- a/drivers/bluetooth/virtio_bt.c +++ b/drivers/bluetooth/virtio_bt.c @@ -274,7 +274,6 @@ static int virtbt_probe(struct virtio_device *vdev) switch (type) { case VIRTIO_BT_CONFIG_TYPE_PRIMARY: - case VIRTIO_BT_CONFIG_TYPE_AMP: break; default: return -EINVAL; @@ -303,7 +302,6 @@ static int virtbt_probe(struct virtio_device *vdev) vbt->hdev = hdev; hdev->bus = HCI_VIRTIO; - hdev->dev_type = type; hci_set_drvdata(hdev, vbt); hdev->open = virtbt_open; diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c index 379bc245c5..0e903d6e22 100644 --- a/drivers/char/hw_random/stm32-rng.c +++ b/drivers/char/hw_random/stm32-rng.c @@ -220,7 +220,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) if (err && i > RNG_NB_RECOVER_TRIES) { dev_err((struct device *)priv->rng.priv, "Couldn't recover from seed error\n"); - return -ENOTRECOVERABLE; + retval = -ENOTRECOVERABLE; + goto exit_rpm; } continue; @@ -238,7 +239,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) if (err && i > RNG_NB_RECOVER_TRIES) { dev_err((struct device *)priv->rng.priv, "Couldn't recover from seed error"); - return -ENOTRECOVERABLE; + retval = -ENOTRECOVERABLE; + goto exit_rpm; } continue; @@ -250,6 +252,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) max -= sizeof(u32); } +exit_rpm: pm_runtime_mark_last_busy((struct device *) priv->rng.priv); pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv); @@ -353,13 +356,15 @@ static int stm32_rng_init(struct hwrng *rng) err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, reg, reg & RNG_SR_DRDY, 10, 100000); - if (err | (reg & ~RNG_SR_DRDY)) { + if (err || (reg & ~RNG_SR_DRDY)) { clk_disable_unprepare(priv->clk); dev_err((struct device *)priv->rng.priv, "%s: timeout:%x SR: %x!\n", __func__, err, reg); return -EINVAL; } + clk_disable_unprepare(priv->clk); + return 0; } @@ -384,6 +389,11 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev) static int __maybe_unused stm32_rng_suspend(struct device *dev) { struct stm32_rng_private *priv = dev_get_drvdata(dev); + int err; + + err = clk_prepare_enable(priv->clk); + if (err) + return err; if (priv->data->has_cond_reset) { priv->pm_conf.nscr = readl_relaxed(priv->base + RNG_NSCR); @@ -465,6 +475,8 @@ static int __maybe_unused stm32_rng_resume(struct device *dev) writel_relaxed(reg, priv->base + RNG_CR); } + clk_disable_unprepare(priv->clk); + return 0; } diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index ee951b2652..58e9dcc2a3 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -296,28 +296,35 @@ static int register_device(int minor, struct pp_struct *pp) if (!port) { pr_warn("%s: no associated port!\n", name); rc = -ENXIO; - goto err; + goto err_free_name; } index = ida_alloc(&ida_index, GFP_KERNEL); + if (index < 0) { + pr_warn("%s: failed to get index!\n", name); + rc = index; + goto err_put_port; + } + memset(&ppdev_cb, 0, sizeof(ppdev_cb)); ppdev_cb.irq_func = pp_irq; ppdev_cb.flags = (pp->flags & PP_EXCL) ? 
PARPORT_FLAG_EXCL : 0; ppdev_cb.private = pp; pdev = parport_register_dev_model(port, name, &ppdev_cb, index); - parport_put_port(port); if (!pdev) { pr_warn("%s: failed to register device!\n", name); rc = -ENXIO; ida_free(&ida_index, index); - goto err; + goto err_put_port; } pp->pdev = pdev; pp->index = index; dev_dbg(&pdev->dev, "registered pardevice\n"); -err: +err_put_port: + parport_put_port(port); +err_free_name: kfree(name); return rc; } diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 714070ebb6..0c20fbc089 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1020,7 +1020,8 @@ void tpm_tis_remove(struct tpm_chip *chip) interrupt = 0; tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt); - flush_work(&priv->free_irq_work); + if (priv->free_irq_work.func) + flush_work(&priv->free_irq_work); tpm_tis_clkrun_enable(chip, false); diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c index 3f9eaf27b4..c9eca24bba 100644 --- a/drivers/char/tpm/tpm_tis_spi_main.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -37,6 +37,7 @@ #include "tpm_tis_spi.h" #define MAX_SPI_FRAMESIZE 64 +#define SPI_HDRSIZE 4 /* * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short, @@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy, int irq, const struct tpm_tis_phy_ops *phy_ops) { - phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL); + phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL); if (!phy->iobuf) return -ENOMEM; diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c index e4fbbf3c40..3cb235df9d 100644 --- a/drivers/clk/bcm/clk-bcm2711-dvp.c +++ b/drivers/clk/bcm/clk-bcm2711-dvp.c @@ -56,6 +56,8 @@ static int clk_dvp_probe(struct platform_device *pdev) if (ret) return ret; + data->num = NR_CLOCKS; + data->hws[0] = clk_hw_register_gate_parent_data(&pdev->dev, "hdmi0-108MHz", &clk_dvp_parent, 0, @@ -76,7 +78,6 @@ static int clk_dvp_probe(struct platform_device *pdev) goto unregister_clk0; } - data->num = NR_CLOCKS; ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get, data); if (ret) diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c index 829406dc44..4d411408e4 100644 --- a/drivers/clk/bcm/clk-raspberrypi.c +++ b/drivers/clk/bcm/clk-raspberrypi.c @@ -371,8 +371,8 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi, if (IS_ERR(hw)) return PTR_ERR(hw); - data->hws[clks->id] = hw; data->num = clks->id + 1; + data->hws[clks->id] = hw; } clks++; diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c index 53e21ac302..4c3a5e4eb7 100644 --- a/drivers/clk/clk-renesas-pcie.c +++ b/drivers/clk/clk-renesas-pcie.c @@ -25,10 +25,12 @@ #define RS9_REG_SS_AMP_0V7 0x1 #define RS9_REG_SS_AMP_0V8 0x2 #define RS9_REG_SS_AMP_0V9 0x3 +#define RS9_REG_SS_AMP_DEFAULT RS9_REG_SS_AMP_0V8 #define RS9_REG_SS_AMP_MASK 0x3 #define RS9_REG_SS_SSC_100 0 #define RS9_REG_SS_SSC_M025 (1 << 3) #define RS9_REG_SS_SSC_M050 (3 << 3) +#define RS9_REG_SS_SSC_DEFAULT RS9_REG_SS_SSC_100 #define RS9_REG_SS_SSC_MASK (3 << 3) #define RS9_REG_SS_SSC_LOCK BIT(5) #define RS9_REG_SR 0x2 @@ -205,8 +207,8 @@ static int rs9_get_common_config(struct rs9_driver_data *rs9) int ret; /* Set defaults */ - rs9->pll_amplitude = RS9_REG_SS_AMP_0V7; - rs9->pll_ssc = 
RS9_REG_SS_SSC_100; + rs9->pll_amplitude = RS9_REG_SS_AMP_DEFAULT; + rs9->pll_ssc = RS9_REG_SS_SSC_DEFAULT; /* Output clock amplitude */ ret = of_property_read_u32(np, "renesas,out-amplitude-microvolt", @@ -247,13 +249,13 @@ static void rs9_update_config(struct rs9_driver_data *rs9) int i; /* If amplitude is non-default, update it. */ - if (rs9->pll_amplitude != RS9_REG_SS_AMP_0V7) { + if (rs9->pll_amplitude != RS9_REG_SS_AMP_DEFAULT) { regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_AMP_MASK, rs9->pll_amplitude); } /* If SSC is non-default, update it. */ - if (rs9->pll_ssc != RS9_REG_SS_SSC_100) { + if (rs9->pll_ssc != RS9_REG_SS_SSC_DEFAULT) { regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_SSC_MASK, rs9->pll_ssc); } diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c index 01a2ef8f59..3f62ec7507 100644 --- a/drivers/clk/mediatek/clk-mt8365-mm.c +++ b/drivers/clk/mediatek/clk-mt8365-mm.c @@ -53,7 +53,7 @@ static const struct mtk_gate mm_clks[] = { GATE_MM0(CLK_MM_MM_DSI0, "mm_dsi0", "mm_sel", 17), GATE_MM0(CLK_MM_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 18), GATE_MM0(CLK_MM_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 19), - GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "vpll_dpix", 20), + GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "dpi0_sel", 20), GATE_MM0(CLK_MM_MM_FAKE, "mm_fake", "mm_sel", 21), GATE_MM0(CLK_MM_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 22), GATE_MM0(CLK_MM_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 23), diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c index 3a2b3f90be..094ec8a26d 100644 --- a/drivers/clk/mediatek/clk-pllfh.c +++ b/drivers/clk/mediatek/clk-pllfh.c @@ -68,7 +68,7 @@ void fhctl_parse_dt(const u8 *compatible_node, struct mtk_pllfh_data *pllfhs, node = of_find_compatible_node(NULL, NULL, compatible_node); if (!node) { - pr_err("cannot find \"%s\"\n", compatible_node); + pr_warn("cannot find \"%s\"\n", compatible_node); return; } diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 8ab08e7b5b..1bb51a0588 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -474,6 +474,7 @@ config SC_CAMCC_7280 config SC_CAMCC_8280XP tristate "SC8280XP Camera Clock Controller" + depends on ARM64 || COMPILE_TEST select SC_GCC_8280XP help Support for the camera clock controller on Qualcomm Technologies, Inc @@ -1094,6 +1095,7 @@ config SM_GPUCC_8550 config SM_GPUCC_8650 tristate "SM8650 Graphics Clock Controller" + depends on ARM64 || COMPILE_TEST select SM_GCC_8650 help Support for the graphics clock controller on SM8650 devices. diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c index 678b805f13..d7ab5bd5d4 100644 --- a/drivers/clk/qcom/apss-ipq-pll.c +++ b/drivers/clk/qcom/apss-ipq-pll.c @@ -55,6 +55,29 @@ static struct clk_alpha_pll ipq_pll_huayra = { }, }; +static struct clk_alpha_pll ipq_pll_stromer = { + .offset = 0x0, + /* + * Reuse CLK_ALPHA_PLL_TYPE_STROMER_PLUS register offsets. + * Although this is a bit confusing, but the offset values + * are correct nevertheless. 
+ */ + .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS], + .flags = SUPPORTS_DYNAMIC_UPDATE, + .clkr = { + .enable_reg = 0x0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "a53pll", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_stromer_ops, + }, + }, +}; + static struct clk_alpha_pll ipq_pll_stromer_plus = { .offset = 0x0, .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS], @@ -73,8 +96,9 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = { }, }; +/* 1.008 GHz configuration */ static const struct alpha_pll_config ipq5018_pll_config = { - .l = 0x32, + .l = 0x2a, .config_ctl_val = 0x4001075b, .config_ctl_hi_val = 0x304, .main_output_mask = BIT(0), @@ -144,8 +168,8 @@ struct apss_pll_data { }; static const struct apss_pll_data ipq5018_pll_data = { - .pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS, - .pll = &ipq_pll_stromer_plus, + .pll_type = CLK_ALPHA_PLL_TYPE_STROMER, + .pll = &ipq_pll_stromer, .pll_config = &ipq5018_pll_config, }; @@ -203,7 +227,8 @@ static int apss_ipq_pll_probe(struct platform_device *pdev) if (data->pll_type == CLK_ALPHA_PLL_TYPE_HUAYRA) clk_alpha_pll_configure(data->pll, regmap, data->pll_config); - else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS) + else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER || + data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS) clk_stromer_pll_configure(data->pll, regmap, data->pll_config); ret = devm_clk_register_regmap(dev, &data->pll->clkr); diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 8a412ef47e..be18ff983d 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -213,7 +213,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [PLL_OFF_USER_CTL] = 0x18, [PLL_OFF_USER_CTL_U] = 0x1c, [PLL_OFF_CONFIG_CTL] = 0x20, - [PLL_OFF_CONFIG_CTL_U] = 0xff, [PLL_OFF_TEST_CTL] = 0x30, [PLL_OFF_TEST_CTL_U] = 0x34, [PLL_OFF_STATUS] = 0x28, @@ -2490,6 +2489,8 @@ static int clk_alpha_pll_stromer_set_rate(struct clk_hw *hw, unsigned long rate, rate = alpha_pll_round_rate(rate, prate, &l, &a, ALPHA_REG_BITWIDTH); regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); + + a <<= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH; regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a); regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> ALPHA_BITWIDTH); diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c index 8394353620..e4b7464c4d 100644 --- a/drivers/clk/qcom/dispcc-sm6350.c +++ b/drivers/clk/qcom/dispcc-sm6350.c @@ -221,26 +221,17 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = { }, }; -static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = { - F(162000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0), - F(270000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0), - F(540000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0), - F(810000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0), - { } -}; - static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = { .cmd_rcgr = 0x10f8, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_0, - .freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_dp_link_clk_src", .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c index 92e9c4e7b1..49bb4f58c3 100644 --- 
a/drivers/clk/qcom/dispcc-sm8450.c +++ b/drivers/clk/qcom/dispcc-sm8450.c @@ -309,26 +309,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = { }, }; -static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = { - F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - { } -}; - static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = { .cmd_rcgr = 0x819c, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; @@ -382,13 +373,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(struct clk_init_data) { .name = "disp_cc_mdss_dptx1_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; @@ -442,13 +432,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; @@ -502,13 +491,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(struct clk_init_data) { .name = "disp_cc_mdss_dptx3_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c index 3672c73ac1..38ecea8055 100644 --- a/drivers/clk/qcom/dispcc-sm8550.c +++ b/drivers/clk/qcom/dispcc-sm8550.c @@ -345,26 +345,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = { }, }; -static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = { - F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - { } -}; - static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = { .cmd_rcgr = 0x8170, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_7, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_clk_src", .parent_data = disp_cc_parent_data_7, .num_parents = ARRAY_SIZE(disp_cc_parent_data_7), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; @@ -418,13 +409,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(struct clk_init_data) { .name = 
"disp_cc_mdss_dptx1_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; @@ -478,13 +468,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; @@ -538,13 +527,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(struct clk_init_data) { .name = "disp_cc_mdss_dptx3_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; diff --git a/drivers/clk/qcom/dispcc-sm8650.c b/drivers/clk/qcom/dispcc-sm8650.c index 9539db0d91..3eb64bcad4 100644 --- a/drivers/clk/qcom/dispcc-sm8650.c +++ b/drivers/clk/qcom/dispcc-sm8650.c @@ -343,26 +343,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = { }, }; -static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = { - F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0), - { } -}; - static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = { .cmd_rcgr = 0x8170, .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_7, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx0_link_clk_src", .parent_data = disp_cc_parent_data_7, .num_parents = ARRAY_SIZE(disp_cc_parent_data_7), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; @@ -416,13 +407,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx1_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; @@ -476,13 +466,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx2_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; @@ -536,13 +525,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = { .mnd_width = 0, .hid_width = 5, .parent_map = disp_cc_parent_map_3, - .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src, .clkr.hw.init = &(const struct clk_init_data) { .name = "disp_cc_mdss_dptx3_link_clk_src", .parent_data = disp_cc_parent_data_3, .num_parents = ARRAY_SIZE(disp_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_byte2_ops, }, }; diff --git 
a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c index 1180e48c68..275fb3b71e 100644 --- a/drivers/clk/qcom/mmcc-msm8998.c +++ b/drivers/clk/qcom/mmcc-msm8998.c @@ -2535,6 +2535,8 @@ static struct clk_branch vmem_ahb_clk = { static struct gdsc video_top_gdsc = { .gdscr = 0x1024, + .cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 }, + .cxc_count = 3, .pd = { .name = "video_top", }, @@ -2543,20 +2545,26 @@ static struct gdsc video_top_gdsc = { static struct gdsc video_subcore0_gdsc = { .gdscr = 0x1040, + .cxcs = (unsigned int []){ 0x1048 }, + .cxc_count = 1, .pd = { .name = "video_subcore0", }, .parent = &video_top_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, + .flags = HW_CTRL, }; static struct gdsc video_subcore1_gdsc = { .gdscr = 0x1044, + .cxcs = (unsigned int []){ 0x104c }, + .cxc_count = 1, .pd = { .name = "video_subcore1", }, .parent = &video_top_gdsc.pd, .pwrsts = PWRSTS_OFF_ON, + .flags = HW_CTRL, }; static struct gdsc mdss_gdsc = { diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c index 4c2872f453..ff3f85e906 100644 --- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c @@ -139,7 +139,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = { DEF_MOD("avb3", 214, R8A779A0_CLK_S3D2), DEF_MOD("avb4", 215, R8A779A0_CLK_S3D2), DEF_MOD("avb5", 216, R8A779A0_CLK_S3D2), - DEF_MOD("canfd0", 328, R8A779A0_CLK_CANFD), + DEF_MOD("canfd0", 328, R8A779A0_CLK_S3D2), DEF_MOD("csi40", 331, R8A779A0_CLK_CSI0), DEF_MOD("csi41", 400, R8A779A0_CLK_CSI0), DEF_MOD("csi42", 401, R8A779A0_CLK_CSI0), diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c index 33532673d2..26b71547fd 100644 --- a/drivers/clk/renesas/r9a07g043-cpg.c +++ b/drivers/clk/renesas/r9a07g043-cpg.c @@ -280,6 +280,10 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = { 0x5a8, 1), DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU, 0x5ac, 0), +#ifdef CONFIG_RISCV + DEF_MOD("nceplic_aclk", R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1, + 0x608, 0), +#endif }; static struct rzg2l_reset r9a07g043_resets[] = { @@ -338,6 +342,10 @@ static struct rzg2l_reset r9a07g043_resets[] = { DEF_RST(R9A07G043_ADC_PRESETN, 0x8a8, 0), DEF_RST(R9A07G043_ADC_ADRST_N, 0x8a8, 1), DEF_RST(R9A07G043_TSU_PRESETN, 0x8ac, 0), +#ifdef CONFIG_RISCV + DEF_RST(R9A07G043_NCEPLIC_ARESETN, 0x908, 0), +#endif + }; static const unsigned int r9a07g043_crit_mod_clks[] __initconst = { @@ -347,6 +355,7 @@ static const unsigned int r9a07g043_crit_mod_clks[] __initconst = { #endif #ifdef CONFIG_RISCV MOD_CLK_BASE + R9A07G043_IAX45_CLK, + MOD_CLK_BASE + R9A07G043_NCEPLIC_ACLK, #endif MOD_CLK_BASE + R9A07G043_DMAC_ACLK, }; diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c index e9c06eb93e..f04bacacab 100644 --- a/drivers/clk/samsung/clk-exynosautov9.c +++ b/drivers/clk/samsung/clk-exynosautov9.c @@ -352,13 +352,13 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = { /* CMU_TOP_PURECLKCOMP */ PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk", PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL), - PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared1_pll", "oscclk", + PLL(pll_0822x, FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk", PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL), - PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared2_pll", "oscclk", + PLL(pll_0822x, FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk", PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL), - 
PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared3_pll", "oscclk", + PLL(pll_0822x, FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk", PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL), - PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared4_pll", "oscclk", + PLL(pll_0822x, FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk", PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL), }; diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c index d065e343a8..bd3c1b0271 100644 --- a/drivers/clk/samsung/clk-gs101.c +++ b/drivers/clk/samsung/clk-gs101.c @@ -2763,33 +2763,33 @@ static const struct samsung_mux_clock peric0_mux_clks[] __initconst = { MUX(CLK_MOUT_PERIC0_USI0_UART_USER, "mout_peric0_usi0_uart_user", mout_peric0_usi0_uart_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI0_UART_USER, 4, 1), - MUX(CLK_MOUT_PERIC0_USI14_USI_USER, - "mout_peric0_usi14_usi_user", mout_peric0_usi_usi_user_p, - PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC0_USI1_USI_USER, - "mout_peric0_usi1_usi_user", mout_peric0_usi_usi_user_p, - PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC0_USI2_USI_USER, - "mout_peric0_usi2_usi_user", mout_peric0_usi_usi_user_p, - PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC0_USI3_USI_USER, - "mout_peric0_usi3_usi_user", mout_peric0_usi_usi_user_p, - PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC0_USI4_USI_USER, - "mout_peric0_usi4_usi_user", mout_peric0_usi_usi_user_p, - PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC0_USI5_USI_USER, - "mout_peric0_usi5_usi_user", mout_peric0_usi_usi_user_p, - PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC0_USI6_USI_USER, - "mout_peric0_usi6_usi_user", mout_peric0_usi_usi_user_p, - PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC0_USI7_USI_USER, - "mout_peric0_usi7_usi_user", mout_peric0_usi_usi_user_p, - PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC0_USI8_USI_USER, - "mout_peric0_usi8_usi_user", mout_peric0_usi_usi_user_p, - PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC0_USI14_USI_USER, + "mout_peric0_usi14_usi_user", mout_peric0_usi_usi_user_p, + PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC0_USI1_USI_USER, + "mout_peric0_usi1_usi_user", mout_peric0_usi_usi_user_p, + PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC0_USI2_USI_USER, + "mout_peric0_usi2_usi_user", mout_peric0_usi_usi_user_p, + PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC0_USI3_USI_USER, + "mout_peric0_usi3_usi_user", mout_peric0_usi_usi_user_p, + PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC0_USI4_USI_USER, + "mout_peric0_usi4_usi_user", mout_peric0_usi_usi_user_p, + PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC0_USI5_USI_USER, + "mout_peric0_usi5_usi_user", mout_peric0_usi_usi_user_p, + PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC0_USI6_USI_USER, + "mout_peric0_usi6_usi_user", mout_peric0_usi_usi_user_p, + PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC0_USI7_USI_USER, + "mout_peric0_usi7_usi_user", mout_peric0_usi_usi_user_p, + PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC0_USI8_USI_USER, + "mout_peric0_usi8_usi_user", mout_peric0_usi_usi_user_p, + PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER, 4, 1), }; static const struct samsung_div_clock peric0_div_clks[] __initconst = { @@ -2798,33 
+2798,42 @@ static const struct samsung_div_clock peric0_div_clks[] __initconst = { DIV(CLK_DOUT_PERIC0_USI0_UART, "dout_peric0_usi0_uart", "mout_peric0_usi0_uart_user", CLK_CON_DIV_DIV_CLK_PERIC0_USI0_UART, 0, 4), - DIV(CLK_DOUT_PERIC0_USI14_USI, - "dout_peric0_usi14_usi", "mout_peric0_usi14_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI, 0, 4), - DIV(CLK_DOUT_PERIC0_USI1_USI, - "dout_peric0_usi1_usi", "mout_peric0_usi1_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI, 0, 4), - DIV(CLK_DOUT_PERIC0_USI2_USI, - "dout_peric0_usi2_usi", "mout_peric0_usi2_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI, 0, 4), - DIV(CLK_DOUT_PERIC0_USI3_USI, - "dout_peric0_usi3_usi", "mout_peric0_usi3_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI, 0, 4), - DIV(CLK_DOUT_PERIC0_USI4_USI, - "dout_peric0_usi4_usi", "mout_peric0_usi4_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI, 0, 4), - DIV(CLK_DOUT_PERIC0_USI5_USI, - "dout_peric0_usi5_usi", "mout_peric0_usi5_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI, 0, 4), - DIV(CLK_DOUT_PERIC0_USI6_USI, - "dout_peric0_usi6_usi", "mout_peric0_usi6_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI, 0, 4), - DIV(CLK_DOUT_PERIC0_USI7_USI, - "dout_peric0_usi7_usi", "mout_peric0_usi7_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI, 0, 4), - DIV(CLK_DOUT_PERIC0_USI8_USI, - "dout_peric0_usi8_usi", "mout_peric0_usi8_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI, 0, 4), + DIV_F(CLK_DOUT_PERIC0_USI14_USI, + "dout_peric0_usi14_usi", "mout_peric0_usi14_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC0_USI1_USI, + "dout_peric0_usi1_usi", "mout_peric0_usi1_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC0_USI2_USI, + "dout_peric0_usi2_usi", "mout_peric0_usi2_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC0_USI3_USI, + "dout_peric0_usi3_usi", "mout_peric0_usi3_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC0_USI4_USI, + "dout_peric0_usi4_usi", "mout_peric0_usi4_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC0_USI5_USI, + "dout_peric0_usi5_usi", "mout_peric0_usi5_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC0_USI6_USI, + "dout_peric0_usi6_usi", "mout_peric0_usi6_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC0_USI7_USI, + "dout_peric0_usi7_usi", "mout_peric0_usi7_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC0_USI8_USI, + "dout_peric0_usi8_usi", "mout_peric0_usi8_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), }; static const struct samsung_gate_clock peric0_gate_clks[] __initconst = { @@ -2857,11 +2866,11 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = { GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0, "gout_peric0_peric0_top0_ipclk_0", "dout_peric0_usi1_usi", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1, "gout_peric0_peric0_top0_ipclk_1", "dout_peric0_usi2_usi", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_10, "gout_peric0_peric0_top0_ipclk_10", 
"dout_peric0_i3c", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10, @@ -2889,27 +2898,27 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = { GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2, "gout_peric0_peric0_top0_ipclk_2", "dout_peric0_usi3_usi", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3, "gout_peric0_peric0_top0_ipclk_3", "dout_peric0_usi4_usi", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4, "gout_peric0_peric0_top0_ipclk_4", "dout_peric0_usi5_usi", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5, "gout_peric0_peric0_top0_ipclk_5", "dout_peric0_usi6_usi", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6, "gout_peric0_peric0_top0_ipclk_6", "dout_peric0_usi7_usi", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7, "gout_peric0_peric0_top0_ipclk_7", "dout_peric0_usi8_usi", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_8, "gout_peric0_peric0_top0_ipclk_8", "dout_peric0_i3c", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8, @@ -2990,7 +2999,7 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = { GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2, "gout_peric0_peric0_top1_ipclk_2", "dout_peric0_usi14_usi", CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_2, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), /* Disabling this clock makes the system hang. Mark the clock as critical. 
*/ GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0, "gout_peric0_peric0_top1_pclk_0", "mout_peric0_bus_user", @@ -3230,47 +3239,53 @@ static const struct samsung_mux_clock peric1_mux_clks[] __initconst = { MUX(CLK_MOUT_PERIC1_I3C_USER, "mout_peric1_i3c_user", mout_peric1_nonbususer_p, PLL_CON0_MUX_CLKCMU_PERIC1_I3C_USER, 4, 1), - MUX(CLK_MOUT_PERIC1_USI0_USI_USER, - "mout_peric1_usi0_usi_user", mout_peric1_nonbususer_p, - PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC1_USI10_USI_USER, - "mout_peric1_usi10_usi_user", mout_peric1_nonbususer_p, - PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC1_USI11_USI_USER, - "mout_peric1_usi11_usi_user", mout_peric1_nonbususer_p, - PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC1_USI12_USI_USER, - "mout_peric1_usi12_usi_user", mout_peric1_nonbususer_p, - PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC1_USI13_USI_USER, - "mout_peric1_usi13_usi_user", mout_peric1_nonbususer_p, - PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER, 4, 1), - MUX(CLK_MOUT_PERIC1_USI9_USI_USER, - "mout_peric1_usi9_usi_user", mout_peric1_nonbususer_p, - PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC1_USI0_USI_USER, + "mout_peric1_usi0_usi_user", mout_peric1_nonbususer_p, + PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC1_USI10_USI_USER, + "mout_peric1_usi10_usi_user", mout_peric1_nonbususer_p, + PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC1_USI11_USI_USER, + "mout_peric1_usi11_usi_user", mout_peric1_nonbususer_p, + PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC1_USI12_USI_USER, + "mout_peric1_usi12_usi_user", mout_peric1_nonbususer_p, + PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC1_USI13_USI_USER, + "mout_peric1_usi13_usi_user", mout_peric1_nonbususer_p, + PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER, 4, 1), + nMUX(CLK_MOUT_PERIC1_USI9_USI_USER, + "mout_peric1_usi9_usi_user", mout_peric1_nonbususer_p, + PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER, 4, 1), }; static const struct samsung_div_clock peric1_div_clks[] __initconst = { DIV(CLK_DOUT_PERIC1_I3C, "dout_peric1_i3c", "mout_peric1_i3c_user", CLK_CON_DIV_DIV_CLK_PERIC1_I3C, 0, 4), - DIV(CLK_DOUT_PERIC1_USI0_USI, - "dout_peric1_usi0_usi", "mout_peric1_usi0_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI, 0, 4), - DIV(CLK_DOUT_PERIC1_USI10_USI, - "dout_peric1_usi10_usi", "mout_peric1_usi10_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, 0, 4), - DIV(CLK_DOUT_PERIC1_USI11_USI, - "dout_peric1_usi11_usi", "mout_peric1_usi11_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, 0, 4), - DIV(CLK_DOUT_PERIC1_USI12_USI, - "dout_peric1_usi12_usi", "mout_peric1_usi12_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, 0, 4), - DIV(CLK_DOUT_PERIC1_USI13_USI, - "dout_peric1_usi13_usi", "mout_peric1_usi13_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, 0, 4), - DIV(CLK_DOUT_PERIC1_USI9_USI, - "dout_peric1_usi9_usi", "mout_peric1_usi9_usi_user", - CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI, 0, 4), + DIV_F(CLK_DOUT_PERIC1_USI0_USI, + "dout_peric1_usi0_usi", "mout_peric1_usi0_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC1_USI10_USI, + "dout_peric1_usi10_usi", "mout_peric1_usi10_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC1_USI11_USI, + "dout_peric1_usi11_usi", "mout_peric1_usi11_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, 0, 4, + 
CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC1_USI12_USI, + "dout_peric1_usi12_usi", "mout_peric1_usi12_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC1_USI13_USI, + "dout_peric1_usi13_usi", "mout_peric1_usi13_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), + DIV_F(CLK_DOUT_PERIC1_USI9_USI, + "dout_peric1_usi9_usi", "mout_peric1_usi9_usi_user", + CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI, 0, 4, + CLK_SET_RATE_PARENT, 0), }; static const struct samsung_gate_clock peric1_gate_clks[] __initconst = { @@ -3305,27 +3320,27 @@ static const struct samsung_gate_clock peric1_gate_clks[] __initconst = { GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1, "gout_peric1_peric1_top0_ipclk_1", "dout_peric1_usi0_usi", CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2, "gout_peric1_peric1_top0_ipclk_2", "dout_peric1_usi9_usi", CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3, "gout_peric1_peric1_top0_ipclk_3", "dout_peric1_usi10_usi", CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4, "gout_peric1_peric1_top0_ipclk_4", "dout_peric1_usi11_usi", CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5, "gout_peric1_peric1_top0_ipclk_5", "dout_peric1_usi12_usi", CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6, "gout_peric1_peric1_top0_ipclk_6", "dout_peric1_usi13_usi", CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6, - 21, 0, 0), + 21, CLK_SET_RATE_PARENT, 0), GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_8, "gout_peric1_peric1_top0_ipclk_8", "dout_peric1_i3c", CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8, diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h index a763309e6f..556167350b 100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h @@ -133,7 +133,7 @@ struct samsung_mux_clock { .name = cname, \ .parent_names = pnames, \ .num_parents = ARRAY_SIZE(pnames), \ - .flags = (f) | CLK_SET_RATE_NO_REPARENT, \ + .flags = f, \ .offset = o, \ .shift = s, \ .width = w, \ @@ -141,9 +141,16 @@ struct samsung_mux_clock { } #define MUX(_id, cname, pnames, o, s, w) \ - __MUX(_id, cname, pnames, o, s, w, 0, 0) + __MUX(_id, cname, pnames, o, s, w, CLK_SET_RATE_NO_REPARENT, 0) #define MUX_F(_id, cname, pnames, o, s, w, f, mf) \ + __MUX(_id, cname, pnames, o, s, w, (f) | CLK_SET_RATE_NO_REPARENT, mf) + +/* Used by MUX clocks where reparenting on clock rate change is allowed. 
*/ +#define nMUX(_id, cname, pnames, o, s, w) \ + __MUX(_id, cname, pnames, o, s, w, 0, 0) + +#define nMUX_F(_id, cname, pnames, o, s, w, f, mf) \ __MUX(_id, cname, pnames, o, s, w, f, mf) /** diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c index 25b8e1a80d..b32a59fe55 100644 --- a/drivers/clk/sifive/sifive-prci.c +++ b/drivers/clk/sifive/sifive-prci.c @@ -4,7 +4,6 @@ * Copyright (C) 2020 Zong Li */ -#include <linux/clkdev.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/module.h> @@ -537,13 +536,6 @@ static int __prci_register_clocks(struct device *dev, struct __prci_data *pd, return r; } - r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev)); - if (r) { - dev_warn(dev, "Failed to register clkdev for %s: %d\n", - init.name, r); - return r; - } - pd->hw_clks.hws[i] = &pic->hw; } diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c index f04ae67dda..fc275d41d5 100644 --- a/drivers/cpufreq/amd-pstate-ut.c +++ b/drivers/cpufreq/amd-pstate-ut.c @@ -26,10 +26,11 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/fs.h> -#include <linux/amd-pstate.h> #include <acpi/cppc_acpi.h> +#include "amd-pstate.h" + /* * Abbreviations: * amd_pstate_ut: used as a shortform for AMD P-State unit test. diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 28166df81c..6af175e6c0 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -36,7 +36,6 @@ #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/static_call.h> -#include <linux/amd-pstate.h> #include <linux/topology.h> #include <acpi/processor.h> @@ -46,6 +45,8 @@ #include <asm/processor.h> #include <asm/cpufeature.h> #include <asm/cpu_device_id.h> + +#include "amd-pstate.h" #include "amd-pstate-trace.h" #define AMD_PSTATE_TRANSITION_LATENCY 20000 @@ -53,6 +54,37 @@ #define CPPC_HIGHEST_PERF_PERFORMANCE 196 #define CPPC_HIGHEST_PERF_DEFAULT 166 +#define AMD_CPPC_EPP_PERFORMANCE 0x00 +#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80 +#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF +#define AMD_CPPC_EPP_POWERSAVE 0xFF + +/* + * enum amd_pstate_mode - driver working mode of amd pstate + */ +enum amd_pstate_mode { + AMD_PSTATE_UNDEFINED = 0, + AMD_PSTATE_DISABLE, + AMD_PSTATE_PASSIVE, + AMD_PSTATE_ACTIVE, + AMD_PSTATE_GUIDED, + AMD_PSTATE_MAX, +}; + +static const char * const amd_pstate_mode_string[] = { + [AMD_PSTATE_UNDEFINED] = "undefined", + [AMD_PSTATE_DISABLE] = "disable", + [AMD_PSTATE_PASSIVE] = "passive", + [AMD_PSTATE_ACTIVE] = "active", + [AMD_PSTATE_GUIDED] = "guided", + NULL, +}; + +struct quirk_entry { + u32 nominal_freq; + u32 lowest_freq; +}; + /* * TODO: We need more time to fine tune processors with shared memory solution * with community together. 
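Note (illustrative sketch, not part of the patch): the amd-pstate hunks above move the driver's working-mode enum and its NULL-terminated string table from the shared header into amd-pstate.c. The standalone sketch below only shows how such a table maps a sysfs mode string such as "passive" back to its enum index; the helper name lookup_mode_index() and the small main() are hypothetical stand-ins for the driver's own get_mode_idx_from_str() and its sysfs store path.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

enum amd_pstate_mode {
	AMD_PSTATE_UNDEFINED = 0,
	AMD_PSTATE_DISABLE,
	AMD_PSTATE_PASSIVE,
	AMD_PSTATE_ACTIVE,
	AMD_PSTATE_GUIDED,
	AMD_PSTATE_MAX,
};

/* Same shape as the table moved into amd-pstate.c: indexed by the enum,
 * terminated by a NULL entry at AMD_PSTATE_MAX. */
static const char * const amd_pstate_mode_string[] = {
	[AMD_PSTATE_UNDEFINED] = "undefined",
	[AMD_PSTATE_DISABLE]   = "disable",
	[AMD_PSTATE_PASSIVE]   = "passive",
	[AMD_PSTATE_ACTIVE]    = "active",
	[AMD_PSTATE_GUIDED]    = "guided",
	NULL,
};

/* Return the matching enum value, or -1 if the string is unknown. */
static int lookup_mode_index(const char *str, size_t size)
{
	int i;

	for (i = 0; i < AMD_PSTATE_MAX; i++) {
		if (!strncmp(str, amd_pstate_mode_string[i], size))
			return i;
	}

	return -1;
}

int main(void)
{
	/* Example: "passive" resolves to index 2 (AMD_PSTATE_PASSIVE). */
	printf("%d\n", lookup_mode_index("passive", strlen("passive")));
	return 0;
}

Keeping the NULL terminator on the table lets other callers walk it without knowing AMD_PSTATE_MAX, which is why the patch moves the terminator along with the strings.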
@@ -68,6 +100,7 @@ static struct cpufreq_driver amd_pstate_epp_driver; static int cppc_state = AMD_PSTATE_UNDEFINED; static bool cppc_enabled; static bool amd_pstate_prefcore = true; +static struct quirk_entry *quirks; /* * AMD Energy Preference Performance (EPP) @@ -112,6 +145,41 @@ static unsigned int epp_values[] = { typedef int (*cppc_mode_transition_fn)(int); +static struct quirk_entry quirk_amd_7k62 = { + .nominal_freq = 2600, + .lowest_freq = 550, +}; + +static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi) +{ + /** + * match the broken bios for family 17h processor support CPPC V2 + * broken BIOS lack of nominal_freq and lowest_freq capabilities + * definition in ACPI tables + */ + if (boot_cpu_has(X86_FEATURE_ZEN2)) { + quirks = dmi->driver_data; + pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident); + return 1; + } + + return 0; +} + +static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = { + { + .callback = dmi_matched_7k62_bios_bug, + .ident = "AMD EPYC 7K62", + .matches = { + DMI_MATCH(DMI_BIOS_VERSION, "5.14"), + DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"), + }, + .driver_data = &quirk_amd_7k62, + }, + {} +}; +MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table); + static inline int get_mode_idx_from_str(const char *str, size_t size) { int i; @@ -622,74 +690,22 @@ static void amd_pstate_adjust_perf(unsigned int cpu, static int amd_get_min_freq(struct amd_cpudata *cpudata) { - struct cppc_perf_caps cppc_perf; - - int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); - if (ret) - return ret; - - /* Switch to khz */ - return cppc_perf.lowest_freq * 1000; + return READ_ONCE(cpudata->min_freq); } static int amd_get_max_freq(struct amd_cpudata *cpudata) { - struct cppc_perf_caps cppc_perf; - u32 max_perf, max_freq, nominal_freq, nominal_perf; - u64 boost_ratio; - - int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); - if (ret) - return ret; - - nominal_freq = cppc_perf.nominal_freq; - nominal_perf = READ_ONCE(cpudata->nominal_perf); - max_perf = READ_ONCE(cpudata->highest_perf); - - boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT, - nominal_perf); - - max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT; - - /* Switch to khz */ - return max_freq * 1000; + return READ_ONCE(cpudata->max_freq); } static int amd_get_nominal_freq(struct amd_cpudata *cpudata) { - struct cppc_perf_caps cppc_perf; - - int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); - if (ret) - return ret; - - /* Switch to khz */ - return cppc_perf.nominal_freq * 1000; + return READ_ONCE(cpudata->nominal_freq); } static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata) { - struct cppc_perf_caps cppc_perf; - u32 lowest_nonlinear_freq, lowest_nonlinear_perf, - nominal_freq, nominal_perf; - u64 lowest_nonlinear_ratio; - - int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); - if (ret) - return ret; - - nominal_freq = cppc_perf.nominal_freq; - nominal_perf = READ_ONCE(cpudata->nominal_perf); - - lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf; - - lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT, - nominal_perf); - - lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT; - - /* Switch to khz */ - return lowest_nonlinear_freq * 1000; + return READ_ONCE(cpudata->lowest_nonlinear_freq); } static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) @@ -705,7 +721,7 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) if 
(state) policy->cpuinfo.max_freq = cpudata->max_freq; else - policy->cpuinfo.max_freq = cpudata->nominal_freq; + policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000; policy->max = policy->cpuinfo.max_freq; @@ -844,6 +860,61 @@ free_cpufreq_put: mutex_unlock(&amd_pstate_driver_lock); } +/** + * amd_pstate_init_freq: Initialize the max_freq, min_freq, + * nominal_freq and lowest_nonlinear_freq for + * the @cpudata object. + * + * Requires: highest_perf, lowest_perf, nominal_perf and + * lowest_nonlinear_perf members of @cpudata to be + * initialized. + * + * Returns 0 on success, non-zero value on failure. + */ +static int amd_pstate_init_freq(struct amd_cpudata *cpudata) +{ + int ret; + u32 min_freq; + u32 highest_perf, max_freq; + u32 nominal_perf, nominal_freq; + u32 lowest_nonlinear_perf, lowest_nonlinear_freq; + u32 boost_ratio, lowest_nonlinear_ratio; + struct cppc_perf_caps cppc_perf; + + + ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); + if (ret) + return ret; + + if (quirks && quirks->lowest_freq) + min_freq = quirks->lowest_freq * 1000; + else + min_freq = cppc_perf.lowest_freq * 1000; + + if (quirks && quirks->nominal_freq) + nominal_freq = quirks->nominal_freq ; + else + nominal_freq = cppc_perf.nominal_freq; + + nominal_perf = READ_ONCE(cpudata->nominal_perf); + + highest_perf = READ_ONCE(cpudata->highest_perf); + boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf); + max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000; + + lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); + lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT, + nominal_perf); + lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000; + + WRITE_ONCE(cpudata->min_freq, min_freq); + WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq); + WRITE_ONCE(cpudata->nominal_freq, nominal_freq); + WRITE_ONCE(cpudata->max_freq, max_freq); + + return 0; +} + static int amd_pstate_cpu_init(struct cpufreq_policy *policy) { int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret; @@ -871,6 +942,10 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) if (ret) goto free_cpudata1; + ret = amd_pstate_init_freq(cpudata); + if (ret) + goto free_cpudata1; + min_freq = amd_get_min_freq(cpudata); max_freq = amd_get_max_freq(cpudata); nominal_freq = amd_get_nominal_freq(cpudata); @@ -912,13 +987,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) goto free_cpudata2; } - /* Initial processor data capability frequencies */ - cpudata->max_freq = max_freq; - cpudata->min_freq = min_freq; cpudata->max_limit_freq = max_freq; cpudata->min_limit_freq = min_freq; - cpudata->nominal_freq = nominal_freq; - cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq; policy->driver_data = cpudata; @@ -1333,6 +1403,10 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) if (ret) goto free_cpudata1; + ret = amd_pstate_init_freq(cpudata); + if (ret) + goto free_cpudata1; + min_freq = amd_get_min_freq(cpudata); max_freq = amd_get_max_freq(cpudata); nominal_freq = amd_get_nominal_freq(cpudata); @@ -1349,12 +1423,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) /* It will be updated by governor */ policy->cur = policy->cpuinfo.min_freq; - /* Initial processor data capability frequencies */ - cpudata->max_freq = max_freq; - cpudata->min_freq = min_freq; - cpudata->nominal_freq = nominal_freq; - cpudata->lowest_nonlinear_freq = 
lowest_nonlinear_freq; - policy->driver_data = cpudata; cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0); @@ -1394,6 +1462,13 @@ free_cpudata1: static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) { + struct amd_cpudata *cpudata = policy->driver_data; + + if (cpudata) { + kfree(cpudata); + policy->driver_data = NULL; + } + pr_debug("CPU %d exiting\n", policy->cpu); return 0; } @@ -1672,6 +1747,11 @@ static int __init amd_pstate_init(void) if (cpufreq_get_current_driver()) return -EEXIST; + quirks = NULL; + + /* check if this machine need CPPC quirks */ + dmi_check_system(amd_pstate_quirks_table); + switch (cppc_state) { case AMD_PSTATE_UNDEFINED: /* Disable on the following configs by default: diff --git a/include/linux/amd-pstate.h b/drivers/cpufreq/amd-pstate.h index d21838835a..bc341f3590 100644 --- a/include/linux/amd-pstate.h +++ b/drivers/cpufreq/amd-pstate.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * linux/include/linux/amd-pstate.h - * * Copyright (C) 2022 Advanced Micro Devices, Inc. * * Author: Meng Li <li.meng@amd.com> @@ -12,11 +10,6 @@ #include <linux/pm_qos.h> -#define AMD_CPPC_EPP_PERFORMANCE 0x00 -#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80 -#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF -#define AMD_CPPC_EPP_POWERSAVE 0xFF - /********************************************************************* * AMD P-state INTERFACE * *********************************************************************/ @@ -104,24 +97,4 @@ struct amd_cpudata { bool suspended; }; -/* - * enum amd_pstate_mode - driver working mode of amd pstate - */ -enum amd_pstate_mode { - AMD_PSTATE_UNDEFINED = 0, - AMD_PSTATE_DISABLE, - AMD_PSTATE_PASSIVE, - AMD_PSTATE_ACTIVE, - AMD_PSTATE_GUIDED, - AMD_PSTATE_MAX, -}; - -static const char * const amd_pstate_mode_string[] = { - [AMD_PSTATE_UNDEFINED] = "undefined", - [AMD_PSTATE_DISABLE] = "disable", - [AMD_PSTATE_PASSIVE] = "passive", - [AMD_PSTATE_ACTIVE] = "active", - [AMD_PSTATE_GUIDED] = "guided", - NULL, -}; #endif /* _LINUX_AMD_PSTATE_H */ diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 1a1857b0a6..ea8438550b 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -481,9 +481,12 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv) static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + struct private_data *priv; + if (!policy) return 0; - struct private_data *priv = policy->driver_data; + + priv = policy->driver_data; cpufreq_cpu_put(policy); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 64420d9cfd..15f1d41920 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -741,10 +741,15 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) { struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0}; struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - struct cppc_cpudata *cpu_data = policy->driver_data; + struct cppc_cpudata *cpu_data; u64 delivered_perf; int ret; + if (!policy) + return -ENODEV; + + cpu_data = policy->driver_data; + cpufreq_cpu_put(policy); ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0); @@ -822,10 +827,15 @@ static struct cpufreq_driver cppc_cpufreq_driver = { static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - struct cppc_cpudata *cpu_data = policy->driver_data; + struct cppc_cpudata *cpu_data; u64 
desired_perf; int ret; + if (!policy) + return -ENODEV; + + cpu_data = policy->driver_data; + cpufreq_cpu_put(policy); ret = cppc_get_desired_perf(cpu, &desired_perf); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 66e10a19d7..fd9c3ed21f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1679,10 +1679,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy) */ if (cpufreq_driver->offline) { cpufreq_driver->offline(policy); - } else if (cpufreq_driver->exit) { - cpufreq_driver->exit(policy); - policy->freq_table = NULL; + return; } + + if (cpufreq_driver->exit) + cpufreq_driver->exit(policy); + + policy->freq_table = NULL; } static int cpufreq_offline(unsigned int cpu) @@ -1740,7 +1743,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) } /* We did light-weight exit earlier, do full tear down now */ - if (cpufreq_driver->offline) + if (cpufreq_driver->offline && cpufreq_driver->exit) cpufreq_driver->exit(policy); up_write(&policy->rwsem); diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index 07989bb8c2..3fdc64b5a6 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c @@ -495,7 +495,7 @@ static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len, if (hash_iv_len) { packet_log(" Hash IV Length %u bytes\n", hash_iv_len); packet_dump(" hash IV: ", ptr, hash_iv_len); - ptr += ciph_key_len; + ptr += hash_iv_len; } if (ciph_iv_len) { diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index 4733012377..ff6ceb4fee 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c @@ -39,44 +39,38 @@ static const struct sp_dev_vdata dev_vdata[] = { }, }; -#ifdef CONFIG_ACPI static const struct acpi_device_id sp_acpi_match[] = { { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] }, { }, }; MODULE_DEVICE_TABLE(acpi, sp_acpi_match); -#endif -#ifdef CONFIG_OF static const struct of_device_id sp_of_match[] = { { .compatible = "amd,ccp-seattle-v1a", .data = (const void *)&dev_vdata[0] }, { }, }; MODULE_DEVICE_TABLE(of, sp_of_match); -#endif static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev) { -#ifdef CONFIG_OF const struct of_device_id *match; match = of_match_node(sp_of_match, pdev->dev.of_node); if (match && match->data) return (struct sp_dev_vdata *)match->data; -#endif + return NULL; } static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev) { -#ifdef CONFIG_ACPI const struct acpi_device_id *match; match = acpi_match_device(sp_acpi_match, &pdev->dev); if (match && match->driver_data) return (struct sp_dev_vdata *)match->driver_data; -#endif + return NULL; } @@ -212,12 +206,8 @@ static int sp_platform_resume(struct platform_device *pdev) static struct platform_driver sp_platform_driver = { .driver = { .name = "ccp", -#ifdef CONFIG_ACPI .acpi_match_table = sp_acpi_match, -#endif -#ifdef CONFIG_OF .of_match_table = sp_of_match, -#endif }, .probe = sp_platform_probe, .remove_new = sp_platform_remove, diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 92f0a1d9b4..13e413533f 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2893,12 +2893,9 @@ void hisi_qm_uninit(struct hisi_qm *qm) hisi_qm_set_state(qm, QM_NOT_READY); up_write(&qm->qps_lock); + qm_remove_uacce(qm); qm_irqs_unregister(qm); hisi_qm_pci_uninit(qm); - if (qm->use_sva) { - uacce_remove(qm->uacce); - qm->uacce = NULL; - } } 
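Note (illustrative sketch, not part of the patch): the cppc_cpufreq_get_rate() and hisi_cppc_cpufreq_get_rate() hunks above add the same guard, because cpufreq_cpu_get() returns NULL when no policy is registered for the CPU, so the policy pointer must be checked before policy->driver_data is read and the reference then dropped with cpufreq_cpu_put(). A minimal sketch of that pattern follows; the callback name example_get_rate() and the example_cpudata type are hypothetical and stand in for the drivers' own ->get() callbacks and cppc_cpudata.

#include <linux/cpufreq.h>

/* Hypothetical per-policy driver data, standing in for cppc_cpudata. */
struct example_cpudata {
	unsigned int cached_khz;
};

static unsigned int example_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct example_cpudata *data;

	if (!policy)	/* no policy for this CPU; the CPPC helpers return -ENODEV here */
		return 0;

	data = policy->driver_data;
	cpufreq_cpu_put(policy);	/* drop the reference taken by cpufreq_cpu_get() */

	/* A real driver would query hardware here; the cached value is a placeholder. */
	return data->cached_khz;
}

The brcmstb-avs-cpufreq hunk earlier in this series applies the same check-then-put ordering before touching policy->driver_data.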
EXPORT_SYMBOL_GPL(hisi_qm_uninit); diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 93a972fcbf..0558f98e22 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -481,8 +481,10 @@ static void sec_alg_resource_free(struct sec_ctx *ctx, if (ctx->pbuf_supported) sec_free_pbuf_resource(dev, qp_ctx->res); - if (ctx->alg_type == SEC_AEAD) + if (ctx->alg_type == SEC_AEAD) { sec_free_mac_resource(dev, qp_ctx->res); + sec_free_aiv_resource(dev, qp_ctx->res); + } } static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 1102c47f82..1d0ef47a9f 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -296,7 +296,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { if (adf_gen4_init_thd2arb_map(accel_dev)) dev_warn(&GET_DEV(accel_dev), - "Generate of the thread to arbiter map failed"); + "Failed to generate thread to arbiter mapping"); return GET_HW_DATA(accel_dev)->thd_to_arb_map; } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 927506cf27..fb34fd7f03 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -208,7 +208,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { if (adf_gen4_init_thd2arb_map(accel_dev)) dev_warn(&GET_DEV(accel_dev), - "Generate of the thread to arbiter map failed"); + "Failed to generate thread to arbiter mapping"); return GET_HW_DATA(accel_dev)->thd_to_arb_map; } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 9762f2bf77..d26564cebd 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -197,7 +197,9 @@ module_pci_driver(adf_driver); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_FIRMWARE(ADF_4XXX_FW); +MODULE_FIRMWARE(ADF_402XX_FW); MODULE_FIRMWARE(ADF_4XXX_MMP); +MODULE_FIRMWARE(ADF_402XX_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); MODULE_SOFTDEP("pre: crypto-intel_qat"); diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 9da2278bd5..04260f61d0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -130,8 +130,7 @@ static void adf_device_reset_worker(struct work_struct *work) if (adf_dev_restart(accel_dev)) { /* The device hanged and we can't restart it so stop here */ dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); - if (reset_data->mode == ADF_DEV_RESET_ASYNC || - completion_done(&reset_data->compl)) + if (reset_data->mode == ADF_DEV_RESET_ASYNC) kfree(reset_data); WARN(1, "QAT: device restart failed. Device is unusable\n"); return; @@ -147,16 +146,8 @@ static void adf_device_reset_worker(struct work_struct *work) adf_dev_restarted_notify(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); - /* - * The dev is back alive. Notify the caller if in sync mode - * - * If device restart will take a more time than expected, - * the schedule_reset() function can timeout and exit. 
This can be - * detected by calling the completion_done() function. In this case - * the reset_data structure needs to be freed here. - */ - if (reset_data->mode == ADF_DEV_RESET_ASYNC || - completion_done(&reset_data->compl)) + /* The dev is back alive. Notify the caller if in sync mode */ + if (reset_data->mode == ADF_DEV_RESET_ASYNC) kfree(reset_data); else complete(&reset_data->compl); @@ -191,10 +182,10 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, if (!timeout) { dev_err(&GET_DEV(accel_dev), "Reset device timeout expired\n"); + cancel_work_sync(&reset_data->reset_work); ret = -EFAULT; - } else { - kfree(reset_data); } + kfree(reset_data); return ret; } return 0; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c index 7fc7a77f6a..c7ad8cf078 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c @@ -149,5 +149,6 @@ void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) tl_data->sl_exec_counters = sl_exec_counters; tl_data->rp_counters = rp_counters; tl_data->num_rp_counters = ARRAY_SIZE(rp_counters); + tl_data->max_sl_cnt = ADF_GEN4_TL_MAX_SLICES_PER_TYPE; } EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index d4f2db3c53..e10f0024f4 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -1125,7 +1125,7 @@ int adf_rl_start(struct adf_accel_dev *accel_dev) } if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) { - dev_info(&GET_DEV(accel_dev), "not supported\n"); + dev_info(&GET_DEV(accel_dev), "feature not supported by FW\n"); ret = -EOPNOTSUPP; goto ret_free; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c index 2ff714d11b..74fb0c2ed2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c @@ -41,6 +41,20 @@ static int validate_tl_data(struct adf_tl_hw_data *tl_data) return 0; } +static int validate_tl_slice_counters(struct icp_qat_fw_init_admin_slice_cnt *slice_count, + u8 max_slices_per_type) +{ + u8 *sl_counter = (u8 *)slice_count; + int i; + + for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) { + if (sl_counter[i] > max_slices_per_type) + return -EINVAL; + } + + return 0; +} + static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev) { struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); @@ -214,6 +228,13 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state) return ret; } + ret = validate_tl_slice_counters(&telemetry->slice_cnt, tl_data->max_sl_cnt); + if (ret) { + dev_err(dev, "invalid value returned by FW\n"); + adf_send_admin_tl_stop(accel_dev); + return ret; + } + telemetry->hbuffs = state; atomic_set(&telemetry->state, state); diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h index 9be81cd3b8..e54a406cc1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h @@ -40,6 +40,7 @@ struct adf_tl_hw_data { u8 num_dev_counters; u8 num_rp_counters; u8 max_rp; + u8 max_sl_cnt; }; struct adf_telemetry { diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c index 79b4e74804..6bfc59e677 100644 --- 
a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c +++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c @@ -138,6 +138,10 @@ int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev, return -ENOMEM; cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&pdev->dev, cptr_dma)) { + kfree(hctx); + return -ENOMEM; + } cn10k_cpt_hw_ctx_set(hctx, 1); er_ctx->hw_ctx = hctx; diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c index cf8bda7f08..7ec14b5b84 100644 --- a/drivers/crypto/starfive/jh7110-rsa.c +++ b/drivers/crypto/starfive/jh7110-rsa.c @@ -273,7 +273,6 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc) err_rsa_crypt: writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET); - kfree(rctx->rsa_data); return ret; } diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 0df09bd794..2773f05adb 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -1045,3 +1045,32 @@ long cxl_pci_get_latency(struct pci_dev *pdev) return cxl_flit_size(pdev) * MEGA / bw; } + +static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data) +{ + struct cxl_port *port = data; + struct cxl_decoder *cxld; + struct cxl_hdm *cxlhdm; + void __iomem *hdm; + u32 ctrl; + + if (!is_endpoint_decoder(dev)) + return 0; + + cxld = to_cxl_decoder(dev); + if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) + return 0; + + cxlhdm = dev_get_drvdata(&port->dev); + hdm = cxlhdm->regs.hdm_decoder; + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id)); + + return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl); +} + +bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port) +{ + return device_for_each_child(&port->dev, port, + __cxl_endpoint_decoder_reset_detected); +} +EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL); diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 5c186e0a39..18b9514964 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -2352,15 +2352,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd, struct device *dev; int rc; - switch (mode) { - case CXL_DECODER_RAM: - case CXL_DECODER_PMEM: - break; - default: - dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); - return ERR_PTR(-EINVAL); - } - cxlr = cxl_region_alloc(cxlrd, id); if (IS_ERR(cxlr)) return cxlr; @@ -2415,6 +2406,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd, { int rc; + switch (mode) { + case CXL_DECODER_RAM: + case CXL_DECODER_PMEM: + break; + default: + dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); + return ERR_PTR(-EINVAL); + } + rc = memregion_alloc(GFP_KERNEL); if (rc < 0) return ERR_PTR(rc); @@ -2719,6 +2719,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) if (i == 0) { cxl_nvb = cxl_find_nvdimm_bridge(cxlmd); if (!cxl_nvb) { + kfree(cxlr_pmem); cxlr_pmem = ERR_PTR(-ENODEV); goto out; } diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index e5f13260fc..7c5cd069f1 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -253,8 +253,8 @@ TRACE_EVENT(cxl_generic_event, * DRAM Event Record * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 */ -#define CXL_DPA_FLAGS_MASK 0x3F -#define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK) +#define CXL_DPA_FLAGS_MASK GENMASK(1, 0) +#define CXL_DPA_MASK GENMASK_ULL(63, 6) #define CXL_DPA_VOLATILE BIT(0) #define CXL_DPA_NOT_REPAIRABLE BIT(1) diff --git 
a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 036d17db68..72fa477407 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -891,6 +891,8 @@ void cxl_coordinates_combine(struct access_coordinate *out, struct access_coordinate *c1, struct access_coordinate *c2); +bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port); + /* * Unit test builds overrides this to __weak, find the 'strong' version * of these symbols in tools/testing/cxl/. diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 2ff361e756..659f9d46b1 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -957,11 +957,33 @@ static void cxl_error_resume(struct pci_dev *pdev) dev->driver ? "successful" : "failed"); } +static void cxl_reset_done(struct pci_dev *pdev) +{ + struct cxl_dev_state *cxlds = pci_get_drvdata(pdev); + struct cxl_memdev *cxlmd = cxlds->cxlmd; + struct device *dev = &pdev->dev; + + /* + * FLR does not expect to touch the HDM decoders and related + * registers. SBR, however, will wipe all device configurations. + * Issue a warning if there was an active decoder before the reset + * that no longer exists. + */ + guard(device)(&cxlmd->dev); + if (cxlmd->endpoint && + cxl_endpoint_decoder_reset_detected(cxlmd->endpoint)) { + dev_crit(dev, "SBR happened without memory regions removal.\n"); + dev_crit(dev, "System may be unstable if regions hosted system memory.\n"); + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + } +} + static const struct pci_error_handlers cxl_error_handlers = { .error_detected = cxl_error_detected, .slot_reset = cxl_slot_reset, .resume = cxl_error_resume, .cor_error_detected = cxl_cor_error_detected, + .reset_done = cxl_reset_done, }; static struct pci_driver cxl_pci_driver = { diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index 797e1ebff2..f24b67c64d 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -192,7 +192,7 @@ static u64 dev_dax_size(struct dev_dax *dev_dax) u64 size = 0; int i; - WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem)); + lockdep_assert_held(&dax_dev_rwsem); for (i = 0; i < dev_dax->nr_range; i++) size += range_len(&dev_dax->ranges[i].range); @@ -302,7 +302,7 @@ static unsigned long long dax_region_avail_size(struct dax_region *dax_region) resource_size_t size = resource_size(&dax_region->res); struct resource *res; - WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem)); + lockdep_assert_held(&dax_region_rwsem); for_each_dax_region_resource(dax_region, res) size -= resource_size(res); @@ -447,7 +447,7 @@ static void trim_dev_dax_range(struct dev_dax *dev_dax) struct range *range = &dev_dax->ranges[i].range; struct dax_region *dax_region = dev_dax->region; - WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem)); + lockdep_assert_held_write(&dax_region_rwsem); dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i, (unsigned long long)range->start, (unsigned long long)range->end); @@ -465,26 +465,17 @@ static void free_dev_dax_ranges(struct dev_dax *dev_dax) trim_dev_dax_range(dev_dax); } -static void __unregister_dev_dax(void *dev) +static void unregister_dev_dax(void *dev) { struct dev_dax *dev_dax = to_dev_dax(dev); dev_dbg(dev, "%s\n", __func__); + down_write(&dax_region_rwsem); kill_dev_dax(dev_dax); device_del(dev); free_dev_dax_ranges(dev_dax); put_device(dev); -} - -static void unregister_dev_dax(void *dev) -{ - if (rwsem_is_locked(&dax_region_rwsem)) - return __unregister_dev_dax(dev); - - if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0)) - return; - __unregister_dev_dax(dev); up_write(&dax_region_rwsem); } @@ -507,7 +498,7 @@ static int 
__free_dev_dax_id(struct dev_dax *dev_dax) struct dax_region *dax_region; int rc = dev_dax->id; - WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem)); + lockdep_assert_held_write(&dax_dev_rwsem); if (!dev_dax->dyn_id || dev_dax->id < 0) return -1; @@ -560,15 +551,10 @@ static ssize_t delete_store(struct device *dev, struct device_attribute *attr, if (!victim) return -ENXIO; - rc = down_write_killable(&dax_region_rwsem); - if (rc) - return rc; - rc = down_write_killable(&dax_dev_rwsem); - if (rc) { - up_write(&dax_region_rwsem); - return rc; - } + device_lock(dev); + device_lock(victim); dev_dax = to_dev_dax(victim); + down_write(&dax_dev_rwsem); if (victim->driver || dev_dax_size(dev_dax)) rc = -EBUSY; else { @@ -589,11 +575,12 @@ static ssize_t delete_store(struct device *dev, struct device_attribute *attr, rc = -EBUSY; } up_write(&dax_dev_rwsem); + device_unlock(victim); /* won the race to invalidate the device, clean it up */ if (do_del) devm_release_action(dev, unregister_dev_dax, victim); - up_write(&dax_region_rwsem); + device_unlock(dev); put_device(victim); return rc; @@ -705,7 +692,7 @@ static void dax_mapping_release(struct device *dev) put_device(parent); } -static void __unregister_dax_mapping(void *data) +static void unregister_dax_mapping(void *data) { struct device *dev = data; struct dax_mapping *mapping = to_dax_mapping(dev); @@ -713,25 +700,12 @@ static void __unregister_dax_mapping(void *data) dev_dbg(dev, "%s\n", __func__); - WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem)); - dev_dax->ranges[mapping->range_id].mapping = NULL; mapping->range_id = -1; device_unregister(dev); } -static void unregister_dax_mapping(void *data) -{ - if (rwsem_is_locked(&dax_region_rwsem)) - return __unregister_dax_mapping(data); - - if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0)) - return; - __unregister_dax_mapping(data); - up_write(&dax_region_rwsem); -} - static struct dev_dax_range *get_dax_range(struct device *dev) { struct dax_mapping *mapping = to_dax_mapping(dev); @@ -830,7 +804,7 @@ static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id) struct device *dev; int rc; - WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem)); + lockdep_assert_held_write(&dax_region_rwsem); if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver, "region disabled\n")) @@ -876,7 +850,7 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start, struct resource *alloc; int i, rc; - WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem)); + lockdep_assert_held_write(&dax_region_rwsem); /* handle the seed alloc special case */ if (!size) { @@ -935,7 +909,7 @@ static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, r struct device *dev = &dev_dax->dev; int rc; - WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem)); + lockdep_assert_held_write(&dax_region_rwsem); if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n")) return -EINVAL; @@ -963,11 +937,11 @@ static ssize_t size_show(struct device *dev, unsigned long long size; int rc; - rc = down_write_killable(&dax_dev_rwsem); + rc = down_read_interruptible(&dax_dev_rwsem); if (rc) return rc; size = dev_dax_size(dev_dax); - up_write(&dax_dev_rwsem); + up_read(&dax_dev_rwsem); return sysfs_emit(buf, "%llu\n", size); } @@ -1566,12 +1540,8 @@ err_id: struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data) { struct dev_dax *dev_dax; - int rc; - - rc = down_write_killable(&dax_region_rwsem); - if (rc) - return ERR_PTR(rc); + down_write(&dax_region_rwsem); dev_dax = 
__devm_create_dev_dax(data); up_write(&dax_region_rwsem); diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c index b7c6f7ea9e..6a1bfcd0cc 100644 --- a/drivers/dma-buf/st-dma-fence.c +++ b/drivers/dma-buf/st-dma-fence.c @@ -540,6 +540,12 @@ static int race_signal_callback(void *arg) t[i].before = pass; t[i].task = kthread_run(thread_signal_callback, &t[i], "dma-fence:%d", i); + if (IS_ERR(t[i].task)) { + ret = PTR_ERR(t[i].task); + while (--i >= 0) + kthread_stop_put(t[i].task); + return ret; + } get_task_struct(t[i].task); } diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 101394f169..237bce21d1 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) seq_printf(s, "%s: %d\n", obj->name, obj->value); - spin_lock_irq(&obj->lock); + spin_lock(&obj->lock); /* Caller already disabled IRQ. */ list_for_each(pos, &obj->pt_list) { struct sync_pt *pt = container_of(pos, struct sync_pt, link); sync_print_fence(s, &pt->base, false); } - spin_unlock_irq(&obj->lock); + spin_unlock(&obj->lock); } static void sync_print_sync_file(struct seq_file *s, diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 002a5ec806..9fc99cfbef 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -394,7 +394,7 @@ config LS2X_APB_DMA config MCF_EDMA tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs" - depends on M5441x || COMPILE_TEST + depends on M5441x || (COMPILE_TEST && FSL_EDMA=n) select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 4e339c04fc..d5a33e4a91 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -1134,8 +1134,8 @@ static void axi_dmac_remove(struct platform_device *pdev) { struct axi_dmac *dmac = platform_get_drvdata(pdev); - of_dma_controller_free(pdev->dev.of_node); free_irq(dmac->irq, dmac); + of_dma_controller_free(pdev->dev.of_node); tasklet_kill(&dmac->chan.vchan.task); dma_async_device_unregister(&dmac->dma_dev); clk_disable_unprepare(dmac->clk); diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index a86a81ff0c..321446fddd 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -302,6 +302,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num) kfree(desc); return NULL; } + desc->nr_hw_descs = num; return desc; } @@ -328,7 +329,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan, static void axi_desc_put(struct axi_dma_desc *desc) { struct axi_dma_chan *chan = desc->chan; - int count = atomic_read(&chan->descs_allocated); + int count = desc->nr_hw_descs; struct axi_dma_hw_desc *hw_desc; int descs_put; @@ -1139,9 +1140,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) /* Remove the completed descriptor from issued list before completing */ list_del(&vd->node); vchan_cookie_complete(vd); - - /* Submit queued descriptors after processing the completed ones */ - axi_chan_start_first_queued(chan); } out: diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h index 454904d996..ac571b413b 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h @@ -104,6 +104,7 @@ struct axi_dma_desc { u32 completed_blocks; u32 length; u32 period_len; + u32 nr_hw_descs; }; struct axi_dma_chan_config 
{ diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 1398814d8f..e3505e5678 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -598,7 +598,9 @@ static int idma64_probe(struct idma64_chip *chip) idma64->dma.dev = chip->sysdev; - dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); + ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); + if (ret) + return ret; ret = dma_async_device_register(&idma64->dma); if (ret) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 3993507117..fd9bbee4cc 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -577,7 +577,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq) struct idxd_cdev *idxd_cdev; idxd_cdev = wq->idxd_cdev; - ida_destroy(&file_ida); wq->idxd_cdev = NULL; cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev)); put_device(cdev_dev(idxd_cdev)); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index 8dc029c865..fc049c9c98 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -611,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry) spin_unlock(&irq_entry->list_lock); - list_for_each_entry(desc, &flist, list) { + list_for_each_entry_safe(desc, n, &flist, list) { /* * Check against the original status as ABORT is software defined * and 0xff, which DSA_COMP_STATUS_MASK can mask out. */ + list_del(&desc->list); + if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true); continue; diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 9c364e92cb..e8f45a7fde 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -534,18 +534,6 @@ err_out: return err; } -static int ioat_register(struct ioatdma_device *ioat_dma) -{ - int err = dma_async_device_register(&ioat_dma->dma_dev); - - if (err) { - ioat_disable_interrupts(ioat_dma); - dma_pool_destroy(ioat_dma->completion_pool); - } - - return err; -} - static void ioat_dma_remove(struct ioatdma_device *ioat_dma) { struct dma_device *dma = &ioat_dma->dma_dev; @@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); } - err = ioat_register(ioat_dma); + err = dma_async_device_register(&ioat_dma->dma_dev); if (err) - return err; + goto err_disable_interrupts; ioat_kobject_add(ioat_dma, &ioat_ktype); @@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) /* disable relaxed ordering */ err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16); - if (err) - return pcibios_err_to_errno(err); + if (err) { + err = pcibios_err_to_errno(err); + goto err_disable_interrupts; + } /* clear relaxed ordering enable */ val16 &= ~PCI_EXP_DEVCTL_RELAX_EN; err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16); - if (err) - return pcibios_err_to_errno(err); + if (err) { + err = pcibios_err_to_errno(err); + goto err_disable_interrupts; + } if (ioat_dma->cap & IOAT_CAP_DPS) writeb(ioat_pending_level + 1, ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET); return 0; + +err_disable_interrupts: + ioat_disable_interrupts(ioat_dma); + dma_pool_destroy(ioat_dma->completion_pool); + return err; } static void ioat_shutdown(struct pci_dev *pdev) @@ -1350,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) void __iomem * const *iomap; struct device *dev = &pdev->dev; struct ioatdma_device *device; + unsigned int i; + u8 version; int err; err = 
pcim_enable_device(pdev); @@ -1363,6 +1362,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!iomap) return -ENOMEM; + version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET); + if (version < IOAT_VER_3_0) + return -ENODEV; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) return err; @@ -1373,17 +1376,18 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); pci_set_drvdata(pdev, device); - device->version = readb(device->reg_base + IOAT_VER_OFFSET); + device->version = version; if (device->version >= IOAT_VER_3_4) ioat_dca_enabled = 0; - if (device->version >= IOAT_VER_3_0) { - if (is_skx_ioat(pdev)) - device->version = IOAT_VER_3_2; - err = ioat3_dma_probe(device, ioat_dca_enabled); - } else - return -ENODEV; + if (is_skx_ioat(pdev)) + device->version = IOAT_VER_3_2; + + err = ioat3_dma_probe(device, ioat_dca_enabled); if (err) { + for (i = 0; i < IOAT_MAX_CHANS; i++) + kfree(device->idx[i]); + kfree(device); dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); return -ENODEV; } @@ -1445,6 +1449,7 @@ module_init(ioat_init_module); static void __exit ioat_exit_module(void) { pci_unregister_driver(&ioat_pci_driver); + kmem_cache_destroy(ioat_sed_cache); kmem_cache_destroy(ioat_cache); } module_exit(ioat_exit_module); diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c index c9b93055dc..f0a399cf45 100644 --- a/drivers/dma/ti/k3-udma-glue.c +++ b/drivers/dma/ti/k3-udma-glue.c @@ -200,12 +200,9 @@ of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glu ret = of_k3_udma_glue_parse(udmax_np, common); if (ret) - goto out_put_spec; + return ret; ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn); - -out_put_spec: - of_node_put(udmax_np); return ret; } diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c index 313b217388..ae6e06057a 100644 --- a/drivers/dma/xilinx/xdma.c +++ b/drivers/dma/xilinx/xdma.c @@ -885,11 +885,11 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id) u32 st; bool repeat_tx; + spin_lock(&xchan->vchan.lock); + if (xchan->stop_requested) complete(&xchan->last_interrupt); - spin_lock(&xchan->vchan.lock); - /* get submitted request */ vd = vchan_next_desc(&xchan->vchan); if (!vd) diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c index d0f6693ca1..32019dc33c 100644 --- a/drivers/dpll/dpll_core.c +++ b/drivers/dpll/dpll_core.c @@ -449,7 +449,7 @@ static int dpll_pin_prop_dup(const struct dpll_pin_properties *src, sizeof(*src->freq_supported); dst->freq_supported = kmemdup(src->freq_supported, freq_size, GFP_KERNEL); - if (!src->freq_supported) + if (!dst->freq_supported) return -ENOMEM; } if (src->board_label) { diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 1f3520d768..a17f3c0cdf 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -81,7 +81,7 @@ int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, amd64_warn("%s: error reading F%dx%03x.\n", func, PCI_FUNC(pdev->devfn), offset); - return err; + return pcibios_err_to_errno(err); } int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, @@ -94,7 +94,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, amd64_warn("%s: error writing to F%dx%03x.\n", func, PCI_FUNC(pdev->devfn), offset); - return err; + return pcibios_err_to_errno(err); } /* @@ -1025,8 +1025,10 @@ static int gpu_get_node_map(struct amd64_pvt *pvt) } ret = 
pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp); - if (ret) + if (ret) { + ret = pcibios_err_to_errno(ret); goto out; + } gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp); gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp); diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index cdd8480e73..dbe9fe5f2c 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -800,7 +800,7 @@ static int errcmd_enable_error_reporting(bool enable) rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd); if (rc) - return rc; + return pcibios_err_to_errno(rc); if (enable) errcmd |= ERRCMD_CE | ERRSTS_UE; @@ -809,7 +809,7 @@ static int errcmd_enable_error_reporting(bool enable) rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd); if (rc) - return rc; + return pcibios_err_to_errno(rc); return 0; } diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 9c5b6f8bd8..27996b7924 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -648,7 +648,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, memset(&res, 0, sizeof(res)); res.mce = mce; res.addr = mce->addr & MCI_ADDR_PHYSADDR; - if (!pfn_to_online_page(res.addr >> PAGE_SHIFT)) { + if (!pfn_to_online_page(res.addr >> PAGE_SHIFT) && !arch_is_platform_page(res.addr)) { pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank); return NOTIFY_DONE; } diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 5f869eacd1..3da94b3822 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -116,7 +116,8 @@ config EXTCON_MAX77843 config EXTCON_MAX8997 tristate "Maxim MAX8997 EXTCON Support" - depends on MFD_MAX8997 && IRQ_DOMAIN + depends on MFD_MAX8997 + select IRQ_DOMAIN help If you say yes here you get support for the MUIC device of Maxim MAX8997 PMIC. 
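The amd64_edac and igen6_edac hunks above stop returning the raw result of the PCI config-space accessors, which is a PCIBIOS_* code rather than a negative errno. A short sketch of the conversion pattern (driver-agnostic; the offset and names are placeholders):

#include <linux/pci.h>

static int example_read_cfg(struct pci_dev *pdev, int offset, u32 *val)
{
        int err = pci_read_config_dword(pdev, offset, val);

        if (err)
                return pcibios_err_to_errno(err); /* e.g. PCIBIOS_DEVICE_NOT_FOUND -> -ENODEV */

        return 0;
}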
The MAX8997 MUIC is a USB port accessory diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index 5f3a3e913d..d19c78a78a 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c @@ -169,9 +169,14 @@ static int dmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env) return 0; } +static void dmi_dev_release(struct device *dev) +{ + kfree(dev); +} + static struct class dmi_class = { .name = "dmi", - .dev_release = (void(*)(struct device *)) kfree, + .dev_release = dmi_dev_release, .dev_uevent = dmi_dev_uevent, }; diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 70e9789ff9..6a337f1f87 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -335,8 +335,8 @@ fail_free_new_fdt: fail: efi_free(fdt_size, fdt_addr); - - efi_bs_call(free_pool, priv.runtime_map); + if (!efi_novamap) + efi_bs_call(free_pool, priv.runtime_map); return EFI_LOAD_ERROR; } diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c index 684c935463..d0ef93551c 100644 --- a/drivers/firmware/efi/libstub/loongarch.c +++ b/drivers/firmware/efi/libstub/loongarch.c @@ -41,7 +41,7 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv) unsigned long __weak kernel_entry_address(unsigned long kernel_addr, efi_loaded_image_t *image) { - return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr; + return *(unsigned long *)(kernel_addr + 8) - PHYSADDR(VMLINUX_LOAD_ADDRESS) + kernel_addr; } efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image, diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index d5a8182cf2..1983fd3bf3 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -776,6 +776,26 @@ static void error(char *str) efi_warn("Decompression failed: %s\n", str); } +static const char *cmdline_memmap_override; + +static efi_status_t parse_options(const char *cmdline) +{ + static const char opts[][14] = { + "mem=", "memmap=", "efi_fake_mem=", "hugepages=" + }; + + for (int i = 0; i < ARRAY_SIZE(opts); i++) { + const char *p = strstr(cmdline, opts[i]); + + if (p == cmdline || (p > cmdline && isspace(p[-1]))) { + cmdline_memmap_override = opts[i]; + break; + } + } + + return efi_parse_options(cmdline); +} + static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) { unsigned long virt_addr = LOAD_PHYSICAL_ADDR; @@ -807,6 +827,10 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry) !memcmp(efistub_fw_vendor(), ami, sizeof(ami))) { efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n"); seed[0] = 0; + } else if (cmdline_memmap_override) { + efi_info("%s detected on the kernel command line - disabling physical KASLR\n", + cmdline_memmap_override); + seed[0] = 0; } boot_params_ptr->hdr.loadflags |= KASLR_FLAG; @@ -883,7 +907,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle, } #ifdef CONFIG_CMDLINE_BOOL - status = efi_parse_options(CONFIG_CMDLINE); + status = parse_options(CONFIG_CMDLINE); if (status != EFI_SUCCESS) { efi_err("Failed to parse options\n"); goto fail; @@ -892,7 +916,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle, if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) { unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr | ((u64)boot_params->ext_cmd_line_ptr << 32)); - status = efi_parse_options((char *)cmdline_paddr); + status = parse_options((char 
*)cmdline_paddr); if (status != EFI_SUCCESS) { efi_err("Failed to parse options\n"); goto fail; diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index 3365944f79..34109fd86c 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -15,10 +15,6 @@ #include <asm/early_ioremap.h> #include <asm/efi.h> -#ifndef __efi_memmap_free -#define __efi_memmap_free(phys, size, flags) do { } while (0) -#endif - /** * __efi_memmap_init - Common code for mapping the EFI memory map * @data: EFI memory map data @@ -51,11 +47,6 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data) return -ENOMEM; } - if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB)) - __efi_memmap_free(efi.memmap.phys_map, - efi.memmap.desc_size * efi.memmap.nr_map, - efi.memmap.flags); - map.phys_map = data->phys_map; map.nr_map = data->size / data->desc_size; map.map_end = map.map + data->size; diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index d9629ff878..2328ca58bb 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -497,10 +497,12 @@ int psci_cpu_suspend_enter(u32 state) static int psci_system_suspend(unsigned long unused) { + int err; phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume); - return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), + err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), pa_cpu_resume, 0, 0); + return psci_to_linux_errno(err); } static int psci_system_suspend_enter(suspend_state_t state) diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index 90283f160a..2ad85052b3 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -569,13 +569,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, ret = qcom_scm_bw_enable(); if (ret) - return ret; + goto disable_clk; desc.args[1] = mdata_phys; ret = qcom_scm_call(__scm->dev, &desc, &res); - qcom_scm_bw_disable(); + +disable_clk: qcom_scm_clk_disable(); out: @@ -637,10 +638,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) ret = qcom_scm_bw_enable(); if (ret) - return ret; + goto disable_clk; ret = qcom_scm_call(__scm->dev, &desc, &res); qcom_scm_bw_disable(); + +disable_clk: qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -672,10 +675,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral) ret = qcom_scm_bw_enable(); if (ret) - return ret; + goto disable_clk; ret = qcom_scm_call(__scm->dev, &desc, &res); qcom_scm_bw_disable(); + +disable_clk: qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -706,11 +711,12 @@ int qcom_scm_pas_shutdown(u32 peripheral) ret = qcom_scm_bw_enable(); if (ret) - return ret; + goto disable_clk; ret = qcom_scm_call(__scm->dev, &desc, &res); - qcom_scm_bw_disable(); + +disable_clk: qcom_scm_clk_disable(); return ret ? : res.result[0]; @@ -1624,7 +1630,7 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send); * We do not yet support re-entrant calls via the qseecom interface. To prevent + any potential issues with this, only allow validated machines for now. 
*/ -static const struct of_device_id qcom_scm_qseecom_allowlist[] = { +static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = { { .compatible = "lenovo,thinkpad-x13s", }, { } }; @@ -1713,7 +1719,7 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm) */ bool qcom_scm_is_available(void) { - return !!__scm; + return !!READ_ONCE(__scm); } EXPORT_SYMBOL_GPL(qcom_scm_is_available); @@ -1794,10 +1800,12 @@ static int qcom_scm_probe(struct platform_device *pdev) if (!scm) return -ENOMEM; + scm->dev = &pdev->dev; ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr); if (ret < 0) return ret; + init_completion(&scm->waitq_comp); mutex_init(&scm->scm_bw_lock); scm->path = devm_of_icc_get(&pdev->dev, NULL); @@ -1829,10 +1837,8 @@ static int qcom_scm_probe(struct platform_device *pdev) if (ret) return ret; - __scm = scm; - __scm->dev = &pdev->dev; - - init_completion(&__scm->waitq_comp); + /* Let all above stores be available after this */ + smp_store_release(&__scm, scm); irq = platform_get_irq_optional(pdev, 0); if (irq < 0) { diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 322aada20f..ac34876a97 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -9,6 +9,7 @@ #include <linux/dma-mapping.h> #include <linux/kref.h> #include <linux/mailbox_client.h> +#include <linux/mailbox_controller.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> @@ -97,8 +98,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw, if (size & 3) return -EINVAL; - buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr, - GFP_ATOMIC); + buf = dma_alloc_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), + &bus_addr, GFP_ATOMIC); if (!buf) return -ENOMEM; @@ -126,7 +127,7 @@ int rpi_firmware_property_list(struct rpi_firmware *fw, ret = -EINVAL; } - dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr); + dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr); return ret; } diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 79c473b3c7..8ef395b49b 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -55,33 +55,26 @@ int fpga_bridge_disable(struct fpga_bridge *bridge) } EXPORT_SYMBOL_GPL(fpga_bridge_disable); -static struct fpga_bridge *__fpga_bridge_get(struct device *dev, +static struct fpga_bridge *__fpga_bridge_get(struct device *bridge_dev, struct fpga_image_info *info) { struct fpga_bridge *bridge; - int ret = -ENODEV; - bridge = to_fpga_bridge(dev); + bridge = to_fpga_bridge(bridge_dev); bridge->info = info; - if (!mutex_trylock(&bridge->mutex)) { - ret = -EBUSY; - goto err_dev; - } + if (!mutex_trylock(&bridge->mutex)) + return ERR_PTR(-EBUSY); - if (!try_module_get(dev->parent->driver->owner)) - goto err_ll_mod; + if (!try_module_get(bridge->br_ops_owner)) { + mutex_unlock(&bridge->mutex); + return ERR_PTR(-ENODEV); + } dev_dbg(&bridge->dev, "get\n"); return bridge; - -err_ll_mod: - mutex_unlock(&bridge->mutex); -err_dev: - put_device(dev); - return ERR_PTR(ret); } /** @@ -98,13 +91,18 @@ err_dev: struct fpga_bridge *of_fpga_bridge_get(struct device_node *np, struct fpga_image_info *info) { - struct device *dev; + struct fpga_bridge *bridge; + struct device *bridge_dev; - dev = class_find_device_by_of_node(&fpga_bridge_class, np); - if (!dev) + bridge_dev = class_find_device_by_of_node(&fpga_bridge_class, np); + if (!bridge_dev) return ERR_PTR(-ENODEV); - return __fpga_bridge_get(dev, info); + bridge = 
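The qcom_scm probe rework above finishes initialising the private structure before publishing it with smp_store_release(), and qcom_scm_is_available() observes the pointer with READ_ONCE(). A stripped-down sketch of that publish/observe pattern, using invented names:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/barrier.h>

struct example_scm {
        int some_state;
};

static struct example_scm *example_scm;   /* read locklessly by example_is_available() */

static int example_probe(void)
{
        struct example_scm *scm = kzalloc(sizeof(*scm), GFP_KERNEL);

        if (!scm)
                return -ENOMEM;
        scm->some_state = 1;

        /* Make all stores above visible before the pointer is published. */
        smp_store_release(&example_scm, scm);
        return 0;
}

static bool example_is_available(void)
{
        return !!READ_ONCE(example_scm);
}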
__fpga_bridge_get(bridge_dev, info); + if (IS_ERR(bridge)) + put_device(bridge_dev); + + return bridge; } EXPORT_SYMBOL_GPL(of_fpga_bridge_get); @@ -125,6 +123,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data) struct fpga_bridge *fpga_bridge_get(struct device *dev, struct fpga_image_info *info) { + struct fpga_bridge *bridge; struct device *bridge_dev; bridge_dev = class_find_device(&fpga_bridge_class, NULL, dev, @@ -132,7 +131,11 @@ struct fpga_bridge *fpga_bridge_get(struct device *dev, if (!bridge_dev) return ERR_PTR(-ENODEV); - return __fpga_bridge_get(bridge_dev, info); + bridge = __fpga_bridge_get(bridge_dev, info); + if (IS_ERR(bridge)) + put_device(bridge_dev); + + return bridge; } EXPORT_SYMBOL_GPL(fpga_bridge_get); @@ -146,7 +149,7 @@ void fpga_bridge_put(struct fpga_bridge *bridge) dev_dbg(&bridge->dev, "put\n"); bridge->info = NULL; - module_put(bridge->dev.parent->driver->owner); + module_put(bridge->br_ops_owner); mutex_unlock(&bridge->mutex); put_device(&bridge->dev); } @@ -316,18 +319,19 @@ static struct attribute *fpga_bridge_attrs[] = { ATTRIBUTE_GROUPS(fpga_bridge); /** - * fpga_bridge_register - create and register an FPGA Bridge device + * __fpga_bridge_register - create and register an FPGA Bridge device * @parent: FPGA bridge device from pdev * @name: FPGA bridge name * @br_ops: pointer to structure of fpga bridge ops * @priv: FPGA bridge private data + * @owner: owner module containing the br_ops * * Return: struct fpga_bridge pointer or ERR_PTR() */ struct fpga_bridge * -fpga_bridge_register(struct device *parent, const char *name, - const struct fpga_bridge_ops *br_ops, - void *priv) +__fpga_bridge_register(struct device *parent, const char *name, + const struct fpga_bridge_ops *br_ops, + void *priv, struct module *owner) { struct fpga_bridge *bridge; int id, ret; @@ -357,6 +361,7 @@ fpga_bridge_register(struct device *parent, const char *name, bridge->name = name; bridge->br_ops = br_ops; + bridge->br_ops_owner = owner; bridge->priv = priv; bridge->dev.groups = br_ops->groups; @@ -386,7 +391,7 @@ error_kfree: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(fpga_bridge_register); +EXPORT_SYMBOL_GPL(__fpga_bridge_register); /** * fpga_bridge_unregister - unregister an FPGA bridge diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index 06651389c5..0f4035b089 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -664,20 +664,16 @@ static struct attribute *fpga_mgr_attrs[] = { }; ATTRIBUTE_GROUPS(fpga_mgr); -static struct fpga_manager *__fpga_mgr_get(struct device *dev) +static struct fpga_manager *__fpga_mgr_get(struct device *mgr_dev) { struct fpga_manager *mgr; - mgr = to_fpga_manager(dev); + mgr = to_fpga_manager(mgr_dev); - if (!try_module_get(dev->parent->driver->owner)) - goto err_dev; + if (!try_module_get(mgr->mops_owner)) + mgr = ERR_PTR(-ENODEV); return mgr; - -err_dev: - put_device(dev); - return ERR_PTR(-ENODEV); } static int fpga_mgr_dev_match(struct device *dev, const void *data) @@ -693,12 +689,18 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data) */ struct fpga_manager *fpga_mgr_get(struct device *dev) { - struct device *mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev, - fpga_mgr_dev_match); + struct fpga_manager *mgr; + struct device *mgr_dev; + + mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev, fpga_mgr_dev_match); if (!mgr_dev) return ERR_PTR(-ENODEV); - return __fpga_mgr_get(mgr_dev); + mgr = __fpga_mgr_get(mgr_dev); + if (IS_ERR(mgr)) + put_device(mgr_dev); + + 
return mgr; } EXPORT_SYMBOL_GPL(fpga_mgr_get); @@ -711,13 +713,18 @@ EXPORT_SYMBOL_GPL(fpga_mgr_get); */ struct fpga_manager *of_fpga_mgr_get(struct device_node *node) { - struct device *dev; + struct fpga_manager *mgr; + struct device *mgr_dev; - dev = class_find_device_by_of_node(&fpga_mgr_class, node); - if (!dev) + mgr_dev = class_find_device_by_of_node(&fpga_mgr_class, node); + if (!mgr_dev) return ERR_PTR(-ENODEV); - return __fpga_mgr_get(dev); + mgr = __fpga_mgr_get(mgr_dev); + if (IS_ERR(mgr)) + put_device(mgr_dev); + + return mgr; } EXPORT_SYMBOL_GPL(of_fpga_mgr_get); @@ -727,7 +734,7 @@ EXPORT_SYMBOL_GPL(of_fpga_mgr_get); */ void fpga_mgr_put(struct fpga_manager *mgr) { - module_put(mgr->dev.parent->driver->owner); + module_put(mgr->mops_owner); put_device(&mgr->dev); } EXPORT_SYMBOL_GPL(fpga_mgr_put); @@ -766,9 +773,10 @@ void fpga_mgr_unlock(struct fpga_manager *mgr) EXPORT_SYMBOL_GPL(fpga_mgr_unlock); /** - * fpga_mgr_register_full - create and register an FPGA Manager device + * __fpga_mgr_register_full - create and register an FPGA Manager device * @parent: fpga manager device from pdev * @info: parameters for fpga manager + * @owner: owner module containing the ops * * The caller of this function is responsible for calling fpga_mgr_unregister(). * Using devm_fpga_mgr_register_full() instead is recommended. @@ -776,7 +784,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock); * Return: pointer to struct fpga_manager pointer or ERR_PTR() */ struct fpga_manager * -fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info) +__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner) { const struct fpga_manager_ops *mops = info->mops; struct fpga_manager *mgr; @@ -804,6 +813,8 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in mutex_init(&mgr->ref_mutex); + mgr->mops_owner = owner; + mgr->name = info->name; mgr->mops = info->mops; mgr->priv = info->priv; @@ -841,14 +852,15 @@ error_kfree: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(fpga_mgr_register_full); +EXPORT_SYMBOL_GPL(__fpga_mgr_register_full); /** - * fpga_mgr_register - create and register an FPGA Manager device + * __fpga_mgr_register - create and register an FPGA Manager device * @parent: fpga manager device from pdev * @name: fpga manager name * @mops: pointer to structure of fpga manager ops * @priv: fpga manager private data + * @owner: owner module containing the ops * * The caller of this function is responsible for calling fpga_mgr_unregister(). * Using devm_fpga_mgr_register() instead is recommended. 
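The FPGA manager, bridge and region hunks in this range share one theme: record the module that owns the ops (mops_owner, br_ops_owner, ops_owner) at registration time and take the module reference on that owner in the get paths, instead of dereferencing dev->parent->driver->owner. The exported entry points become double-underscore functions with an extra owner argument; the public names are presumably kept as thin header macros that pass THIS_MODULE, roughly along these lines (the real wrappers live in the fpga headers, which are not part of this excerpt):

/* Sketch only: the shape of the wrappers, not the literal header contents. */
#define fpga_mgr_register(parent, name, mops, priv) \
        __fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)

#define fpga_bridge_register(parent, name, br_ops, priv) \
        __fpga_bridge_register(parent, name, br_ops, priv, THIS_MODULE)

Expanding THIS_MODULE at the call site pins the module that actually provides the ops, which matters when the parent device is bound by a different or built-in driver.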
This simple @@ -859,8 +871,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_register_full); * Return: pointer to struct fpga_manager pointer or ERR_PTR() */ struct fpga_manager * -fpga_mgr_register(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, void *priv) +__fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, struct module *owner) { struct fpga_manager_info info = { 0 }; @@ -868,9 +880,9 @@ fpga_mgr_register(struct device *parent, const char *name, info.mops = mops; info.priv = priv; - return fpga_mgr_register_full(parent, &info); + return __fpga_mgr_register_full(parent, &info, owner); } -EXPORT_SYMBOL_GPL(fpga_mgr_register); +EXPORT_SYMBOL_GPL(__fpga_mgr_register); /** * fpga_mgr_unregister - unregister an FPGA manager @@ -900,9 +912,10 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res) } /** - * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register() + * __devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register() * @parent: fpga manager device from pdev * @info: parameters for fpga manager + * @owner: owner module containing the ops * * Return: fpga manager pointer on success, negative error code otherwise. * @@ -910,7 +923,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res) * function will be called automatically when the managing device is detached. */ struct fpga_manager * -devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info) +__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner) { struct fpga_mgr_devres *dr; struct fpga_manager *mgr; @@ -919,7 +933,7 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf if (!dr) return ERR_PTR(-ENOMEM); - mgr = fpga_mgr_register_full(parent, info); + mgr = __fpga_mgr_register_full(parent, info, owner); if (IS_ERR(mgr)) { devres_free(dr); return mgr; @@ -930,14 +944,15 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf return mgr; } -EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full); +EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register_full); /** - * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() + * __devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() * @parent: fpga manager device from pdev * @name: fpga manager name * @mops: pointer to structure of fpga manager ops * @priv: fpga manager private data + * @owner: owner module containing the ops * * Return: fpga manager pointer on success, negative error code otherwise. * @@ -946,8 +961,9 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full); * device is detached. 
*/ struct fpga_manager * -devm_fpga_mgr_register(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, void *priv) +__devm_fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, + struct module *owner) { struct fpga_manager_info info = { 0 }; @@ -955,9 +971,9 @@ devm_fpga_mgr_register(struct device *parent, const char *name, info.mops = mops; info.priv = priv; - return devm_fpga_mgr_register_full(parent, &info); + return __devm_fpga_mgr_register_full(parent, &info, owner); } -EXPORT_SYMBOL_GPL(devm_fpga_mgr_register); +EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register); static void fpga_mgr_dev_release(struct device *dev) { diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c index b364a92942..753cd14250 100644 --- a/drivers/fpga/fpga-region.c +++ b/drivers/fpga/fpga-region.c @@ -53,7 +53,7 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region) } get_device(dev); - if (!try_module_get(dev->parent->driver->owner)) { + if (!try_module_get(region->ops_owner)) { put_device(dev); mutex_unlock(®ion->mutex); return ERR_PTR(-ENODEV); @@ -75,7 +75,7 @@ static void fpga_region_put(struct fpga_region *region) dev_dbg(dev, "put\n"); - module_put(dev->parent->driver->owner); + module_put(region->ops_owner); put_device(dev); mutex_unlock(®ion->mutex); } @@ -181,14 +181,16 @@ static struct attribute *fpga_region_attrs[] = { ATTRIBUTE_GROUPS(fpga_region); /** - * fpga_region_register_full - create and register an FPGA Region device + * __fpga_region_register_full - create and register an FPGA Region device * @parent: device parent * @info: parameters for FPGA Region + * @owner: module containing the get_bridges function * * Return: struct fpga_region or ERR_PTR() */ struct fpga_region * -fpga_region_register_full(struct device *parent, const struct fpga_region_info *info) +__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info, + struct module *owner) { struct fpga_region *region; int id, ret = 0; @@ -213,6 +215,7 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info * region->compat_id = info->compat_id; region->priv = info->priv; region->get_bridges = info->get_bridges; + region->ops_owner = owner; mutex_init(®ion->mutex); INIT_LIST_HEAD(®ion->bridge_list); @@ -241,13 +244,14 @@ err_free: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(fpga_region_register_full); +EXPORT_SYMBOL_GPL(__fpga_region_register_full); /** - * fpga_region_register - create and register an FPGA Region device + * __fpga_region_register - create and register an FPGA Region device * @parent: device parent * @mgr: manager that programs this region * @get_bridges: optional function to get bridges to a list + * @owner: module containing the get_bridges function * * This simple version of the register function should be sufficient for most users. 
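The __devm_fpga_mgr_register_full() path above follows the usual devres recipe: allocate a small devres node, perform the real registration, then attach the node so the release callback unregisters automatically when the owning device detaches. A generic, hedged sketch with placeholder object types and hypothetical register/unregister helpers:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>

struct example_obj;                                        /* hypothetical managed object */
struct example_obj *example_register(struct device *dev);  /* hypothetical */
void example_unregister(struct example_obj *obj);          /* hypothetical */

struct example_devres {
        struct example_obj *obj;
};

static void devm_example_release(struct device *dev, void *res)
{
        struct example_devres *dr = res;

        example_unregister(dr->obj);
}

struct example_obj *devm_example_register(struct device *dev)
{
        struct example_devres *dr;
        struct example_obj *obj;

        dr = devres_alloc(devm_example_release, sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return ERR_PTR(-ENOMEM);

        obj = example_register(dev);
        if (IS_ERR(obj)) {
                devres_free(dr);
                return obj;
        }

        dr->obj = obj;
        devres_add(dev, dr);
        return obj;
}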
* The fpga_region_register_full() function is available for users that need to @@ -256,17 +260,17 @@ EXPORT_SYMBOL_GPL(fpga_region_register_full); * Return: struct fpga_region or ERR_PTR() */ struct fpga_region * -fpga_region_register(struct device *parent, struct fpga_manager *mgr, - int (*get_bridges)(struct fpga_region *)) +__fpga_region_register(struct device *parent, struct fpga_manager *mgr, + int (*get_bridges)(struct fpga_region *), struct module *owner) { struct fpga_region_info info = { 0 }; info.mgr = mgr; info.get_bridges = get_bridges; - return fpga_region_register_full(parent, &info); + return __fpga_region_register_full(parent, &info, owner); } -EXPORT_SYMBOL_GPL(fpga_region_register); +EXPORT_SYMBOL_GPL(__fpga_region_register); /** * fpga_region_unregister - unregister an FPGA region diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index b50d0b4708..cbfcfefdb5 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -1549,7 +1549,7 @@ config GPIO_TPS68470 are "output only" GPIOs. config GPIO_TQMX86 - tristate "TQ-Systems QTMX86 GPIO" + tristate "TQ-Systems TQMx86 GPIO" depends on MFD_TQMX86 || COMPILE_TEST depends on HAS_IOPORT_MAP select GPIOLIB_IRQCHIP diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c index d31788b43a..2605706145 100644 --- a/drivers/gpio/gpio-npcm-sgpio.c +++ b/drivers/gpio/gpio-npcm-sgpio.c @@ -434,7 +434,7 @@ static void npcm_sgpio_irq_handler(struct irq_desc *desc) struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct irq_chip *ic = irq_desc_get_chip(desc); struct npcm_sgpio *gpio = gpiochip_get_data(gc); - unsigned int i, j, girq; + unsigned int i, j; unsigned long reg; chained_irq_enter(ic, desc); @@ -443,11 +443,9 @@ static void npcm_sgpio_irq_handler(struct irq_desc *desc) const struct npcm_sgpio_bank *bank = &npcm_sgpio_banks[i]; reg = ioread8(bank_reg(gpio, bank, EVENT_STS)); - for_each_set_bit(j, ®, 8) { - girq = irq_find_mapping(gc->irq.domain, - i * 8 + gpio->nout_sgpio + j); - generic_handle_domain_irq(gc->irq.domain, girq); - } + for_each_set_bit(j, ®, 8) + generic_handle_domain_irq(gc->irq.domain, + i * 8 + gpio->nout_sgpio + j); } chained_irq_exit(ic, desc); diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c index 3a28c1f273..f2e7e8754d 100644 --- a/drivers/gpio/gpio-tqmx86.c +++ b/drivers/gpio/gpio-tqmx86.c @@ -6,6 +6,7 @@ * Vadim V.Vlasov <vvlasov@dev.rtsoft.ru> */ +#include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/errno.h> #include <linux/gpio/driver.h> @@ -28,16 +29,25 @@ #define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */ #define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */ +#define TQMX86_GPII_NONE 0 #define TQMX86_GPII_FALLING BIT(0) #define TQMX86_GPII_RISING BIT(1) +/* Stored in irq_type as a trigger type, but not actually valid as a register + * value, so the name doesn't use "GPII" + */ +#define TQMX86_INT_BOTH (BIT(0) | BIT(1)) #define TQMX86_GPII_MASK (BIT(0) | BIT(1)) #define TQMX86_GPII_BITS 2 +/* Stored in irq_type with GPII bits */ +#define TQMX86_INT_UNMASKED BIT(2) struct tqmx86_gpio_data { struct gpio_chip chip; void __iomem *io_base; int irq; + /* Lock must be held for accessing output and irq_type fields */ raw_spinlock_t spinlock; + DECLARE_BITMAP(output, TQMX86_NGPIO); u8 irq_type[TQMX86_NGPI]; }; @@ -64,15 +74,10 @@ static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset, { struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip); unsigned long flags; - u8 val; 
raw_spin_lock_irqsave(&gpio->spinlock, flags); - val = tqmx86_gpio_read(gpio, TQMX86_GPIOD); - if (value) - val |= BIT(offset); - else - val &= ~BIT(offset); - tqmx86_gpio_write(gpio, val, TQMX86_GPIOD); + __assign_bit(offset, gpio->output, value); + tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); } @@ -107,21 +112,38 @@ static int tqmx86_gpio_get_direction(struct gpio_chip *chip, return GPIO_LINE_DIRECTION_OUT; } +static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset) + __must_hold(&gpio->spinlock) +{ + u8 type = TQMX86_GPII_NONE, gpiic; + + if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) { + type = gpio->irq_type[offset] & TQMX86_GPII_MASK; + + if (type == TQMX86_INT_BOTH) + type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO) + ? TQMX86_GPII_FALLING + : TQMX86_GPII_RISING; + } + + gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); + gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS)); + gpiic |= type << (offset * TQMX86_GPII_BITS); + tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); +} + static void tqmx86_gpio_irq_mask(struct irq_data *data) { unsigned int offset = (data->hwirq - TQMX86_NGPO); struct tqmx86_gpio_data *gpio = gpiochip_get_data( irq_data_get_irq_chip_data(data)); unsigned long flags; - u8 gpiic, mask; - - mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS); raw_spin_lock_irqsave(&gpio->spinlock, flags); - gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); - gpiic &= ~mask; - tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED; + tqmx86_gpio_irq_config(gpio, offset); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); + gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data)); } @@ -131,16 +153,12 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data) struct tqmx86_gpio_data *gpio = gpiochip_get_data( irq_data_get_irq_chip_data(data)); unsigned long flags; - u8 gpiic, mask; - - mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS); gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data)); + raw_spin_lock_irqsave(&gpio->spinlock, flags); - gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); - gpiic &= ~mask; - gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS); - tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + gpio->irq_type[offset] |= TQMX86_INT_UNMASKED; + tqmx86_gpio_irq_config(gpio, offset); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); } @@ -151,7 +169,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type) unsigned int offset = (data->hwirq - TQMX86_NGPO); unsigned int edge_type = type & IRQF_TRIGGER_MASK; unsigned long flags; - u8 new_type, gpiic; + u8 new_type; switch (edge_type) { case IRQ_TYPE_EDGE_RISING: @@ -161,19 +179,16 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type) new_type = TQMX86_GPII_FALLING; break; case IRQ_TYPE_EDGE_BOTH: - new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING; + new_type = TQMX86_INT_BOTH; break; default: return -EINVAL; /* not supported */ } - gpio->irq_type[offset] = new_type; - raw_spin_lock_irqsave(&gpio->spinlock, flags); - gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); - gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS)); - gpiic |= new_type << (offset * TQMX86_GPII_BITS); - tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + gpio->irq_type[offset] &= ~TQMX86_GPII_MASK; + gpio->irq_type[offset] |= new_type; + tqmx86_gpio_irq_config(gpio, offset); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); return 
0; @@ -184,8 +199,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc) struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip); struct irq_chip *irq_chip = irq_desc_get_chip(desc); - unsigned long irq_bits; - int i = 0; + unsigned long irq_bits, flags; + int i; u8 irq_status; chained_irq_enter(irq_chip, desc); @@ -194,6 +209,34 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc) tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS); irq_bits = irq_status; + + raw_spin_lock_irqsave(&gpio->spinlock, flags); + for_each_set_bit(i, &irq_bits, TQMX86_NGPI) { + /* + * Edge-both triggers are implemented by flipping the edge + * trigger after each interrupt, as the controller only supports + * either rising or falling edge triggers, but not both. + * + * Internally, the TQMx86 GPIO controller has separate status + * registers for rising and falling edge interrupts. GPIIC + * configures which bits from which register are visible in the + * interrupt status register GPIIS and defines what triggers the + * parent IRQ line. Writing to GPIIS always clears both rising + * and falling interrupt flags internally, regardless of the + * currently configured trigger. + * + * In consequence, we can cleanly implement the edge-both + * trigger in software by first clearing the interrupt and then + * setting the new trigger based on the current GPIO input in + * tqmx86_gpio_irq_config() - even if an edge arrives between + * reading the input and setting the trigger, we will have a new + * interrupt pending. + */ + if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH) + tqmx86_gpio_irq_config(gpio, i); + } + raw_spin_unlock_irqrestore(&gpio->spinlock, flags); + for_each_set_bit(i, &irq_bits, TQMX86_NGPI) generic_handle_domain_irq(gpio->chip.irq.domain, i + TQMX86_NGPO); @@ -277,6 +320,13 @@ static int tqmx86_gpio_probe(struct platform_device *pdev) tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD); + /* + * Reading the previous output state is not possible with TQMx86 hardware. + * Initialize all outputs to 0 to have a defined state that matches the + * shadow register. + */ + tqmx86_gpio_write(gpio, 0, TQMX86_GPIOD); + chip = &gpio->chip; chip->label = "gpio-tqmx86"; chip->owner = THIS_MODULE; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 7f140df40f..c1e190d3ea 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -128,7 +128,24 @@ static bool acpi_gpio_deferred_req_irqs_done; static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data) { - return device_match_acpi_handle(&gc->gpiodev->dev, data); + /* First check the actual GPIO device */ + if (device_match_acpi_handle(&gc->gpiodev->dev, data)) + return true; + + /* + * When the ACPI device is artificially split to the banks of GPIOs, + * where each of them is represented by a separate GPIO device, + * the firmware node of the physical device may not be shared among + * the banks as they may require different values for the same property, + * e.g., number of GPIOs in a certain bank. In such case the ACPI handle + * of a GPIO device is NULL and can not be used. Hence we have to check + * the parent device to be sure that there is no match before bailing + * out. 
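tqmx86_gpio_irq_config() above emulates IRQ_TYPE_EDGE_BOTH on a controller that can latch only one edge at a time: after clearing the status it re-arms the opposite edge based on the current input level, so a transition that lands between the read and the re-arm still leaves a new interrupt pending. The decision itself, reduced to a tiny stand-alone sketch (constants and names are invented):

#include <linux/types.h>

enum example_edge { EXAMPLE_EDGE_RISING, EXAMPLE_EDGE_FALLING };

/*
 * For a both-edges request, arm the falling edge while the line is high and
 * the rising edge while it is low; re-evaluate after every interrupt.
 */
static enum example_edge example_rearm_both_edges(bool line_is_high)
{
        return line_is_high ? EXAMPLE_EDGE_FALLING : EXAMPLE_EDGE_RISING;
}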
+ */ + if (gc->parent) + return device_match_acpi_handle(gc->parent, data); + + return false; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index e4d4e55c08..0535b07987 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1188,7 +1188,8 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, int ret; ctx->sync = &mem->sync; - drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT | + DRM_EXEC_IGNORE_DUPLICATES, 0); drm_exec_until_all_locked(&ctx->exec) { ctx->n_vms = 0; list_for_each_entry(entry, &mem->attachments, list) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 6857c586de..c8c23dbb90 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -211,6 +211,7 @@ union igp_info { struct atom_integrated_system_info_v1_11 v11; struct atom_integrated_system_info_v1_12 v12; struct atom_integrated_system_info_v2_1 v21; + struct atom_integrated_system_info_v2_3 v23; }; union umc_info { @@ -359,6 +360,20 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, if (vram_type) *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); break; + case 3: + mem_channel_number = igp_info->v23.umachannelnumber; + if (!mem_channel_number) + mem_channel_number = 1; + mem_type = igp_info->v23.memorytype; + if (mem_type == LpDdr5MemType) + mem_channel_width = 32; + else + mem_channel_width = 64; + if (vram_width) + *vram_width = mem_channel_number * mem_channel_width; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7753a2e64d..941d6e379b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5809,13 +5809,18 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev, *speed = PCI_SPEED_UNKNOWN; *width = PCIE_LNK_WIDTH_UNKNOWN; - while ((parent = pci_upstream_bridge(parent))) { - /* skip upstream/downstream switches internal to dGPU*/ - if (parent->vendor == PCI_VENDOR_ID_ATI) - continue; - *speed = pcie_get_speed_cap(parent); - *width = pcie_get_width_cap(parent); - break; + if (amdgpu_device_pcie_dynamic_switching_supported(adev)) { + while ((parent = pci_upstream_bridge(parent))) { + /* skip upstream/downstream switches internal to dGPU*/ + if (parent->vendor == PCI_VENDOR_ID_ATI) + continue; + *speed = pcie_get_speed_cap(parent); + *width = pcie_get_width_cap(parent); + break; + } + } else { + /* use the current speeds rather than max if switching is not supported */ + pcie_bandwidth_available(adev->pdev, NULL, speed, width); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index be4629cdac..08b9dfb653 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -684,12 +684,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; unsigned int ndw; - signed long r; + int r; uint32_t seq; - if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready || - !down_read_trylock(&adev->reset_domain->sem)) { 
+ /* + * A GPU reset should flush all TLBs anyway, so no need to do + * this while one is ongoing. + */ + if (!down_read_trylock(&adev->reset_domain->sem)) + return 0; + if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) { if (adev->gmc.flush_tlb_needs_extra_type_2) adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, 2, all_hub, @@ -703,43 +708,40 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid, adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst); - return 0; - } + r = 0; + } else { + /* 2 dwords flush + 8 dwords fence */ + ndw = kiq->pmf->invalidate_tlbs_size + 8; - /* 2 dwords flush + 8 dwords fence */ - ndw = kiq->pmf->invalidate_tlbs_size + 8; + if (adev->gmc.flush_tlb_needs_extra_type_2) + ndw += kiq->pmf->invalidate_tlbs_size; - if (adev->gmc.flush_tlb_needs_extra_type_2) - ndw += kiq->pmf->invalidate_tlbs_size; + if (adev->gmc.flush_tlb_needs_extra_type_0) + ndw += kiq->pmf->invalidate_tlbs_size; - if (adev->gmc.flush_tlb_needs_extra_type_0) - ndw += kiq->pmf->invalidate_tlbs_size; + spin_lock(&adev->gfx.kiq[inst].ring_lock); + amdgpu_ring_alloc(ring, ndw); + if (adev->gmc.flush_tlb_needs_extra_type_2) + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); - spin_lock(&adev->gfx.kiq[inst].ring_lock); - amdgpu_ring_alloc(ring, ndw); - if (adev->gmc.flush_tlb_needs_extra_type_2) - kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub); + if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0) + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub); - if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0) - kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub); + kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) { + amdgpu_ring_undo(ring); + spin_unlock(&adev->gfx.kiq[inst].ring_lock); + goto error_unlock_reset; + } - kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); - r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); - if (r) { - amdgpu_ring_undo(ring); + amdgpu_ring_commit(ring); spin_unlock(&adev->gfx.kiq[inst].ring_lock); - goto error_unlock_reset; - } - - amdgpu_ring_commit(ring); - spin_unlock(&adev->gfx.kiq[inst].ring_lock); - r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); - if (r < 1) { - dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); - r = -ETIME; - goto error_unlock_reset; + if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) { + dev_err(adev->dev, "timeout waiting for kiq fence\n"); + r = -ETIME; + } } - r = 0; error_unlock_reset: up_read(&adev->reset_domain->sem); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index b53c8fd4e8..d89d6829f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -431,16 +431,16 @@ out: static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev) { - const char *chip_name; + char ucode_prefix[15]; int r; - chip_name = "gc_9_4_3"; + amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name); + r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix); if (r) return r; - r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name); + r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 43775cb67f..8f23176675 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2021,6 +2021,9 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: SDMA trap\n"); instance = sdma_v4_0_irq_id_to_seq(entry->client_id); + if (instance < 0) + return instance; + switch (entry->ring_id) { case 0: amdgpu_fence_process(&adev->sdma.instance[instance].ring); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 719d6d365e..ff01610fbc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -408,15 +408,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) f2g = &gfx_v11_kfd2kgd; break; case IP_VERSION(11, 0, 3): - if ((adev->pdev->device == 0x7460 && - adev->pdev->revision == 0x00) || - (adev->pdev->device == 0x7461 && - adev->pdev->revision == 0x00)) - /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */ - gfx_target_version = 110005; - else - /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */ - gfx_target_version = 110001; + /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */ + gfx_target_version = 110001; f2g = &gfx_v11_kfd2kgd; break; case IP_VERSION(11, 5, 0): diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d6e71aa808..f866a02f4f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9149,9 +9149,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) trace_amdgpu_dm_atomic_commit_tail_begin(state); - if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed) - dc_allow_idle_optimizations(dm->dc, false); - drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_dp_mst_atomic_wait_for_dependencies(state); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index c27063305a..2c36f3d00c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -363,7 +363,7 @@ void dm_helpers_dp_mst_send_payload_allocation( mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); - ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload); + ret = drm_dp_add_payload_part2(mst_mgr, new_payload); if (ret) { amdgpu_dm_set_mst_status(&aconnector->mst_status, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index cb31a699c6..1a269099f1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, &connector->base, dev->mode_config.tile_property, 0); + connector->colorspace_property = master->base.colorspace_property; + if (connector->colorspace_property) + drm_connector_attach_colorspace_property(connector); drm_connector_set_path_property(connector, pathprop); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 6083b1dcf0..a72e849ece 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1340,16 +1340,27 @@ void 
dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_ * Powering up the hardware requires notifying PMFW and DMCUB. * Clearing the driver idle allow requires a DMCUB command. * DMCUB commands requires the DMCUB to be powered up and restored. - * - * Exit out early to prevent an infinite loop of DMCUB commands - * triggering exit low power - use software state to track this. */ - dc_dmub_srv->idle_allowed = allow_idle; - if (!allow_idle) + if (!allow_idle) { dc_dmub_srv_exit_low_power_state(dc); - else + /* + * Idle is considered fully exited only after the sequence above + * fully completes. If we have a race of two threads exiting + * at the same time then it's safe to perform the sequence + * twice as long as we're not re-entering. + * + * Infinite command submission is avoided by using the + * dm_execute_dmub_cmd submission instead of the "wake" helpers. + */ + dc_dmub_srv->idle_allowed = false; + } else { + /* Consider idle as notified prior to the actual submission to + * prevent multiple entries. */ + dc_dmub_srv->idle_allowed = true; + dc_dmub_srv_notify_idle(dc, allow_idle); + } } bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index b7e57aa273..b0d192c6e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -402,6 +402,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx, i += increment) { if (j == hw_points - 1) break; + if (i >= TRANSFER_FUNC_POINTS) { + DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n", + i, TRANSFER_FUNC_POINTS); + return false; + } rgb_resulted[j].red = output_tf->tf_pts.red[i]; rgb_resulted[j].green = output_tf->tf_pts.green[i]; rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 9067ca78f8..eb6c6ba64c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -999,8 +999,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (pipe_ctx->plane_res.dpp) update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false; - if ((pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) && - pipe_ctx->plane_res.mpcc_inst >= 0) + if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false; if (pipe_ctx->stream_res.dsc) @@ -1374,3 +1373,75 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, set_static_screen_control(pipe_ctx[i]->stream_res.tg, triggers, params->num_frames); } + +static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx) +{ + /* Calculate average pixel count per TU, return false if under ~2.00 to + * avoid empty TUs. This is only required for DPIA tunneling as empty TUs + * are legal to generate for native DP links. Assume TU size 64 as there + * is currently no scenario where it's reprogrammed from HW default. + * MTPs have no such limitation, so this does not affect MST use cases. 
+ */ + unsigned int pix_clk_mhz; + unsigned int symclk_mhz; + unsigned int avg_pix_per_tu_x1000; + unsigned int tu_size_bytes = 64; + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings; + const struct dc *dc = pipe_ctx->stream->link->dc; + + if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + return false; + + // Not necessary for MST configurations + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + return false; + + pix_clk_mhz = timing->pix_clk_100hz / 10000; + + // If this is true, can't block due to dynamic ODM + if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz) + return false; + + switch (link_settings->link_rate) { + case LINK_RATE_LOW: + symclk_mhz = 162; + break; + case LINK_RATE_HIGH: + symclk_mhz = 270; + break; + case LINK_RATE_HIGH2: + symclk_mhz = 540; + break; + case LINK_RATE_HIGH3: + symclk_mhz = 810; + break; + default: + // We shouldn't be tunneling any other rates, something is wrong + ASSERT(0); + return false; + } + + avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes) + / (symclk_mhz * link_settings->lane_count); + + // Add small empirically-decided margin to account for potential jitter + return (avg_pix_per_tu_x1000 < 2020); +} + +bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + + if (!is_h_timing_divisible_by_2(pipe_ctx->stream)) + return false; + + if (should_avoid_empty_tu(pipe_ctx)) + return false; + + if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && + dc->debug.enable_dp_dig_pixel_rate_div_policy) + return true; + + return false; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index c354efa6c1..91f5d1136a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -93,4 +93,6 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx, void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_params *params); +bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index a93073055e..6c8da59b79 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -158,7 +158,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = { .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, - .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy, .dsc_pg_control = dcn35_dsc_pg_control, .dsc_pg_status = dcn32_dsc_pg_status, .enable_plane = dcn35_enable_plane, diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index af3eebb4c9..1acb2d2c55 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1657,6 +1657,49 @@ struct atom_integrated_system_info_v2_2 uint32_t reserved4[189]; }; +struct uma_carveout_option { + char optionName[29]; //max length of 
string is 28chars + '\0'. Current design is for "minimum", "Medium", "High". This makes entire struct size 64bits + uint8_t memoryCarvedGb; //memory carved out with setting + uint8_t memoryRemainingGb; //memory remaining on system + union { + struct _flags { + uint8_t Auto : 1; + uint8_t Custom : 1; + uint8_t Reserved : 6; + } flags; + uint8_t all8; + } uma_carveout_option_flags; +}; + +struct atom_integrated_system_info_v2_3 { + struct atom_common_table_header table_header; + uint32_t vbios_misc; // enum of atom_system_vbiosmisc_def + uint32_t gpucapinfo; // enum of atom_system_gpucapinf_def + uint32_t system_config; + uint32_t cpucapinfo; + uint16_t gpuclk_ss_percentage; // unit of 0.001%, 1000 mean 1% + uint16_t gpuclk_ss_type; + uint16_t dpphy_override; // bit vector, enum of atom_sysinfo_dpphy_override_def + uint8_t memorytype; // enum of atom_dmi_t17_mem_type_def, APU memory type indication. + uint8_t umachannelnumber; // number of memory channels + uint8_t htc_hyst_limit; + uint8_t htc_tmp_limit; + uint8_t reserved1; // dp_ss_control + uint8_t gpu_package_id; + struct edp_info_table edp1_info; + struct edp_info_table edp2_info; + uint32_t reserved2[8]; + struct atom_external_display_connection_info extdispconninfo; + uint8_t UMACarveoutVersion; + uint8_t UMACarveoutIndexMax; + uint8_t UMACarveoutTypeDefault; + uint8_t UMACarveoutIndexDefault; + uint8_t UMACarveoutType; //Auto or Custom + uint8_t UMACarveoutIndex; + struct uma_carveout_option UMASizeControlOption[20]; + uint8_t reserved3[110]; +}; + // system_config enum atom_system_vbiosmisc_def{ INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT = 0x01, diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 5cb4725c77..c8586cb7d0 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { if (table[i].ulSupportedSCLK != 0) { + if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES) + continue; vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = table[i].usVoltageID; vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 4abfcd3274..2fb6c9cb0f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -226,15 +226,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) struct amdgpu_device *adev = smu->adev; int ret = 0; - if (!en && adev->in_s4) { - /* Adds a GFX reset as workaround just before sending the - * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering - * an invalid state. - */ - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, - SMU_RESET_MODE_2, NULL); - if (ret) - return ret; + if (!en && !adev->in_s0ix) { + if (adev->in_s4) { + /* Adds a GFX reset as workaround just before sending the + * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering + * an invalid state. 
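/*
 * Editorial summary (sketch, not patch text) of the suspend paths that
 * result from this hunk:
 *
 *   disable + s0ix     -> neither message is sent
 *   disable + S4       -> SMU_MSG_GfxDeviceDriverReset(SMU_RESET_MODE_2),
 *                         then SMU_MSG_PrepareMp1ForUnload
 *   disable, otherwise -> SMU_MSG_PrepareMp1ForUnload only
 */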
+ */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + if (ret) + return ret; + } ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c index f3e7441726..f4e76b46ca 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c @@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c, u32 avail_scalers; pipe_st = komeda_pipeline_get_state(c->pipeline, state); - if (!pipe_st) + if (IS_ERR_OR_NULL(pipe_st)) return NULL; avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^ diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 626709bec6..2577f0cef8 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -72,7 +72,10 @@ static void malidp_mw_connector_reset(struct drm_connector *connector) __drm_atomic_helper_connector_destroy_state(connector->state); kfree(connector->state); - __drm_atomic_helper_connector_reset(connector, &mw_state->base); + connector->state = NULL; + + if (mw_state) + __drm_atomic_helper_connector_reset(connector, &mw_state->base); } static enum drm_connector_status diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 9d96d28d6f..59e9ad3499 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2066,10 +2066,8 @@ static int anx7625_setup_dsi_device(struct anx7625_data *ctx) }; host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node); - if (!host) { - DRM_DEV_ERROR(dev, "fail to find dsi host.\n"); - return -EPROBE_DEFER; - } + if (!host) + return dev_err_probe(dev, -EPROBE_DEFER, "fail to find dsi host.\n"); dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { @@ -2471,15 +2469,22 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge, mutex_unlock(&ctx->aux_lock); } +static void +anx7625_audio_update_connector_status(struct anx7625_data *ctx, + enum drm_connector_status status); + static enum drm_connector_status anx7625_bridge_detect(struct drm_bridge *bridge) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); struct device *dev = ctx->dev; + enum drm_connector_status status; DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n"); - return anx7625_sink_detect(ctx); + status = anx7625_sink_detect(ctx); + anx7625_audio_update_connector_status(ctx, status); + return status; } static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index e226acc5c1..8a91ef0ae0 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -2059,6 +2059,9 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge, mhdp_state = to_cdns_mhdp_bridge_state(new_state); mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode); + if (!mhdp_state->current_mode) + return; + drm_mode_set_name(mhdp_state->current_mode); dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name); diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c index 82d23e4df0..ff3284b6b1 100644 --- a/drivers/gpu/drm/bridge/chipone-icn6211.c +++ 
b/drivers/gpu/drm/bridge/chipone-icn6211.c @@ -563,10 +563,8 @@ static int chipone_dsi_host_attach(struct chipone *icn) host = of_find_mipi_dsi_host_by_node(host_node); of_node_put(host_node); - if (!host) { - dev_err(dev, "failed to find dsi host\n"); - return -EPROBE_DEFER; - } + if (!host) + return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"); dsi = mipi_dsi_device_register_full(host, &info); if (IS_ERR(dsi)) { diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig index 5965e80275..8dd89efa8e 100644 --- a/drivers/gpu/drm/bridge/imx/Kconfig +++ b/drivers/gpu/drm/bridge/imx/Kconfig @@ -8,8 +8,8 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE depends on OF depends on COMMON_CLK select DRM_DW_HDMI - select DRM_IMX8MP_HDMI_PVI - select PHY_FSL_SAMSUNG_HDMI_PHY + imply DRM_IMX8MP_HDMI_PVI + imply PHY_FSL_SAMSUNG_HDMI_PHY help Choose this to enable support for the internal HDMI encoder found on the i.MX8MP SoC. diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 4b2ae27f0a..1a9defa156 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -494,10 +494,8 @@ static int lt8912_attach_dsi(struct lt8912 *lt) }; host = of_find_mipi_dsi_host_by_node(lt->host_node); - if (!host) { - dev_err(dev, "failed to find dsi host\n"); - return -EPROBE_DEFER; - } + if (!host) + return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"); dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index a9c7e2b07e..b99fe87ec7 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -761,10 +761,8 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, int ret; host = of_find_mipi_dsi_host_by_node(dsi_node); - if (!host) { - dev_err(lt9611->dev, "failed to find dsi host\n"); - return ERR_PTR(-EPROBE_DEFER); - } + if (!host) + return ERR_PTR(dev_err_probe(lt9611->dev, -EPROBE_DEFER, "failed to find dsi host\n")); dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index f4f593ad8f..ab702471f3 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -266,10 +266,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, int ret; host = of_find_mipi_dsi_host_by_node(dsi_node); - if (!host) { - dev_err(dev, "failed to find dsi host\n"); - return ERR_PTR(-EPROBE_DEFER); - } + if (!host) + return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n")); dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 7f41525f7a..3d6e8f096a 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -358,9 +358,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation); static void devm_drm_panel_bridge_release(struct device *dev, void *res) { - struct drm_bridge **bridge = res; + struct drm_bridge *bridge = *(struct drm_bridge **)res; - drm_panel_bridge_remove(*bridge); + if (!bridge) + return; + + drm_bridge_remove(bridge); } /** diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index 90a89d70d8..c737670631 100644 --- 
a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -454,10 +454,6 @@ static void tc_bridge_enable(struct drm_bridge *bridge) dev_dbg(tc->dev, "bus_formats %04x bpc %d\n", connector->display_info.bus_formats[0], tc->bpc); - /* - * Default hardware register settings of tc358775 configured - * with MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA jeida-24 format - */ if (connector->display_info.bus_formats[0] == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) { /* VESA-24 */ @@ -468,14 +464,15 @@ static void tc_bridge_enable(struct drm_bridge *bridge) d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2)); d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0)); d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6)); - } else { /* MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - JEIDA-18 */ - d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3)); - d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_L0, LVI_R5, LVI_G0)); - d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_L0, LVI_L0)); - d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0)); - d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_L0, LVI_L0, LVI_B1, LVI_B2)); - d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0)); - d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_L0)); + } else { + /* JEIDA-18 and JEIDA-24 */ + d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R2, LVI_R3, LVI_R4, LVI_R5)); + d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R6, LVI_R1, LVI_R7, LVI_G2)); + d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G3, LVI_G4, LVI_G0, LVI_G1)); + d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G5, LVI_G6, LVI_G7, LVI_B2)); + d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B0, LVI_B1, LVI_B3, LVI_B4)); + d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B5, LVI_B6, LVI_B7, LVI_L0)); + d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R0)); } d2l_write(tc->i2c, VFUEN, VFUEN_EN); @@ -610,10 +607,8 @@ static int tc_attach_host(struct tc_data *tc) }; host = of_find_mipi_dsi_host_by_node(tc->host_node); - if (!host) { - dev_err(dev, "failed to find dsi host\n"); - return -EPROBE_DEFER; - } + if (!host) + return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"); dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c index ca3348109b..6b559e0713 100644 --- a/drivers/gpu/drm/bridge/ti-dlpc3433.c +++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c @@ -319,12 +319,11 @@ static int dlpc_host_attach(struct dlpc *dlpc) .channel = 0, .node = NULL, }; + int ret; host = of_find_mipi_dsi_host_by_node(dlpc->host_node); - if (!host) { - DRM_DEV_ERROR(dev, "failed to find dsi host\n"); - return -EPROBE_DEFER; - } + if (!host) + return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"); dlpc->dsi = mipi_dsi_device_register_full(host, &info); if (IS_ERR(dlpc->dsi)) { @@ -336,7 +335,11 @@ static int dlpc_host_attach(struct dlpc *dlpc) dlpc->dsi->format = MIPI_DSI_FMT_RGB565; dlpc->dsi->lanes = dlpc->dsi_lanes; - return devm_mipi_dsi_attach(dev, dlpc->dsi); + ret = devm_mipi_dsi_attach(dev, dlpc->dsi); + if (ret) + DRM_DEV_ERROR(dev, "failed to attach dsi host\n"); + + return ret; } static int dlpc3433_probe(struct i2c_client *client) @@ -367,10 +370,8 @@ static int dlpc3433_probe(struct i2c_client *client) drm_bridge_add(&dlpc->bridge); ret = dlpc_host_attach(dlpc); - if (ret) { - DRM_DEV_ERROR(dev, "failed to attach dsi host\n"); + if (ret) goto err_remove_bridge; - } return 0; diff 
--git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 4814b7b6d1..57a7ed13f9 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -478,7 +478,6 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge, dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret); /* On failure, disable PLL again and exit. */ regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00); - regulator_disable(ctx->vcc); return; } diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml index 0857773e5c..8bc63912fd 100644 --- a/drivers/gpu/drm/ci/test.yml +++ b/drivers/gpu/drm/ci/test.yml @@ -252,11 +252,11 @@ i915:cml: i915:tgl: extends: - .i915 - parallel: 8 + parallel: 5 variables: - DEVICE_TYPE: asus-cx9400-volteer + DEVICE_TYPE: acer-cp514-2h-1130g7-volteer GPU_VERSION: tgl - RUNNER_TAG: mesa-ci-x86-64-lava-asus-cx9400-volteer + RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer .amdgpu: extends: diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 03d5282094..95fd18f24e 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -3421,7 +3421,6 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2); /** * drm_dp_add_payload_part2() - Execute payload update part 2 * @mgr: Manager to use. - * @state: The global atomic state * @payload: The payload to update * * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this @@ -3430,14 +3429,13 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2); * Returns: 0 on success, negative error code on failure. */ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr, - struct drm_atomic_state *state, struct drm_dp_mst_atomic_payload *payload) { int ret = 0; /* Skip failed payloads */ if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) { - drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n", + drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n", payload->port->connector->name); return -EIO; } diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 521a71c61b..17ed94885d 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -687,11 +687,17 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, */ list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) { - if (next->pre_enable_prev_first) { + if (!next->pre_enable_prev_first) { next = list_prev_entry(next, chain_node); limit = next; break; } + + if (list_is_last(&next->chain_node, + &encoder->bridge_chain)) { + limit = next; + break; + } } /* Call these bridges in reverse order */ @@ -774,7 +780,7 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, /* Found first bridge that does NOT * request prev to be enabled first */ - limit = list_prev_entry(next, chain_node); + limit = next; break; } } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 923c442315..9064cdeb13 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -7324,7 +7324,7 @@ static void drm_parse_tiled_block(struct drm_connector *connector, static bool displayid_is_tiled_block(const struct displayid_iter *iter, const struct displayid_block *block) { - return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 && + return (displayid_version(iter) < DISPLAY_ID_STRUCTURE_VER_20 && block->tag 
== DATA_BLOCK_TILED_DISPLAY) || (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 && block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY); diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c index d647d89764..b4659cd628 100644 --- a/drivers/gpu/drm/drm_fbdev_generic.c +++ b/drivers/gpu/drm/drm_fbdev_generic.c @@ -113,7 +113,6 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper, /* screen */ info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; info->screen_buffer = screen_buffer; - info->fix.smem_start = page_to_phys(vmalloc_to_page(info->screen_buffer)); info->fix.smem_len = screen_size; /* deferred I/O */ diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index e435f986cd..1ff0678be7 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -610,6 +610,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct return ret; } + if (is_cow_mapping(vma->vm_flags)) + return -EINVAL; + dma_resv_lock(shmem->base.resv, NULL); ret = drm_gem_shmem_get_pages(shmem); dma_resv_unlock(shmem->base.resv); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index ef6e416522..9874ff6d47 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -654,7 +654,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size); * * Return: 0 on success or a negative error code on failure. */ -ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable) +int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable) { /* Note: Needs updating for non-default PPS or algorithm */ u8 tx[2] = { enable << 0, 0 }; @@ -679,8 +679,8 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode); * * Return: 0 on success or a negative error code on failure. 
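/*
 * Hypothetical caller sketch (not from the patch): with the return type
 * narrowed from ssize_t to int, both DSC helpers can be error-checked
 * and propagated directly. The function name below is made up for
 * illustration and assumes the usual drm_mipi_dsi.h / drm_dsc.h
 * declarations.
 */
static int example_enable_dsc(struct mipi_dsi_device *dsi,
			      const struct drm_dsc_picture_parameter_set *pps)
{
	int ret;

	/* Send the PPS first, then enable compression on the link. */
	ret = mipi_dsi_picture_parameter_set(dsi, pps);
	if (ret < 0)
		return ret;

	return mipi_dsi_compression_mode(dsi, true);
}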
*/ -ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, - const struct drm_dsc_picture_parameter_set *pps) +int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, + const struct drm_dsc_picture_parameter_set *pps) { struct mipi_dsi_msg msg = { .channel = dsi->channel, diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index f5bbba9ad2..b1e9a70217 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector) struct vidi_context *ctx = ctx_from_connector(connector); struct edid *edid; int edid_len; + int count; /* * the edid data comes from user side and it would be set @@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector) drm_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + count = drm_add_edid_modes(connector, edid); + + kfree(edid); + + return count; } static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index b1d02dec37..6385202cd2 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector) int ret; if (!hdata->ddc_adpt) - return 0; + goto no_edid; edid = drm_get_edid(connector, hdata->ddc_adpt); if (!edid) - return 0; + goto no_edid; hdata->dvi_mode = !connector->display_info.is_hdmi; DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n", @@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector) kfree(edid); return ret; + +no_edid: + return drm_add_modes_noedid(connector, 640, 480); } static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index ed81e1466c..40e7d86267 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -1252,17 +1252,6 @@ static const struct component_ops i915_audio_component_bind_ops = { static void i915_audio_component_init(struct drm_i915_private *i915) { u32 aud_freq, aud_freq_init; - int ret; - - ret = component_add_typed(i915->drm.dev, - &i915_audio_component_bind_ops, - I915_COMPONENT_AUDIO); - if (ret < 0) { - drm_err(&i915->drm, - "failed to add audio component (%d)\n", ret); - /* continue with reduced functionality */ - return; - } if (DISPLAY_VER(i915) >= 9) { aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL); @@ -1285,6 +1274,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915) /* init with current cdclk */ intel_audio_cdclk_change_post(i915); +} + +static void i915_audio_component_register(struct drm_i915_private *i915) +{ + int ret; + + ret = component_add_typed(i915->drm.dev, + &i915_audio_component_bind_ops, + I915_COMPONENT_AUDIO); + if (ret < 0) { + drm_err(&i915->drm, + "failed to add audio component (%d)\n", ret); + /* continue with reduced functionality */ + return; + } i915->display.audio.component_registered = true; } @@ -1317,6 +1321,12 @@ void intel_audio_init(struct drm_i915_private *i915) i915_audio_component_init(i915); } +void intel_audio_register(struct drm_i915_private *i915) +{ + if (!i915->display.audio.lpe.platdev) + i915_audio_component_register(i915); +} + /** * intel_audio_deinit() - deinitialize the audio driver * @i915: the 
i915 drm device private data diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h index 9327954b80..576c061d72 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.h +++ b/drivers/gpu/drm/i915/display/intel_audio.h @@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder, void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv); void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv); void intel_audio_init(struct drm_i915_private *dev_priv); +void intel_audio_register(struct drm_i915_private *i915); void intel_audio_deinit(struct drm_i915_private *dev_priv); void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 87dd07e0d1..6da5e85abe 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -542,6 +542,8 @@ void intel_display_driver_register(struct drm_i915_private *i915) intel_display_driver_enable_user_access(i915); + intel_audio_register(i915); + intel_display_debugfs_register(i915); /* diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index e583515f9b..950f86fb13 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -431,6 +431,10 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) struct intel_encoder *encoder = &intel_dig_port->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + /* eDP MSO is not compatible with joiner */ + if (intel_dp->mso_link_count) + return false; + return DISPLAY_VER(dev_priv) >= 12 || (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index b651c990af..8264ff7fb6 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -1160,7 +1160,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, if (first_mst_stream) intel_ddi_wait_for_fec_status(encoder, pipe_config, true); - drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base, + drm_dp_add_payload_part2(&intel_dp->mst_mgr, drm_atomic_get_mst_payload_state(mst_state, connector->port)); if (DISPLAY_VER(dev_priv) >= 12) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 3560a062d2..5d7446a48a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -284,7 +284,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj); static inline bool i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) { - return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE); + /* TODO: make DPT shrinkable when it has no bound vmas */ + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) && + !obj->is_dpt; } static inline bool diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index d650beb8ed..20b9b04ec1 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -263,8 +263,13 @@ static void signal_irq_work(struct irq_work *work) i915_request_put(rq); } + /* Lazy irq enabling after HW submission */ if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers)) 
intel_breadcrumbs_arm_irq(b); + + /* And confirm that we still want irqs enabled before we yield */ + if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) + intel_breadcrumbs_disarm_irq(b); } struct intel_breadcrumbs * @@ -315,13 +320,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b) return; /* Kick the work once more to drain the signalers, and disarm the irq */ - irq_work_sync(&b->irq_work); - while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) { - local_irq_disable(); - signal_irq_work(&b->irq_work); - local_irq_enable(); - cond_resched(); - } + irq_work_queue(&b->irq_work); } void intel_breadcrumbs_free(struct kref *kref) @@ -404,7 +403,7 @@ static void insert_breadcrumb(struct i915_request *rq) * the request as it may have completed and raised the interrupt as * we were attaching it into the lists. */ - if (!b->irq_armed || __i915_request_is_complete(rq)) + if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq)) irq_work_queue(&b->irq_work); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 7a6dc371c3..bc6209df0f 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -919,6 +919,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) if (IS_DG2(gt->i915)) { u8 first_ccs = __ffs(CCS_MASK(gt)); + /* + * Store the number of active cslices before + * changing the CCS engine configuration + */ + gt->ccs.cslices = CCS_MASK(gt); + /* Mask off all the CCS engine */ info->engine_mask &= ~GENMASK(CCS3, CCS0); /* Put back in the first CCS engine */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c index 99b71bb7da..3c62a44e91 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c @@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt) /* Build the value for the fixed CCS load balancing */ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) { - if (CCS_MASK(gt) & BIT(cslice)) + if (gt->ccs.cslices & BIT(cslice)) /* * If available, assign the cslice * to the first available engine... diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index def7dd0eb6..cfdd2ad5e9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -207,6 +207,14 @@ struct intel_gt { [MAX_ENGINE_INSTANCE + 1]; enum intel_submission_method submission_method; + struct { + /* + * Mask of the non fused CCS slices + * to be used for the load balancing + */ + intel_engine_mask_t cslices; + } ccs; + /* * Default address space (either GGTT or ppGTT depending on arch). 
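/*
 * Editorial note (sketch): CCS_MASK(gt) is derived from the runtime
 * engine mask, which init_engine_mask() above trims down to a single CCS
 * engine on DG2. Snapshotting it into gt->ccs.cslices beforehand is what
 * lets intel_gt_apply_ccs_mode() still see every non-fused cslice when
 * it builds the fixed load-balancing value.
 */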
* diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h index 58012edd4e..4f4f53c42a 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h @@ -29,9 +29,9 @@ */ #define GUC_KLV_LEN_MIN 1u -#define GUC_KLV_0_KEY (0xffff << 16) -#define GUC_KLV_0_LEN (0xffff << 0) -#define GUC_KLV_n_VALUE (0xffffffff << 0) +#define GUC_KLV_0_KEY (0xffffu << 16) +#define GUC_KLV_0_LEN (0xffffu << 0) +#define GUC_KLV_n_VALUE (0xffffffffu << 0) /** * DOC: GuC Self Config KLVs diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c index b758fd110c..c0662a022f 100644 --- a/drivers/gpu/drm/i915/i915_hwmon.c +++ b/drivers/gpu/drm/i915/i915_hwmon.c @@ -793,7 +793,7 @@ void i915_hwmon_register(struct drm_i915_private *i915) if (!IS_DGFX(i915)) return; - hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL); + hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL); if (!hwmon) return; @@ -819,14 +819,12 @@ void i915_hwmon_register(struct drm_i915_private *i915) hwm_get_preregistration_info(i915); /* hwmon_dev points to device hwmon<i> */ - hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name, - ddat, - &hwm_chip_info, - hwm_groups); - if (IS_ERR(hwmon_dev)) { - i915->hwmon = NULL; - return; - } + hwmon_dev = hwmon_device_register_with_info(dev, ddat->name, + ddat, + &hwm_chip_info, + hwm_groups); + if (IS_ERR(hwmon_dev)) + goto err; ddat->hwmon_dev = hwmon_dev; @@ -839,16 +837,36 @@ void i915_hwmon_register(struct drm_i915_private *i915) if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0)) continue; - hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat_gt->name, - ddat_gt, - &hwm_gt_chip_info, - NULL); + hwmon_dev = hwmon_device_register_with_info(dev, ddat_gt->name, + ddat_gt, + &hwm_gt_chip_info, + NULL); if (!IS_ERR(hwmon_dev)) ddat_gt->hwmon_dev = hwmon_dev; } + return; +err: + i915_hwmon_unregister(i915); } void i915_hwmon_unregister(struct drm_i915_private *i915) { - fetch_and_zero(&i915->hwmon); + struct i915_hwmon *hwmon = i915->hwmon; + struct intel_gt *gt; + int i; + + if (!hwmon) + return; + + for_each_gt(gt, i915, i) + if (hwmon->ddat_gt[i].hwmon_dev) + hwmon_device_unregister(hwmon->ddat_gt[i].hwmon_dev); + + if (hwmon->ddat.hwmon_dev) + hwmon_device_unregister(hwmon->ddat.hwmon_dev); + + mutex_destroy(&hwmon->hwmon_lock); + + kfree(i915->hwmon); + i915->hwmon = NULL; } diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c index b7fef3c797..4f99b4af87 100644 --- a/drivers/gpu/drm/imagination/pvr_vm_mips.c +++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c @@ -46,7 +46,7 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev) if (!mips_data) return -ENOMEM; - for (page_nr = 0; page_nr < ARRAY_SIZE(mips_data->pt_pages); page_nr++) { + for (page_nr = 0; page_nr < PVR_MIPS_PT_PAGE_COUNT; page_nr++) { mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!mips_data->pt_pages[page_nr]) { err = -ENOMEM; @@ -102,7 +102,7 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev) int page_nr; vunmap(mips_data->pt); - for (page_nr = ARRAY_SIZE(mips_data->pt_pages) - 1; page_nr >= 0; page_nr--) { + for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) { dma_unmap_page(from_pvr_device(pvr_dev)->dev, mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE); diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c index fbc43f243c..6d000504e1 100644 --- 
a/drivers/gpu/drm/lima/lima_bcast.c +++ b/drivers/gpu/drm/lima/lima_bcast.c @@ -43,6 +43,18 @@ void lima_bcast_suspend(struct lima_ip *ip) } +int lima_bcast_mask_irq(struct lima_ip *ip) +{ + bcast_write(LIMA_BCAST_BROADCAST_MASK, 0); + bcast_write(LIMA_BCAST_INTERRUPT_MASK, 0); + return 0; +} + +int lima_bcast_reset(struct lima_ip *ip) +{ + return lima_bcast_hw_init(ip); +} + int lima_bcast_init(struct lima_ip *ip) { int i; diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h index 465ee587bc..cd08841e47 100644 --- a/drivers/gpu/drm/lima/lima_bcast.h +++ b/drivers/gpu/drm/lima/lima_bcast.h @@ -13,4 +13,7 @@ void lima_bcast_fini(struct lima_ip *ip); void lima_bcast_enable(struct lima_device *dev, int num_pp); +int lima_bcast_mask_irq(struct lima_ip *ip); +int lima_bcast_reset(struct lima_ip *ip); + #endif diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c index 6b354e2fb6..e152950715 100644 --- a/drivers/gpu/drm/lima/lima_gp.c +++ b/drivers/gpu/drm/lima/lima_gp.c @@ -233,6 +233,13 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe) lima_sched_pipe_task_done(pipe); } +static void lima_gp_task_mask_irq(struct lima_sched_pipe *pipe) +{ + struct lima_ip *ip = pipe->processor[0]; + + gp_write(LIMA_GP_INT_MASK, 0); +} + static int lima_gp_task_recover(struct lima_sched_pipe *pipe) { struct lima_ip *ip = pipe->processor[0]; @@ -365,6 +372,7 @@ int lima_gp_pipe_init(struct lima_device *dev) pipe->task_error = lima_gp_task_error; pipe->task_mmu_error = lima_gp_task_mmu_error; pipe->task_recover = lima_gp_task_recover; + pipe->task_mask_irq = lima_gp_task_mask_irq; return 0; } diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c index d0d2db0ef1..a4a2ffe652 100644 --- a/drivers/gpu/drm/lima/lima_pp.c +++ b/drivers/gpu/drm/lima/lima_pp.c @@ -429,6 +429,9 @@ static void lima_pp_task_error(struct lima_sched_pipe *pipe) lima_pp_hard_reset(ip); } + + if (pipe->bcast_processor) + lima_bcast_reset(pipe->bcast_processor); } static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe) @@ -437,6 +440,20 @@ static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe) lima_sched_pipe_task_done(pipe); } +static void lima_pp_task_mask_irq(struct lima_sched_pipe *pipe) +{ + int i; + + for (i = 0; i < pipe->num_processor; i++) { + struct lima_ip *ip = pipe->processor[i]; + + pp_write(LIMA_PP_INT_MASK, 0); + } + + if (pipe->bcast_processor) + lima_bcast_mask_irq(pipe->bcast_processor); +} + static struct kmem_cache *lima_pp_task_slab; static int lima_pp_task_slab_refcnt; @@ -468,6 +485,7 @@ int lima_pp_pipe_init(struct lima_device *dev) pipe->task_fini = lima_pp_task_fini; pipe->task_error = lima_pp_task_error; pipe->task_mmu_error = lima_pp_task_mmu_error; + pipe->task_mask_irq = lima_pp_task_mask_irq; return 0; } diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 00b19adfc8..bbf3f8feab 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -422,12 +422,21 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job */ for (i = 0; i < pipe->num_processor; i++) synchronize_irq(pipe->processor[i]->irq); + if (pipe->bcast_processor) + synchronize_irq(pipe->bcast_processor->irq); if (dma_fence_is_signaled(task->fence)) { DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip)); return DRM_GPU_SCHED_STAT_NOMINAL; } + /* + * The task might still finish while this timeout handler runs. 
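/*
 * Editorial note (sketch): pipe->task_mask_irq used below is the new
 * per-pipe op wired up earlier in this series of hunks; it resolves to
 * lima_gp_task_mask_irq() or lima_pp_task_mask_irq(), which zero the
 * GP/PP (and broadcast) interrupt masks so a late completion cannot race
 * the timeout handler before the next hard reset re-initializes them.
 */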
+ * To prevent a race condition on its completion, mask all irqs + * on the running core until the next hard reset completes. + */ + pipe->task_mask_irq(pipe); + if (!pipe->error) DRM_ERROR("%s job timeout\n", lima_ip_name(ip)); diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h index 6bd4f3b701..85b23ba901 100644 --- a/drivers/gpu/drm/lima/lima_sched.h +++ b/drivers/gpu/drm/lima/lima_sched.h @@ -80,6 +80,7 @@ struct lima_sched_pipe { void (*task_error)(struct lima_sched_pipe *pipe); void (*task_mmu_error)(struct lima_sched_pipe *pipe); int (*task_recover)(struct lima_sched_pipe *pipe); + void (*task_mask_irq)(struct lima_sched_pipe *pipe); struct work_struct recover_work; }; diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index 0ba7210263..5363669564 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -2104,7 +2104,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux, if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP && !mtk_dp->train_info.cable_plugged_in) { - ret = -EAGAIN; + ret = -EIO; goto err; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index a04499c4f9..29207b2756 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -1009,10 +1009,10 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mtk_crtc->mmsys_dev = priv->mmsys_dev; mtk_crtc->ddp_comp_nr = path_len; - mtk_crtc->ddp_comp = devm_kmalloc_array(dev, - mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0), - sizeof(*mtk_crtc->ddp_comp), - GFP_KERNEL); + mtk_crtc->ddp_comp = devm_kcalloc(dev, + mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0), + sizeof(*mtk_crtc->ddp_comp), + GFP_KERNEL); if (!mtk_crtc->ddp_comp) return -ENOMEM; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 4f2e3feabc..1bf229615b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -38,6 +38,9 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev, size = round_up(size, PAGE_SIZE); + if (size == 0) + return ERR_PTR(-EINVAL); + mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL); if (!mtk_gem_obj) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c index a6bc1bdb3d..a10cff3ca1 100644 --- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c +++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c @@ -95,6 +95,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data) return ret; } + clk_disable_unprepare(mipi_dsi->px_clk); ret = clk_set_rate(mipi_dsi->px_clk, mipi_dsi->mode->clock * 1000); if (ret) { @@ -103,6 +104,12 @@ static int dw_mipi_dsi_phy_init(void *priv_data) return ret; } + ret = clk_prepare_enable(mipi_dsi->px_clk); + if (ret) { + dev_err(mipi_dsi->dev, "Failed to enable DSI Pixel clock (ret %d)\n", ret); + return ret; + } + switch (mipi_dsi->dsi_device->format) { case MIPI_DSI_FMT_RGB888: dpi_data_format = DPI_COLOR_24BIT; diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c index 2a82119eb5..2a942dc6a6 100644 --- a/drivers/gpu/drm/meson/meson_vclk.c +++ b/drivers/gpu/drm/meson/meson_vclk.c @@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, FREQ_1000_1001(params[i].pixel_freq)); DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n", i, params[i].phy_freq, - FREQ_1000_1001(params[i].phy_freq/10)*10); 
+ FREQ_1000_1001(params[i].phy_freq/1000)*1000); /* Match strict frequency */ if (phy_freq == params[i].phy_freq && vclk_freq == params[i].vclk_freq) return MODE_OK; /* Match 1000/1001 variant */ - if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) && + if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) && vclk_freq == FREQ_1000_1001(params[i].vclk_freq)) return MODE_OK; } @@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target, for (freq = 0 ; params[freq].pixel_freq ; ++freq) { if ((phy_freq == params[freq].phy_freq || - phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) && + phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) && (vclk_freq == params[freq].vclk_freq || vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) { if (vclk_freq != params[freq].vclk_freq) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index cf0b1de1c0..7b72327df7 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -284,7 +284,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); - get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), + get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_start)); get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER, rbmemptr_stats(ring, index, alwayson_start)); @@ -330,7 +330,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_SET_MARKER, 1); OUT_RING(ring, 0x00e); /* IB1LIST end */ - get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), + get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_end)); get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER, rbmemptr_stats(ring, index, alwayson_end)); @@ -3062,7 +3062,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) ret = a6xx_set_supported_hw(&pdev->dev, config->info); if (ret) { - a6xx_destroy(&(a6xx_gpu->base.base)); + a6xx_llc_slices_destroy(a6xx_gpu); + kfree(a6xx_gpu); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index fc1d5736d7..489be1c0c7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -448,9 +448,6 @@ static void dpu_encoder_phys_cmd_enable_helper( _dpu_encoder_phys_cmd_pingpong_config(phys_enc); - if (!dpu_encoder_phys_cmd_is_master(phys_enc)) - return; - ctl = phys_enc->hw_ctl; ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index a06f69d0b2..2e50049f2f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -545,6 +545,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, { struct dpu_hw_blk_reg_map *c = &ctx->hw; u32 intf_active = 0; + u32 dsc_active = 0; u32 wb_active = 0; u32 mode_sel = 0; @@ -560,6 +561,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE); wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE); + dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE); if (cfg->intf) intf_active |= BIT(cfg->intf - INTF_0); @@ -567,17 +569,18 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, if (cfg->wb) wb_active |= BIT(cfg->wb - WB_0); + if (cfg->dsc) + 
dsc_active |= cfg->dsc; + DPU_REG_WRITE(c, CTL_TOP, mode_sel); DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active); + DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active); if (cfg->merge_3d) DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0)); - if (cfg->dsc) - DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc); - if (cfg->cdm) DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index 6a0a74832f..b85881aab0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -223,9 +223,11 @@ static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx)); - if (!irq_entry->cb) + if (!irq_entry->cb) { DRM_ERROR("no registered cb, IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx)); + return; + } atomic_inc(&irq_entry->count); diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index adbd5a3673..707489776e 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -38,6 +38,7 @@ struct dp_aux_private { bool no_send_stop; bool initted; bool is_edp; + bool enable_xfers; u32 offset; u32 segment; @@ -305,6 +306,17 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, } /* + * If we're using DP and an external display isn't connected then the + * transfer won't succeed. Return right away. If we don't do this we + * can end up with long timeouts if someone tries to access the DP AUX + * character device when no DP device is connected. + */ + if (!aux->is_edp && !aux->enable_xfers) { + ret = -ENXIO; + goto exit; + } + + /* * For eDP it's important to give a reasonably long wait here for HPD * to be asserted. This is because the panel driver may have _just_ * turned on the panel and then tried to do an AUX transfer. The panel @@ -313,7 +325,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, * avoid ever doing the extra long wait for DP. 
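/*
 * Illustrative note (hypothetical values, not patch text): the connect
 * poll now takes its budget from the caller. The eDP path below keeps
 * the original 500 ms by passing 500000, while dp_wait_hpd_asserted()
 * forwards the wait_us it was given, e.g.
 *
 *   dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, 10000);
 *
 * polls roughly every 2 ms (min(wait_us, 2000) us) for up to 10 ms.
 */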
*/ if (aux->is_edp) { - ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog); + ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, + 500000); if (ret) { DRM_DEBUG_DP("Panel not ready for aux transactions\n"); goto exit; @@ -436,6 +449,14 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux) return IRQ_HANDLED; } +void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled) +{ + struct dp_aux_private *aux; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + aux->enable_xfers = enabled; +} + void dp_aux_reconfig(struct drm_dp_aux *dp_aux) { struct dp_aux_private *aux; @@ -513,7 +534,7 @@ static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux, aux = container_of(dp_aux, struct dp_aux_private, dp_aux); pm_runtime_get_sync(aux->dev); - ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog); + ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us); pm_runtime_put_sync(aux->dev); return ret; diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h index f47d591c1f..4f65e892a8 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.h +++ b/drivers/gpu/drm/msm/dp/dp_aux.h @@ -12,6 +12,7 @@ int dp_aux_register(struct drm_dp_aux *dp_aux); void dp_aux_unregister(struct drm_dp_aux *dp_aux); irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux); +void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled); void dp_aux_init(struct drm_dp_aux *dp_aux); void dp_aux_deinit(struct drm_dp_aux *dp_aux); void dp_aux_reconfig(struct drm_dp_aux *dp_aux); diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 3e7c84cdef..628c8181dd 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -263,17 +263,18 @@ void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable) dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); } -int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog) +int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog, + unsigned long wait_us) { u32 state; struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); - /* poll for hpd connected status every 2ms and timeout after 500ms */ + /* poll for hpd connected status every 2ms and timeout after wait_us */ return readl_poll_timeout(catalog->io.aux.base + REG_DP_DP_HPD_INT_STATUS, state, state & DP_DP_HPD_STATE_STATUS_CONNECTED, - 2000, 500000); + min(wait_us, 2000), wait_us); } static void dump_regs(void __iomem *base, int len) diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h index 75ec290127..72a8581060 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -87,7 +87,8 @@ int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read); int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog); void dp_catalog_aux_reset(struct dp_catalog *dp_catalog); void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable); -int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog); +int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog, + unsigned long wait_us); u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog); /* DP Controller APIs */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index c4dda1faef..112c7e54fc 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1052,14 +1052,14 @@ static int 
dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) if (ret) return ret; - if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) { + if (voltage_swing_level >= DP_TRAIN_LEVEL_MAX) { drm_dbg_dp(ctrl->drm_dev, "max. voltage swing level reached %d\n", voltage_swing_level); max_level_reached |= DP_TRAIN_MAX_SWING_REACHED; } - if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) { + if (pre_emphasis_level >= DP_TRAIN_LEVEL_MAX) { drm_dbg_dp(ctrl->drm_dev, "max. pre-emphasis level reached %d\n", pre_emphasis_level); @@ -1150,7 +1150,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, } if (ctrl->link->phy_params.v_level >= - DP_TRAIN_VOLTAGE_SWING_MAX) { + DP_TRAIN_LEVEL_MAX) { DRM_ERROR_RATELIMITED("max v_level reached\n"); return -EAGAIN; } diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index ffbfde9225..36a0ef1cdc 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -555,6 +555,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) int ret; struct platform_device *pdev = dp->dp_display.pdev; + dp_aux_enable_xfers(dp->aux, true); + mutex_lock(&dp->event_mutex); state = dp->hpd_state; @@ -620,6 +622,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) u32 state; struct platform_device *pdev = dp->dp_display.pdev; + dp_aux_enable_xfers(dp->aux, false); + mutex_lock(&dp->event_mutex); state = dp->hpd_state; diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index 49dfac1fd1..ea911d9244 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -1109,6 +1109,7 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link) int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) { int i; + u8 max_p_level; int v_max = 0, p_max = 0; struct dp_link_private *link; @@ -1140,30 +1141,29 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) * Adjust the voltage swing and pre-emphasis level combination to within * the allowable range. 
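/*
 * Worked example of the clamping below (illustrative): with
 * DP_TRAIN_LEVEL_MAX = 3, a requested v_level of 2 allows at most
 * p_level = 3 - 2 = 1, so a requested pre-emphasis of 2 or 3 is lowered
 * to 1; v_level and p_level are each individually capped to 3 first.
 */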
*/ - if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) { + if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) { drm_dbg_dp(link->drm_dev, "Requested vSwingLevel=%d, change to %d\n", dp_link->phy_params.v_level, - DP_TRAIN_VOLTAGE_SWING_MAX); - dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX; + DP_TRAIN_LEVEL_MAX); + dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX; } - if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) { + if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) { drm_dbg_dp(link->drm_dev, "Requested preEmphasisLevel=%d, change to %d\n", dp_link->phy_params.p_level, - DP_TRAIN_PRE_EMPHASIS_MAX); - dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX; + DP_TRAIN_LEVEL_MAX); + dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX; } - if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1) - && (dp_link->phy_params.v_level == - DP_TRAIN_VOLTAGE_SWING_LVL_2)) { + max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level; + if (dp_link->phy_params.p_level > max_p_level) { drm_dbg_dp(link->drm_dev, "Requested preEmphasisLevel=%d, change to %d\n", dp_link->phy_params.p_level, - DP_TRAIN_PRE_EMPHASIS_LVL_1); - dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1; + max_p_level); + dp_link->phy_params.p_level = max_p_level; } drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n", diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h index 83da170bc5..42aed9c90b 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.h +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -19,19 +19,7 @@ struct dp_link_info { unsigned long capabilities; }; -enum dp_link_voltage_level { - DP_TRAIN_VOLTAGE_SWING_LVL_0 = 0, - DP_TRAIN_VOLTAGE_SWING_LVL_1 = 1, - DP_TRAIN_VOLTAGE_SWING_LVL_2 = 2, - DP_TRAIN_VOLTAGE_SWING_MAX = DP_TRAIN_VOLTAGE_SWING_LVL_2, -}; - -enum dp_link_preemaphasis_level { - DP_TRAIN_PRE_EMPHASIS_LVL_0 = 0, - DP_TRAIN_PRE_EMPHASIS_LVL_1 = 1, - DP_TRAIN_PRE_EMPHASIS_LVL_2 = 2, - DP_TRAIN_PRE_EMPHASIS_MAX = DP_TRAIN_PRE_EMPHASIS_LVL_2, -}; +#define DP_TRAIN_LEVEL_MAX 3 struct dp_link_test_video { u32 test_video_pattern; diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 9d86a6aca6..c80be74cf1 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -356,8 +356,8 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) { int ret; - DBG("Set clk rates: pclk=%d, byteclk=%lu", - msm_host->mode->clock, msm_host->byte_clk_rate); + DBG("Set clk rates: pclk=%lu, byteclk=%lu", + msm_host->pixel_clk_rate, msm_host->byte_clk_rate); ret = dev_pm_opp_set_rate(&msm_host->pdev->dev, msm_host->byte_clk_rate); @@ -430,9 +430,9 @@ int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host) { int ret; - DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu", - msm_host->mode->clock, msm_host->byte_clk_rate, - msm_host->esc_clk_rate, msm_host->src_clk_rate); + DBG("Set clk rates: pclk=%lu, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu", + msm_host->pixel_clk_rate, msm_host->byte_clk_rate, + msm_host->esc_clk_rate, msm_host->src_clk_rate); ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); if (ret) { diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c index ea10bf8158..0f895b8a99 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_drv.c +++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c @@ -343,6 +343,9 @@ static int __maybe_unused lcdif_suspend(struct device *dev) if (ret) return ret; + if (pm_runtime_suspended(dev)) + return 0; 
+ return lcdif_rpm_suspend(dev); } @@ -350,7 +353,8 @@ static int __maybe_unused lcdif_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - lcdif_rpm_resume(dev); + if (!pm_runtime_suspended(dev)) + lcdif_rpm_resume(dev); return drm_mode_config_helper_resume(drm); } diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 13705c5f14..4b7497a875 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -68,7 +68,7 @@ nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend) if (nv_two_heads(dev)) NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0); - if (!runtime) + if (!runtime && !drm->headless) cancel_work_sync(&drm->hpd_work); if (!suspend) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 0c3d88ad0b..674dc567e1 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -915,7 +915,7 @@ nv50_msto_cleanup(struct drm_atomic_state *state, msto->disabled = false; drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload); } else if (msto->enabled) { - drm_dp_add_payload_part2(mgr, state, new_payload); + drm_dp_add_payload_part2(mgr, new_payload); msto->enabled = false; } } @@ -2680,7 +2680,7 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend) nv50_mstm_fini(nouveau_encoder(encoder)); } - if (!runtime) + if (!runtime && !drm->headless) cancel_work_sync(&drm->hpd_work); } diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 80f74ee0fc..47e53e17b4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -272,6 +272,9 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) getparam->value = (u64)ttm_resource_manager_usage(vram_mgr); break; } + case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE: + getparam->value = 1; + break; default: NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param); return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index db8cbf6151..186add400e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -241,28 +241,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, } nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG); - if (!nouveau_cli_uvmm(cli) || internal) { - /* for BO noVM allocs, don't assign kinds */ - if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) { - nvbo->kind = (tile_flags & 0x0000ff00) >> 8; - if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { - kfree(nvbo); - return ERR_PTR(-EINVAL); - } - nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; - } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - nvbo->kind = (tile_flags & 0x00007f00) >> 8; - nvbo->comp = (tile_flags & 0x00030000) >> 16; - if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { - kfree(nvbo); - return ERR_PTR(-EINVAL); - } - } else { - nvbo->zeta = (tile_flags & 0x00000007); + if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) { + nvbo->kind = (tile_flags & 0x0000ff00) >> 8; + if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { + kfree(nvbo); + return ERR_PTR(-EINVAL); + } + + nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; + } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + nvbo->kind = (tile_flags & 0x00007f00) >> 8; + nvbo->comp = (tile_flags & 0x00030000) >> 16; + if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { + 
kfree(nvbo); + return ERR_PTR(-EINVAL); } - nvbo->mode = tile_mode; + } else { + nvbo->zeta = (tile_flags & 0x00000007); + } + nvbo->mode = tile_mode; + if (!nouveau_cli_uvmm(cli) || internal) { /* Determine the desirable target GPU page size for the buffer. */ for (i = 0; i < vmm->page_nr; i++) { /* Because we cannot currently allow VMM maps to fail @@ -304,12 +304,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, } nvbo->page = vmm->page[pi].shift; } else { - /* reject other tile flags when in VM mode. */ - if (tile_mode) - return ERR_PTR(-EINVAL); - if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG) - return ERR_PTR(-EINVAL); - /* Determine the desirable target GPU page size for the buffer. */ for (i = 0; i < vmm->page_nr; i++) { /* Because we cannot currently allow VMM maps to fail diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index f28f9a8574..60c3224421 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -450,6 +450,9 @@ nouveau_display_hpd_resume(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); + if (drm->headless) + return; + spin_lock_irq(&drm->hpd_lock); drm->hpd_pending = ~0; spin_unlock_irq(&drm->hpd_lock); @@ -635,7 +638,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime) } drm_connector_list_iter_end(&conn_iter); - if (!runtime) + if (!runtime && !drm->headless) cancel_work_sync(&drm->hpd_work); drm_kms_helper_poll_disable(dev); @@ -729,6 +732,7 @@ nouveau_display_create(struct drm_device *dev) /* no display hw */ if (ret == -ENODEV) { ret = 0; + drm->headless = true; goto disp_create_err; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e239c6bf4a..25fca98a20 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -276,6 +276,7 @@ struct nouveau_drm { /* modesetting */ struct nvbios vbios; struct nouveau_display *display; + bool headless; struct work_struct hpd_work; spinlock_t hpd_lock; u32 hpd_pending; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c index 6a0a4d3b89..027867c2a8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c @@ -1080,7 +1080,7 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize) ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl)); if (ret) { nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl); - return PTR_ERR(ctrl); + return ret; } memcpy(data, ctrl->data, size); diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index b715301ec7..6c49270cb2 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -4,7 +4,7 @@ config DRM_OMAP depends on DRM && OF depends on ARCH_OMAP2PLUS select DRM_KMS_HELPER - select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION + select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION select VIDEOMODE_HELPERS select HDMI default n diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 6b08b137af..523be34682 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -51,6 +51,10 @@ static void pan_worker(struct work_struct *work) omap_gem_roll(bo, fbi->var.yoffset * npages); } +FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev, + drm_fb_helper_damage_range, + 
drm_fb_helper_damage_area) + static int omap_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi) { @@ -78,11 +82,9 @@ fallback: static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { - struct drm_fb_helper *helper = info->par; - struct drm_framebuffer *fb = helper->fb; - struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0); + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma); + return fb_deferred_io_mmap(info, vma); } static void omap_fbdev_fb_destroy(struct fb_info *info) @@ -94,6 +96,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info) DBG(); + fb_deferred_io_cleanup(info); drm_fb_helper_fini(helper); omap_gem_unpin(bo); @@ -104,15 +107,19 @@ static void omap_fbdev_fb_destroy(struct fb_info *info) kfree(fbdev); } +/* + * For now, we cannot use FB_DEFAULT_DEFERRED_OPS and fb_deferred_io_mmap() + * because we use write-combine. + */ static const struct fb_ops omap_fb_ops = { .owner = THIS_MODULE, - __FB_DEFAULT_DMAMEM_OPS_RDWR, + __FB_DEFAULT_DEFERRED_OPS_RDWR(omap_fbdev), .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_setcmap = drm_fb_helper_setcmap, .fb_blank = drm_fb_helper_blank, .fb_pan_display = omap_fbdev_pan_display, - __FB_DEFAULT_DMAMEM_OPS_DRAW, + __FB_DEFAULT_DEFERRED_OPS_DRAW(omap_fbdev), .fb_ioctl = drm_fb_helper_ioctl, .fb_mmap = omap_fbdev_fb_mmap, .fb_destroy = omap_fbdev_fb_destroy, @@ -213,6 +220,15 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, fbi->fix.smem_start = dma_addr; fbi->fix.smem_len = bo->size; + /* deferred I/O */ + helper->fbdefio.delay = HZ / 20; + helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; + + fbi->fbdefio = &helper->fbdefio; + ret = fb_deferred_io_init(fbi); + if (ret) + goto fail; + /* if we have DMM, then we can use it for scrolling by just * shuffling pages around in DMM rather than doing sw blit. 
*/ @@ -238,8 +254,20 @@ fail: return ret; } +static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) +{ + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) + return 0; + + if (helper->fb->funcs->dirty) + return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); + + return 0; +} + static const struct drm_fb_helper_funcs omap_fb_helper_funcs = { .fb_probe = omap_fbdev_create, + .fb_dirty = omap_fbdev_dirty, }; static struct drm_fb_helper *get_fb(struct fb_info *fbi) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index d58f90bc48..745f3e48f0 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1865,6 +1865,13 @@ static const struct panel_delay delay_200_500_e50 = { .enable = 50, }; +static const struct panel_delay delay_200_500_e50_p2e200 = { + .hpd_absent = 200, + .unprepare = 500, + .enable = 50, + .prepare_to_enable = 200, +}; + static const struct panel_delay delay_200_500_e80 = { .hpd_absent = 200, .unprepare = 500, @@ -2034,7 +2041,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), - EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50, "MNC207QS1-1"), + EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"), EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"), EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"), diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c index 9d87cc1a35..1a26205701 100644 --- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c @@ -295,8 +295,6 @@ static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx) mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00); mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef); mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02); - mipi_dsi_dcs_write_seq(dsi, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0x29); ret = mipi_dsi_dcs_set_tear_on(dsi, 1); if (ret < 0) { @@ -326,7 +324,8 @@ static const struct drm_display_mode ltk050h3148w_mode = { static const struct ltk050h3146w_desc ltk050h3148w_data = { .mode = <k050h3148w_mode, .init = ltk050h3148w_init_sequence, - .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_VIDEO_BURST, }; static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx) diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c index 648ce92014..028fdac293 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c @@ -556,10 +556,8 @@ static int nt35950_probe(struct mipi_dsi_device *dsi) } dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r); of_node_put(dsi_r); - if (!dsi_r_host) { - dev_err(dev, "Cannot get secondary DSI host\n"); - return -EPROBE_DEFER; - } + if (!dsi_r_host) + return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n"); nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info); if (!nt->dsi[1]) { diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c index 76c2a8f671..9c336c7156 100644 --- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c +++ 
b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c @@ -109,19 +109,17 @@ static int atana33xc20_resume(struct device *dev) if (hpd_asserted < 0) ret = hpd_asserted; - if (ret) + if (ret) { dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret); - - return ret; - } - - if (p->aux->wait_hpd_asserted) { + goto error; + } + } else if (p->aux->wait_hpd_asserted) { ret = p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US); - if (ret) + if (ret) { dev_warn(dev, "Controller error waiting for HPD: %d\n", ret); - - return ret; + goto error; + } } /* @@ -133,6 +131,12 @@ static int atana33xc20_resume(struct device *dev) * right times. */ return 0; + +error: + drm_dp_dpcd_set_powered(p->aux, false); + regulator_disable(p->supply); + + return ret; } static int atana33xc20_disable(struct drm_panel *panel) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 20e3df1c59..e8fe5a6945 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2591,6 +2591,9 @@ static const struct panel_desc innolux_g121x1_l03 = { .unprepare = 200, .disable = 400, }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing innolux_g156hce_l01_timings = { diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 88e80fe981..28bfc48a91 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -282,15 +282,15 @@ static const struct drm_display_mode et028013dma_mode = { static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = { .clock = 6000, .hdisplay = 240, - .hsync_start = 240 + 28, - .hsync_end = 240 + 28 + 10, - .htotal = 240 + 28 + 10 + 10, + .hsync_start = 240 + 38, + .hsync_end = 240 + 38 + 10, + .htotal = 240 + 38 + 10 + 10, .vdisplay = 280, - .vsync_start = 280 + 8, - .vsync_end = 280 + 8 + 4, - .vtotal = 280 + 8 + 4 + 4, - .width_mm = 43, - .height_mm = 37, + .vsync_start = 280 + 48, + .vsync_end = 280 + 48 + 4, + .vtotal = 280 + 48 + 4 + 4, + .width_mm = 37, + .height_mm = 43, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; @@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi) if (ret) return dev_err_probe(dev, ret, "Failed to get backlight\n"); - of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation); + ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation); + if (ret) + return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n"); drm_panel_add(&ctx->panel); diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index 21d27e6235..b11f7c5bbc 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -1619,6 +1619,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev, for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { if (table[i].ulSupportedSCLK != 0) { + if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES) + continue; vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = table[i].usVoltageID; vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index fdd768bbd4..62ebbdb162 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -706,6 +706,8 @@ static void vop2_setup_scale(struct vop2 *vop2, const 
struct vop2_win *win, const struct drm_format_info *info; u16 hor_scl_mode, ver_scl_mode; u16 hscl_filter_mode, vscl_filter_mode; + uint16_t cbcr_src_w = src_w; + uint16_t cbcr_src_h = src_h; u8 gt2 = 0; u8 gt4 = 0; u32 val; @@ -763,27 +765,27 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win, vop2_win_write(win, VOP2_WIN_YRGB_VSCL_FILTER_MODE, vscl_filter_mode); if (info->is_yuv) { - src_w /= info->hsub; - src_h /= info->vsub; + cbcr_src_w /= info->hsub; + cbcr_src_h /= info->vsub; gt4 = 0; gt2 = 0; - if (src_h >= (4 * dst_h)) { + if (cbcr_src_h >= (4 * dst_h)) { gt4 = 1; - src_h >>= 2; - } else if (src_h >= (2 * dst_h)) { + cbcr_src_h >>= 2; + } else if (cbcr_src_h >= (2 * dst_h)) { gt2 = 1; - src_h >>= 1; + cbcr_src_h >>= 1; } - hor_scl_mode = scl_get_scl_mode(src_w, dst_w); - ver_scl_mode = scl_get_scl_mode(src_h, dst_h); + hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w); + ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h); - val = vop2_scale_factor(src_w, dst_w); + val = vop2_scale_factor(cbcr_src_w, dst_w); vop2_win_write(win, VOP2_WIN_SCALE_CBCR_X, val); - val = vop2_scale_factor(src_h, dst_h); + val = vop2_scale_factor(cbcr_src_h, dst_h); vop2_win_write(win, VOP2_WIN_SCALE_CBCR_Y, val); vop2_win_write(win, VOP2_WIN_VSD_CBCR_GT4, gt4); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index d8751ea203..5f8d51b293 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -2740,6 +2740,8 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) index = 1; addr = of_get_address(dev->of_node, index, NULL, NULL); + if (!addr) + return -EINVAL; vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset; vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 58fb40c931..bea576434e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -956,13 +956,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) vmw_read(dev_priv, SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); - /* - * Workaround for low memory 2D VMs to compensate for the - * allocation taken by fbdev - */ - if (!(dev_priv->capabilities & SVGA_CAP_3D)) - mem_size *= 3; - dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; dev_priv->max_primary_mem = vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index b019a1a178..c1430e5547 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1066,9 +1066,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, int vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned width, unsigned height, unsigned pitch, unsigned bpp, unsigned depth); -bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, - uint32_t pitch, - uint32_t height); int vmw_kms_present(struct vmw_private *dev_priv, struct drm_file *file_priv, struct vmw_framebuffer *vfb, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 84ae4e10a2..11755d143e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -216,7 +216,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps, new_image = vmw_du_cursor_plane_acquire_image(new_vps); changed = false; - if (old_image && new_image) + if (old_image && new_image && old_image != new_image) changed = 
memcmp(old_image, new_image, size) != 0; return changed; @@ -2157,13 +2157,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv, return 0; } +static bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, - uint32_t pitch, - uint32_t height) + u64 pitch, + u64 height) { - return ((u64) pitch * (u64) height) < (u64) - ((dev_priv->active_display_unit == vmw_du_screen_target) ? - dev_priv->max_primary_mem : dev_priv->vram_size); + return (pitch * height) < (u64)dev_priv->vram_size; } /** @@ -2859,25 +2858,18 @@ out_unref: enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + enum drm_mode_status ret; struct drm_device *dev = connector->dev; struct vmw_private *dev_priv = vmw_priv(dev); - u32 max_width = dev_priv->texture_max_width; - u32 max_height = dev_priv->texture_max_height; u32 assumed_cpp = 4; if (dev_priv->assume_16bpp) assumed_cpp = 2; - if (dev_priv->active_display_unit == vmw_du_screen_target) { - max_width = min(dev_priv->stdu_max_width, max_width); - max_height = min(dev_priv->stdu_max_height, max_height); - } - - if (max_width < mode->hdisplay) - return MODE_BAD_HVALUE; - - if (max_height < mode->vdisplay) - return MODE_BAD_VVALUE; + ret = drm_mode_validate_size(mode, dev_priv->texture_max_width, + dev_priv->texture_max_height); + if (ret != MODE_OK) + return ret; if (!vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * assumed_cpp, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 3c8414a13d..dbc44ecbd1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -41,7 +41,14 @@ #define vmw_connector_to_stdu(x) \ container_of(x, struct vmw_screen_target_display_unit, base.connector) - +/* + * Some renderers such as llvmpipe will align the width and height of their + * buffers to match their tile size. We need to keep this in mind when exposing + * modes to userspace so that this possible over-allocation will not exceed + * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the + * tile size of current renderers. + */ +#define GPU_TILE_SIZE 64 enum stdu_content_type { SAME_AS_DISPLAY = 0, @@ -830,7 +837,41 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector) vmw_stdu_destroy(vmw_connector_to_stdu(connector)); } +static enum drm_mode_status +vmw_stdu_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + enum drm_mode_status ret; + struct drm_device *dev = connector->dev; + struct vmw_private *dev_priv = vmw_priv(dev); + u64 assumed_cpp = dev_priv->assume_16bpp ? 
2 : 4; + /* Align width and height to account for GPU tile over-alignment */ + u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) * + ALIGN(mode->vdisplay, GPU_TILE_SIZE) * + assumed_cpp; + required_mem = ALIGN(required_mem, PAGE_SIZE); + + ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width, + dev_priv->stdu_max_height); + if (ret != MODE_OK) + return ret; + ret = drm_mode_validate_size(mode, dev_priv->texture_max_width, + dev_priv->texture_max_height); + if (ret != MODE_OK) + return ret; + + if (required_mem > dev_priv->max_primary_mem) + return MODE_MEM; + + if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE) + return MODE_MEM; + + if (required_mem > dev_priv->max_mob_size) + return MODE_MEM; + + return MODE_OK; +} static const struct drm_connector_funcs vmw_stdu_connector_funcs = { .dpms = vmw_du_connector_dpms, @@ -846,7 +887,7 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = { static const struct drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = { .get_modes = vmw_connector_get_modes, - .mode_valid = vmw_connector_mode_valid + .mode_valid = vmw_stdu_connector_mode_valid }; diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c index 7c124475c4..a35e0781b7 100644 --- a/drivers/gpu/drm/xe/xe_bb.c +++ b/drivers/gpu/drm/xe/xe_bb.c @@ -96,7 +96,8 @@ struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q, { u64 addr = xe_sa_bo_gpu_addr(bb->bo); - xe_gt_assert(q->gt, !(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION)); + xe_gt_assert(q->gt, !xe_sched_job_is_migration(q)); + xe_gt_assert(q->gt, q->width == 1); return __xe_bb_create_job(q, bb, &addr); } diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index d32ff3857e..b3b37ed832 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -389,8 +389,14 @@ mask_err: return err; } -/* - * Initialize MMIO resources that don't require any knowledge about tile count. +/** + * xe_device_probe_early: Device early probe + * @xe: xe device instance + * + * Initialize MMIO resources that don't require any + * knowledge about tile count. 
Also initialize pcode + * + * Return: 0 on success, error code on failure */ int xe_device_probe_early(struct xe_device *xe) { @@ -404,6 +410,10 @@ int xe_device_probe_early(struct xe_device *xe) if (err) return err; + err = xe_pcode_probe_early(xe); + if (err) + return err; + return 0; } @@ -482,11 +492,8 @@ int xe_device_probe(struct xe_device *xe) if (err) return err; - for_each_gt(gt, xe, id) { - err = xe_pcode_probe(gt); - if (err) - return err; - } + for_each_gt(gt, xe, id) + xe_pcode_init(gt); err = xe_display_init_noirq(xe); if (err) diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index 9fcae65b64..e7a39ad7ad 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -126,6 +126,13 @@ static const struct attribute *gt_idle_attrs[] = { static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg) { struct kobject *kobj = arg; + struct xe_gt *gt = kobj_to_gt(kobj->parent); + + if (gt_to_xe(gt)->info.skip_guc_pc) { + XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + xe_gt_idle_disable_c6(gt); + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + } sysfs_remove_files(kobj, gt_idle_attrs); kobject_put(kobj); @@ -184,7 +191,7 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt) void xe_gt_idle_disable_c6(struct xe_gt *gt) { xe_device_assert_mem_access(gt_to_xe(gt)); - xe_force_wake_assert_held(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); xe_mmio_write32(gt, PG_ENABLE, 0); xe_mmio_write32(gt, RC_CONTROL, 0); diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 0d2a2dd13f..a38f59b435 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -643,8 +643,6 @@ int xe_guc_enable_communication(struct xe_guc *guc) struct xe_device *xe = guc_to_xe(guc); int err; - guc_enable_irq(guc); - if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) { struct xe_gt *gt = guc_to_gt(guc); struct xe_tile *tile = gt_to_tile(gt); @@ -652,6 +650,8 @@ int xe_guc_enable_communication(struct xe_guc *guc) err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc); if (err) return err; + } else { + guc_enable_irq(guc); } xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK, diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 2839d68563..dd9d65f923 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -381,8 +381,6 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) struct xe_device *xe = gt_to_xe(gt); u32 freq; - xe_device_mem_access_get(gt_to_xe(gt)); - /* When in RC6, actual frequency reported will be 0. */ if (GRAPHICS_VERx100(xe) >= 1270) { freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1); @@ -394,8 +392,6 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) freq = decode_freq(freq); - xe_device_mem_access_put(gt_to_xe(gt)); - return freq; } @@ -412,14 +408,13 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq) struct xe_gt *gt = pc_to_gt(pc); int ret; - xe_device_mem_access_get(gt_to_xe(gt)); /* * GuC SLPC plays with cur freq request when GuCRC is enabled * Block RC6 for a more reliable read. 
*/ ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); if (ret) - goto out; + return ret; *freq = xe_mmio_read32(gt, RPNSWREQ); @@ -427,9 +422,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq) *freq = decode_freq(*freq); XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); -out: - xe_device_mem_access_put(gt_to_xe(gt)); - return ret; + return 0; } /** @@ -451,12 +444,7 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc) */ u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc) { - struct xe_gt *gt = pc_to_gt(pc); - struct xe_device *xe = gt_to_xe(gt); - - xe_device_mem_access_get(xe); pc_update_rp_values(pc); - xe_device_mem_access_put(xe); return pc->rpe_freq; } @@ -485,7 +473,6 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) struct xe_gt *gt = pc_to_gt(pc); int ret; - xe_device_mem_access_get(pc_to_xe(pc)); mutex_lock(&pc->freq_lock); if (!pc->freq_ready) { /* Might be in the middle of a gt reset */ @@ -511,7 +498,6 @@ fw: XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); out: mutex_unlock(&pc->freq_lock); - xe_device_mem_access_put(pc_to_xe(pc)); return ret; } @@ -528,7 +514,6 @@ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq) { int ret; - xe_device_mem_access_get(pc_to_xe(pc)); mutex_lock(&pc->freq_lock); if (!pc->freq_ready) { /* Might be in the middle of a gt reset */ @@ -544,8 +529,6 @@ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq) out: mutex_unlock(&pc->freq_lock); - xe_device_mem_access_put(pc_to_xe(pc)); - return ret; } @@ -561,7 +544,6 @@ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq) { int ret; - xe_device_mem_access_get(pc_to_xe(pc)); mutex_lock(&pc->freq_lock); if (!pc->freq_ready) { /* Might be in the middle of a gt reset */ @@ -577,7 +559,6 @@ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq) out: mutex_unlock(&pc->freq_lock); - xe_device_mem_access_put(pc_to_xe(pc)); return ret; } @@ -594,7 +575,6 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq) { int ret; - xe_device_mem_access_get(pc_to_xe(pc)); mutex_lock(&pc->freq_lock); if (!pc->freq_ready) { /* Might be in the middle of a gt reset */ @@ -610,7 +590,6 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq) out: mutex_unlock(&pc->freq_lock); - xe_device_mem_access_put(pc_to_xe(pc)); return ret; } @@ -623,8 +602,6 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc) struct xe_gt *gt = pc_to_gt(pc); u32 reg, gt_c_state; - xe_device_mem_access_get(gt_to_xe(gt)); - if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) { reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1); gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg); @@ -633,8 +610,6 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc) gt_c_state = REG_FIELD_GET(RCN_MASK, reg); } - xe_device_mem_access_put(gt_to_xe(gt)); - switch (gt_c_state) { case GT_C6: return GT_IDLE_C6; @@ -654,9 +629,7 @@ u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc) struct xe_gt *gt = pc_to_gt(pc); u32 reg; - xe_device_mem_access_get(gt_to_xe(gt)); reg = xe_mmio_read32(gt, GT_GFX_RC6); - xe_device_mem_access_put(gt_to_xe(gt)); return reg; } @@ -670,9 +643,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc) struct xe_gt *gt = pc_to_gt(pc); u64 reg; - xe_device_mem_access_get(gt_to_xe(gt)); reg = xe_mmio_read32(gt, MTL_MEDIA_MC6); - xe_device_mem_access_put(gt_to_xe(gt)); return reg; } @@ -801,23 +772,19 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc) if (xe->info.skip_guc_pc) return 0; - xe_device_mem_access_get(pc_to_xe(pc)); - ret = pc_action_setup_gucrc(pc, 
XE_GUCRC_HOST_CONTROL); if (ret) - goto out; + return ret; ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); if (ret) - goto out; + return ret; xe_gt_idle_disable_c6(gt); XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); -out: - xe_device_mem_access_put(pc_to_xe(pc)); - return ret; + return 0; } static void pc_init_pcode_freq(struct xe_guc_pc *pc) @@ -870,11 +837,9 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) xe_gt_assert(gt, xe_device_uc_enabled(xe)); - xe_device_mem_access_get(pc_to_xe(pc)); - ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); if (ret) - goto out_fail_force_wake; + return ret; if (xe->info.skip_guc_pc) { if (xe->info.platform != XE_PVC) @@ -914,8 +879,6 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) out: XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); -out_fail_force_wake: - xe_device_mem_access_put(pc_to_xe(pc)); return ret; } @@ -928,12 +891,9 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc) struct xe_device *xe = pc_to_xe(pc); int ret; - xe_device_mem_access_get(pc_to_xe(pc)); - if (xe->info.skip_guc_pc) { xe_gt_idle_disable_c6(pc_to_gt(pc)); - ret = 0; - goto out; + return 0; } mutex_lock(&pc->freq_lock); @@ -942,16 +902,14 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc) ret = pc_action_shutdown(pc); if (ret) - goto out; + return ret; if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) { drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n"); - ret = -EIO; + return -EIO; } -out: - xe_device_mem_access_put(pc_to_xe(pc)); - return ret; + return 0; } /** @@ -962,14 +920,6 @@ out: static void xe_guc_pc_fini(struct drm_device *drm, void *arg) { struct xe_guc_pc *pc = arg; - struct xe_device *xe = pc_to_xe(pc); - - if (xe->info.skip_guc_pc) { - xe_device_mem_access_get(xe); - xe_gt_idle_disable_c6(pc_to_gt(pc)); - xe_device_mem_access_put(xe); - return; - } xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL); XE_WARN_ON(xe_guc_pc_gucrc_disable(pc)); diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index e2a4c3b5e9..3757e0dc5c 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1257,6 +1257,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) return 0; err_entity: + mutex_unlock(&guc->submission_state.lock); xe_sched_entity_fini(&ge->entity); err_sched: xe_sched_fini(&ge->sched); diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 2ba4fb9511..aca519f5b8 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -33,7 +33,6 @@ #include "xe_sync.h" #include "xe_trace.h" #include "xe_vm.h" -#include "xe_wa.h" /** * struct xe_migrate - migrate context. @@ -299,10 +298,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, } /* - * Due to workaround 16017236439, odd instance hardware copy engines are - * faster than even instance ones. - * This function returns the mask involving all fast copy engines and the - * reserved copy engine to be used as logical mask for migrate engine. * Including the reserved copy engine is required to avoid deadlocks due to * migrate jobs servicing the faults gets stuck behind the job that faulted. 
*/ @@ -316,8 +311,7 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt) if (hwe->class != XE_ENGINE_CLASS_COPY) continue; - if (!XE_WA(gt, 16017236439) || - xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1) + if (xe_gt_is_usm_hwe(gt, hwe)) logical_mask |= BIT(hwe->logical_instance); } @@ -368,6 +362,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) if (!hwe || !logical_mask) return ERR_PTR(-EINVAL); + /* + * XXX: Currently only reserving 1 (likely slow) BCS instance on + * PVC, may want to revisit if performance is needed. + */ m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe, EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_PERMANENT | diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c index b324dc2a5d..81f4ae2ea0 100644 --- a/drivers/gpu/drm/xe/xe_pcode.c +++ b/drivers/gpu/drm/xe/xe_pcode.c @@ -10,6 +10,7 @@ #include <drm/drm_managed.h> +#include "xe_device.h" #include "xe_gt.h" #include "xe_mmio.h" #include "xe_pcode_api.h" @@ -43,8 +44,6 @@ static int pcode_mailbox_status(struct xe_gt *gt) [PCODE_ERROR_MASK] = {-EPROTO, "Unknown"}, }; - lockdep_assert_held(>->pcode.lock); - err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK; if (err) { drm_err(>_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err, @@ -55,17 +54,15 @@ static int pcode_mailbox_status(struct xe_gt *gt) return 0; } -static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1, - unsigned int timeout_ms, bool return_data, - bool atomic) +static int __pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1, + unsigned int timeout_ms, bool return_data, + bool atomic) { int err; if (gt_to_xe(gt)->info.skip_pcode) return 0; - lockdep_assert_held(>->pcode.lock); - if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0) return -EAGAIN; @@ -87,6 +84,18 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1, return pcode_mailbox_status(gt); } +static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1, + unsigned int timeout_ms, bool return_data, + bool atomic) +{ + if (gt_to_xe(gt)->info.skip_pcode) + return 0; + + lockdep_assert_held(>->pcode.lock); + + return __pcode_mailbox_rw(gt, mbox, data0, data1, timeout_ms, return_data, atomic); +} + int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout) { int err; @@ -109,15 +118,19 @@ int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1) return err; } -static int xe_pcode_try_request(struct xe_gt *gt, u32 mbox, - u32 request, u32 reply_mask, u32 reply, - u32 *status, bool atomic, int timeout_us) +static int pcode_try_request(struct xe_gt *gt, u32 mbox, + u32 request, u32 reply_mask, u32 reply, + u32 *status, bool atomic, int timeout_us, bool locked) { int slept, wait = 10; for (slept = 0; slept < timeout_us; slept += wait) { - *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true, - atomic); + if (locked) + *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true, + atomic); + else + *status = __pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true, + atomic); if ((*status == 0) && ((request & reply_mask) == reply)) return 0; @@ -158,8 +171,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request, mutex_lock(>->pcode.lock); - ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status, - false, timeout_base_ms * 1000); + ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status, + false, timeout_base_ms * 1000, true); if (!ret) goto out; @@ -177,8 +190,8 @@ int 
xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request, "PCODE timeout, retrying with preemption disabled\n"); drm_WARN_ON_ONCE(>_to_xe(gt)->drm, timeout_base_ms > 1); preempt_disable(); - ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status, - true, timeout_base_ms * 1000); + ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status, + true, 50 * 1000, true); preempt_enable(); out: @@ -238,59 +251,71 @@ unlock: } /** - * xe_pcode_init - Ensure PCODE is initialized - * @gt: gt instance + * xe_pcode_ready - Ensure PCODE is initialized + * @xe: xe instance + * @locked: true if lock held, false otherwise * - * This function ensures that PCODE is properly initialized. To be called during - * probe and resume paths. + * PCODE init mailbox is polled only on root gt of root tile + * as the root tile provides the initialization is complete only + * after all the tiles have completed the initialization. + * Called only on early probe without locks and with locks in + * resume path. * - * It returns 0 on success, and -error number on failure. + * Returns 0 on success, and -error number on failure. */ -int xe_pcode_init(struct xe_gt *gt) +int xe_pcode_ready(struct xe_device *xe, bool locked) { u32 status, request = DGFX_GET_INIT_STATUS; + struct xe_gt *gt = xe_root_mmio_gt(xe); int timeout_us = 180000000; /* 3 min */ int ret; - if (gt_to_xe(gt)->info.skip_pcode) + if (xe->info.skip_pcode) return 0; - if (!IS_DGFX(gt_to_xe(gt))) + if (!IS_DGFX(xe)) return 0; - mutex_lock(>->pcode.lock); - ret = xe_pcode_try_request(gt, DGFX_PCODE_STATUS, request, - DGFX_INIT_STATUS_COMPLETE, - DGFX_INIT_STATUS_COMPLETE, - &status, false, timeout_us); - mutex_unlock(>->pcode.lock); + if (locked) + mutex_lock(>->pcode.lock); + + ret = pcode_try_request(gt, DGFX_PCODE_STATUS, request, + DGFX_INIT_STATUS_COMPLETE, + DGFX_INIT_STATUS_COMPLETE, + &status, false, timeout_us, locked); + + if (locked) + mutex_unlock(>->pcode.lock); if (ret) - drm_err(>_to_xe(gt)->drm, + drm_err(&xe->drm, "PCODE initialization timedout after: 3 min\n"); return ret; } /** - * xe_pcode_probe - Prepare xe_pcode and also ensure PCODE is initialized. + * xe_pcode_init: initialize components of PCODE * @gt: gt instance * - * This function initializes the xe_pcode component, and when needed, it ensures - * that PCODE has properly performed its initialization and it is really ready - * to go. To be called once only during probe. - * - * It returns 0 on success, and -error number on failure. + * This function initializes the xe_pcode component. + * To be called once only during probe. */ -int xe_pcode_probe(struct xe_gt *gt) +void xe_pcode_init(struct xe_gt *gt) { drmm_mutex_init(>_to_xe(gt)->drm, >->pcode.lock); +} - if (gt_to_xe(gt)->info.skip_pcode) - return 0; - - if (!IS_DGFX(gt_to_xe(gt))) - return 0; - - return xe_pcode_init(gt); +/** + * xe_pcode_probe_early: initializes PCODE + * @xe: xe instance + * + * This function checks the initialization status of PCODE + * To be called once only during early probe without locks. 
+ * + * Returns 0 on success, error code otherwise + */ +int xe_pcode_probe_early(struct xe_device *xe) +{ + return xe_pcode_ready(xe, false); } diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h index 08cb1d047c..3f54c6d2a5 100644 --- a/drivers/gpu/drm/xe/xe_pcode.h +++ b/drivers/gpu/drm/xe/xe_pcode.h @@ -8,9 +8,11 @@ #include <linux/types.h> struct xe_gt; +struct xe_device; -int xe_pcode_probe(struct xe_gt *gt); -int xe_pcode_init(struct xe_gt *gt); +void xe_pcode_init(struct xe_gt *gt); +int xe_pcode_probe_early(struct xe_device *xe); +int xe_pcode_ready(struct xe_device *xe, bool locked); int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq, u32 max_gt_freq); int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1); diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 53b3b0b019..944cf4d760 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -54,13 +54,15 @@ int xe_pm_suspend(struct xe_device *xe) u8 id; int err; + drm_dbg(&xe->drm, "Suspending device\n"); + for_each_gt(gt, xe, id) xe_gt_suspend_prepare(gt); /* FIXME: Super racey... */ err = xe_bo_evict_all(xe); if (err) - return err; + goto err; xe_display_pm_suspend(xe); @@ -68,7 +70,7 @@ int xe_pm_suspend(struct xe_device *xe) err = xe_gt_suspend(gt); if (err) { xe_display_pm_resume(xe); - return err; + goto err; } } @@ -76,7 +78,11 @@ int xe_pm_suspend(struct xe_device *xe) xe_display_pm_suspend_late(xe); + drm_dbg(&xe->drm, "Device suspended\n"); return 0; +err: + drm_dbg(&xe->drm, "Device suspend failed %d\n", err); + return err; } /** @@ -92,14 +98,14 @@ int xe_pm_resume(struct xe_device *xe) u8 id; int err; + drm_dbg(&xe->drm, "Resuming device\n"); + for_each_tile(tile, xe, id) xe_wa_apply_tile_workarounds(tile); - for_each_gt(gt, xe, id) { - err = xe_pcode_init(gt); - if (err) - return err; - } + err = xe_pcode_ready(xe, true); + if (err) + return err; xe_display_pm_resume_early(xe); @@ -109,7 +115,7 @@ int xe_pm_resume(struct xe_device *xe) */ err = xe_bo_restore_kernel(xe); if (err) - return err; + goto err; xe_irq_resume(xe); @@ -120,9 +126,13 @@ int xe_pm_resume(struct xe_device *xe) err = xe_bo_restore_user(xe); if (err) - return err; + goto err; + drm_dbg(&xe->drm, "Device resumed\n"); return 0; +err: + drm_dbg(&xe->drm, "Device resume failed %d\n", err); + return err; } static bool xe_pm_pci_d3cold_capable(struct xe_device *xe) @@ -310,11 +320,9 @@ int xe_pm_runtime_resume(struct xe_device *xe) xe->d3cold.power_lost = xe_guc_in_reset(>->uc.guc); if (xe->d3cold.allowed && xe->d3cold.power_lost) { - for_each_gt(gt, xe, id) { - err = xe_pcode_init(gt); - if (err) - goto out; - } + err = xe_pcode_ready(xe, true); + if (err) + goto out; /* * This only restores pinned memory which is the memory diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index 5b2b37b598..820c8d73c4 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -79,6 +79,16 @@ static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i) return i; } +static int emit_flush_dw(u32 *dw, int i) +{ + dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW; + dw[i++] = 0; + dw[i++] = 0; + dw[i++] = 0; + + return i; +} + static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb, u32 *dw, int i) { @@ -233,10 +243,12 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc i = emit_bb_start(batch_addr, ppgtt_flag, dw, i); - if (job->user_fence.used) + if (job->user_fence.used) { + i = 
emit_flush_dw(dw, i); i = emit_store_imm_ppgtt_posted(job->user_fence.addr, job->user_fence.value, dw, i); + } i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i); @@ -292,10 +304,12 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, i = emit_bb_start(batch_addr, ppgtt_flag, dw, i); - if (job->user_fence.used) + if (job->user_fence.used) { + i = emit_flush_dw(dw, i); i = emit_store_imm_ppgtt_posted(job->user_fence.addr, job->user_fence.value, dw, i); + } i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i); diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index 88eb33acd5..face8d6b2a 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -256,12 +256,12 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev) if (ret) goto err_dp; + drm_bridge_add(dpsub->bridge); + if (dpsub->dma_enabled) { ret = zynqmp_dpsub_drm_init(dpsub); if (ret) goto err_disp; - } else { - drm_bridge_add(dpsub->bridge); } dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed"); @@ -288,9 +288,8 @@ static void zynqmp_dpsub_remove(struct platform_device *pdev) if (dpsub->drm) zynqmp_dpsub_drm_cleanup(dpsub); - else - drm_bridge_remove(dpsub->bridge); + drm_bridge_remove(dpsub->bridge); zynqmp_disp_remove(dpsub); zynqmp_dp_remove(dpsub); diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c index fd58a86b08..d022bfb5e9 100644 --- a/drivers/greybus/interface.c +++ b/drivers/greybus/interface.c @@ -693,6 +693,7 @@ static void gb_interface_release(struct device *dev) trace_gb_interface_release(intf); + cancel_work_sync(&intf->mode_switch_work); kfree(intf); } diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c index 5b24d5f637..c2f9fc1f20 100644 --- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c +++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c @@ -227,6 +227,11 @@ static void amd_sfh_resume(struct amd_mp2_dev *mp2) struct amd_mp2_sensor_info info; int i, status; + if (!cl_data->is_any_sensor_enabled) { + amd_sfh_clear_intr(mp2); + return; + } + for (i = 0; i < cl_data->num_hid_devices; i++) { if (cl_data->sensor_sts[i] == SENSOR_DISABLED) { info.sensor_idx = cl_data->sensor_idx[i]; @@ -252,6 +257,11 @@ static void amd_sfh_suspend(struct amd_mp2_dev *mp2) struct amdtp_cl_data *cl_data = mp2->cl_data; int i, status; + if (!cl_data->is_any_sensor_enabled) { + amd_sfh_clear_intr(mp2); + return; + } + for (i = 0; i < cl_data->num_hid_devices; i++) { if (cl_data->sensor_idx[i] != HPD_IDX && cl_data->sensor_sts[i] == SENSOR_ENABLED) { diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 78cdfb8b9a..88cbb2fe6a 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -335,36 +335,20 @@ static int asus_raw_event(struct hid_device *hdev, if (drvdata->quirks & QUIRK_MEDION_E1239T) return asus_e1239t_event(drvdata, data, size); - if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) { + /* + * Skip these report ID, the device emits a continuous stream associated + * with the AURA mode it is in which looks like an 'echo'. + */ + if (report->id == FEATURE_KBD_LED_REPORT_ID1 || report->id == FEATURE_KBD_LED_REPORT_ID2) + return -1; + if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) { /* - * Skip these report ID, the device emits a continuous stream associated - * with the AURA mode it is in which looks like an 'echo'. 
+ * G713 and G733 send these codes on some keypresses, depending on + * the key pressed it can trigger a shutdown event if not caught. */ - if (report->id == FEATURE_KBD_LED_REPORT_ID1 || - report->id == FEATURE_KBD_LED_REPORT_ID2) { + if (data[0] == 0x02 && data[1] == 0x30) { return -1; - /* Additional report filtering */ - } else if (report->id == FEATURE_KBD_REPORT_ID) { - /* - * G14 and G15 send these codes on some keypresses with no - * discernable reason for doing so. We'll filter them out to avoid - * unmapped warning messages later. - */ - if (data[1] == 0xea || data[1] == 0xec || data[1] == 0x02 || - data[1] == 0x8a || data[1] == 0x9e) { - return -1; - } - } - if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) { - /* - * G713 and G733 send these codes on some keypresses, depending on - * the key pressed it can trigger a shutdown event if not caught. - */ - if(data[0] == 0x02 && data[1] == 0x30) { - return -1; - } } - } if (drvdata->quirks & QUIRK_ROG_CLAYMORE_II_KEYBOARD) { @@ -1250,6 +1234,19 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, rdesc[205] = 0x01; } + /* match many more n-key devices */ + if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize > 15) { + for (int i = 0; i < *rsize - 15; i++) { + /* offset to the count from 0x5a report part always 14 */ + if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a && + rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) { + hid_info(hdev, "Fixing up Asus N-Key report descriptor\n"); + rdesc[i + 15] = 0x01; + break; + } + } + } + return rdesc; } @@ -1319,4 +1316,4 @@ static struct hid_driver asus_driver = { }; module_hid_driver(asus_driver); -MODULE_LICENSE("GPL");
\ No newline at end of file +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index de7a477d66..751a73ae7d 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1448,7 +1448,6 @@ static void implement(const struct hid_device *hid, u8 *report, hid_warn(hid, "%s() called with too large value %d (n: %d)! (%s)\n", __func__, value, n, current->comm); - WARN_ON(1); value &= m; } } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 8376fb5e2d..68b0f39dea 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -823,6 +823,7 @@ #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e #define USB_DEVICE_ID_LOGITECH_T651 0xb00c #define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD 0xb309 +#define USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD 0xbb00 #define USB_DEVICE_ID_LOGITECH_C007 0xc007 #define USB_DEVICE_ID_LOGITECH_C077 0xc077 #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 3c3c497b6b..37958edec5 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1284,8 +1284,10 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, */ msleep(50); - if (retval) + if (retval) { + kfree(dj_report); return retval; + } } /* diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 04a014cd2a..56fc78841f 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2081,6 +2081,12 @@ static const struct hid_device_id mt_devices[] = { USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) }, + /* Logitech devices */ + { .driver_data = MT_CLS_NSMU, + HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_LOGITECH, + USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) }, + /* MosArt panels */ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, MT_USB_DEVICE(USB_VENDOR_ID_ASUS, diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c index 58b15750db..ff9078ad19 100644 --- a/drivers/hid/hid-nvidia-shield.c +++ b/drivers/hid/hid-nvidia-shield.c @@ -283,7 +283,9 @@ static struct input_dev *shield_haptics_create( return haptics; input_set_capability(haptics, EV_FF, FF_RUMBLE); - input_ff_create_memless(haptics, NULL, play_effect); + ret = input_ff_create_memless(haptics, NULL, play_effect); + if (ret) + goto err; ret = input_register_device(haptics); if (ret) diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c index 5b91fb106c..091e379332 100644 --- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c +++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c @@ -31,6 +31,7 @@ struct i2c_hid_of_elan { struct regulator *vcc33; struct regulator *vccio; struct gpio_desc *reset_gpio; + bool no_reset_on_power_off; const struct elan_i2c_hid_chip_data *chip_data; }; @@ -40,17 +41,17 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops) container_of(ops, struct i2c_hid_of_elan, ops); int ret; + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1); + if (ihid_elan->vcc33) { ret = regulator_enable(ihid_elan->vcc33); if (ret) - return ret; + goto err_deassert_reset; } ret = regulator_enable(ihid_elan->vccio); - if (ret) { - regulator_disable(ihid_elan->vcc33); - return ret; - } + if (ret) + goto err_disable_vcc33; if (ihid_elan->chip_data->post_power_delay_ms) msleep(ihid_elan->chip_data->post_power_delay_ms); @@ -60,6 +61,15 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops) msleep(ihid_elan->chip_data->post_gpio_reset_on_delay_ms); return 0; + +err_disable_vcc33: + if 
(ihid_elan->vcc33) + regulator_disable(ihid_elan->vcc33); +err_deassert_reset: + if (ihid_elan->no_reset_on_power_off) + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0); + + return ret; } static void elan_i2c_hid_power_down(struct i2chid_ops *ops) @@ -67,7 +77,14 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops) struct i2c_hid_of_elan *ihid_elan = container_of(ops, struct i2c_hid_of_elan, ops); - gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1); + /* + * Do not assert reset when the hardware allows for it to remain + * deasserted regardless of the state of the (shared) power supply to + * avoid wasting power when the supply is left on. + */ + if (!ihid_elan->no_reset_on_power_off) + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1); + if (ihid_elan->chip_data->post_gpio_reset_off_delay_ms) msleep(ihid_elan->chip_data->post_gpio_reset_off_delay_ms); @@ -79,6 +96,7 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops) static int i2c_hid_of_elan_probe(struct i2c_client *client) { struct i2c_hid_of_elan *ihid_elan; + int ret; ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL); if (!ihid_elan) @@ -93,21 +111,38 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client) if (IS_ERR(ihid_elan->reset_gpio)) return PTR_ERR(ihid_elan->reset_gpio); + ihid_elan->no_reset_on_power_off = of_property_read_bool(client->dev.of_node, + "no-reset-on-power-off"); + ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio"); - if (IS_ERR(ihid_elan->vccio)) - return PTR_ERR(ihid_elan->vccio); + if (IS_ERR(ihid_elan->vccio)) { + ret = PTR_ERR(ihid_elan->vccio); + goto err_deassert_reset; + } ihid_elan->chip_data = device_get_match_data(&client->dev); if (ihid_elan->chip_data->main_supply_name) { ihid_elan->vcc33 = devm_regulator_get(&client->dev, ihid_elan->chip_data->main_supply_name); - if (IS_ERR(ihid_elan->vcc33)) - return PTR_ERR(ihid_elan->vcc33); + if (IS_ERR(ihid_elan->vcc33)) { + ret = PTR_ERR(ihid_elan->vcc33); + goto err_deassert_reset; + } } - return i2c_hid_core_probe(client, &ihid_elan->ops, - ihid_elan->chip_data->hid_descriptor_address, 0); + ret = i2c_hid_core_probe(client, &ihid_elan->ops, + ihid_elan->chip_data->hid_descriptor_address, 0); + if (ret) + goto err_deassert_reset; + + return 0; + +err_deassert_reset: + if (ihid_elan->no_reset_on_power_off) + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0); + + return ret; } static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = { diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 56bd4f02f3..4b8232360c 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -173,6 +173,11 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* request and enable interrupt */ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) { + dev_err(dev, "ISH: Failed to allocate IRQ vectors\n"); + return ret; + } + if (!pdev->msi_enabled && !pdev->msix_enabled) irq_flag = IRQF_SHARED; diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c index 6500ca548f..ca2dff1589 100644 --- a/drivers/hwmon/intel-m10-bmc-hwmon.c +++ b/drivers/hwmon/intel-m10-bmc-hwmon.c @@ -429,7 +429,7 @@ static const struct m10bmc_sdata n6000bmc_curr_tbl[] = { }; static const struct m10bmc_sdata n6000bmc_power_tbl[] = { - { 0x724, 0x0, 0x0, 0x0, 0x0, 1, "Board Power" }, + { 0x724, 0x0, 0x0, 0x0, 0x0, 1000, "Board Power" }, }; static const struct 
hwmon_channel_info * const n6000bmc_hinfo[] = { diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c index 001799bc28..b8548105cd 100644 --- a/drivers/hwmon/ltc2992.c +++ b/drivers/hwmon/ltc2992.c @@ -876,9 +876,11 @@ static int ltc2992_parse_dt(struct ltc2992_state *st) ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val); if (!ret) { - if (!val) + if (!val) { + fwnode_handle_put(child); return dev_err_probe(&st->client->dev, -EINVAL, "shunt resistor value cannot be zero\n"); + } st->r_sense_uohm[addr] = val; } } diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c index 1f96e94967..439dd3dba5 100644 --- a/drivers/hwmon/shtc1.c +++ b/drivers/hwmon/shtc1.c @@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client) if (np) { data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io"); - data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision"); + data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision"); } else { if (client->dev.platform_data) data->setup = *(struct shtc1_platform_data *)dev->platform_data; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index c2ca4a02df..a0bdfabddb 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -1240,6 +1240,8 @@ static void etm4_init_arch_data(void *info) drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0); /* QSUPP, bits[16:15] Q element support field */ drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0); + if (drvdata->q_support) + drvdata->q_filt = !!(etmidr0 & TRCIDR0_QFILT); /* TSSIZE, bits[28:24] Global timestamp size field */ drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0); @@ -1732,16 +1734,14 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcccctlr = etm4x_read32(csa, TRCCCCTLR); state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR); state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR); - state->trcqctlr = etm4x_read32(csa, TRCQCTLR); + if (drvdata->q_filt) + state->trcqctlr = etm4x_read32(csa, TRCQCTLR); state->trcvictlr = etm4x_read32(csa, TRCVICTLR); state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR); state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR); if (drvdata->nr_pe_cmp) state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR); - state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR); - state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR); - state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR); for (i = 0; i < drvdata->nrseqstate - 1; i++) state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i)); @@ -1758,7 +1758,8 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i)); } - for (i = 0; i < drvdata->nr_resource * 2; i++) + /* Resource selector pair 0 is reserved */ + for (i = 2; i < drvdata->nr_resource * 2; i++) state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i)); for (i = 0; i < drvdata->nr_ss_cmp; i++) { @@ -1843,8 +1844,10 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata) { int i; struct etmv4_save_state *state = drvdata->save_state; - struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base); - struct csdev_access *csa = &tmp_csa; + struct csdev_access *csa = &drvdata->csdev->access; + + if (WARN_ON(!drvdata->csdev)) + return; etm4_cs_unlock(drvdata, csa); etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET); @@ -1863,16 +1866,14 @@ static void 
__etm4_cpu_restore(struct etmv4_drvdata *drvdata) etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR); etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR); etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR); - etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR); + if (drvdata->q_filt) + etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR); etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR); etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR); etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR); if (drvdata->nr_pe_cmp) etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR); - etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR); - etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR); - etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR); for (i = 0; i < drvdata->nrseqstate - 1; i++) etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i)); @@ -1889,7 +1890,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata) etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i)); } - for (i = 0; i < drvdata->nr_resource * 2; i++) + /* Resource selector pair 0 is reserved */ + for (i = 2; i < drvdata->nr_resource * 2; i++) etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i)); for (i = 0; i < drvdata->nr_ss_cmp; i++) { @@ -2213,6 +2215,9 @@ static int etm4_probe_platform_dev(struct platform_device *pdev) ret = etm4_probe(&pdev->dev); pm_runtime_put(&pdev->dev); + if (ret) + pm_runtime_disable(&pdev->dev); + return ret; } diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 9ea678bc2e..9e9165f62e 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -43,9 +43,6 @@ #define TRCVIIECTLR 0x084 #define TRCVISSCTLR 0x088 #define TRCVIPCSSCTLR 0x08C -#define TRCVDCTLR 0x0A0 -#define TRCVDSACCTLR 0x0A4 -#define TRCVDARCCTLR 0x0A8 /* Derived resources registers */ #define TRCSEQEVRn(n) (0x100 + (n * 4)) /* n = 0-2 */ #define TRCSEQRSTEVR 0x118 @@ -90,9 +87,6 @@ /* Address Comparator registers n = 0-15 */ #define TRCACVRn(n) (0x400 + (n * 8)) #define TRCACATRn(n) (0x480 + (n * 8)) -/* Data Value Comparator Value registers, n = 0-7 */ -#define TRCDVCVRn(n) (0x500 + (n * 16)) -#define TRCDVCMRn(n) (0x580 + (n * 16)) /* ContextID/Virtual ContextID comparators, n = 0-7 */ #define TRCCIDCVRn(n) (0x600 + (n * 8)) #define TRCVMIDCVRn(n) (0x640 + (n * 8)) @@ -141,6 +135,7 @@ #define TRCIDR0_TRCCCI BIT(7) #define TRCIDR0_RETSTACK BIT(9) #define TRCIDR0_NUMEVENT_MASK GENMASK(11, 10) +#define TRCIDR0_QFILT BIT(14) #define TRCIDR0_QSUPP_MASK GENMASK(16, 15) #define TRCIDR0_TSSIZE_MASK GENMASK(28, 24) @@ -272,9 +267,6 @@ /* List of registers accessible via System instructions */ #define ETM4x_ONLY_SYSREG_LIST(op, val) \ CASE_##op((val), TRCPROCSELR) \ - CASE_##op((val), TRCVDCTLR) \ - CASE_##op((val), TRCVDSACCTLR) \ - CASE_##op((val), TRCVDARCCTLR) \ CASE_##op((val), TRCOSLAR) #define ETM_COMMON_SYSREG_LIST(op, val) \ @@ -422,22 +414,6 @@ CASE_##op((val), TRCACATRn(13)) \ CASE_##op((val), TRCACATRn(14)) \ CASE_##op((val), TRCACATRn(15)) \ - CASE_##op((val), TRCDVCVRn(0)) \ - CASE_##op((val), TRCDVCVRn(1)) \ - CASE_##op((val), TRCDVCVRn(2)) \ - CASE_##op((val), TRCDVCVRn(3)) \ - CASE_##op((val), TRCDVCVRn(4)) \ - CASE_##op((val), TRCDVCVRn(5)) \ - CASE_##op((val), TRCDVCVRn(6)) \ - CASE_##op((val), TRCDVCVRn(7)) \ - CASE_##op((val), TRCDVCMRn(0)) \ - CASE_##op((val), TRCDVCMRn(1)) \ - CASE_##op((val), 
TRCDVCMRn(2)) \ - CASE_##op((val), TRCDVCMRn(3)) \ - CASE_##op((val), TRCDVCMRn(4)) \ - CASE_##op((val), TRCDVCMRn(5)) \ - CASE_##op((val), TRCDVCMRn(6)) \ - CASE_##op((val), TRCDVCMRn(7)) \ CASE_##op((val), TRCCIDCVRn(0)) \ CASE_##op((val), TRCCIDCVRn(1)) \ CASE_##op((val), TRCCIDCVRn(2)) \ @@ -907,9 +883,6 @@ struct etmv4_save_state { u32 trcviiectlr; u32 trcvissctlr; u32 trcvipcssctlr; - u32 trcvdctlr; - u32 trcvdsacctlr; - u32 trcvdarcctlr; u32 trcseqevr[ETM_MAX_SEQ_STATES]; u32 trcseqrstevr; @@ -982,6 +955,7 @@ struct etmv4_save_state { * @os_unlock: True if access to management registers is allowed. * @instrp0: Tracing of load and store instructions * as P0 elements is supported. + * @q_filt: Q element filtering support, if Q elements are supported. * @trcbb: Indicates if the trace unit supports branch broadcast tracing. * @trccond: If the trace unit supports conditional * instruction tracing. @@ -1044,6 +1018,7 @@ struct etmv4_drvdata { bool boot_enable; bool os_unlock; bool instrp0; + bool q_filt; bool trcbb; bool trccond; bool retstack; diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 147d338c19..8dad239aba 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -290,6 +290,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Meteor Lake-S CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xae24), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Meteor Lake-S */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7f26), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Raptor Lake-S */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26), .driver_data = (kernel_ulong_t)&intel_th_2x, @@ -300,6 +310,26 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Granite Rapids */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0963), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Granite Rapids SOC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3256), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Sapphire Rapids SOC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3456), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Lunar Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Alder Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f), .driver_data = (kernel_ulong_t)&intel_th_2x, diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index 534fbefc7f..20895d3915 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -868,8 +868,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, return -ENOMEM; stm->major = register_chrdev(0, stm_data->name, &stm_fops); - if (stm->major < 0) - goto err_free; + if (stm->major < 0) { + err = stm->major; + vfree(stm); + return err; + } device_initialize(&stm->dev); stm->dev.devt = MKDEV(stm->major, 0); @@ -913,10 +916,8 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, err_device: unregister_chrdev(stm->major, stm_data->name); - /* matches device_initialize() above */ + /* calls stm_device_release() */ put_device(&stm->dev); -err_free: - vfree(stm); return err; } diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c index d6eeea5166..131a67d9d4 100644 --- a/drivers/i2c/busses/i2c-at91-slave.c +++ b/drivers/i2c/busses/i2c-at91-slave.c @@ -106,8 +106,7 @@ static int 
at91_unreg_slave(struct i2c_client *slave) static u32 at91_twi_func(struct i2c_adapter *adapter) { - return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL - | I2C_FUNC_SMBUS_READ_BLOCK_DATA; + return I2C_FUNC_SLAVE; } static const struct i2c_algorithm at91_twi_algorithm_slave = { diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 4bb7d67569..2fce3e84ba 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -633,6 +633,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) if (hold_clear) { ctrl_reg &= ~CDNS_I2C_CR_HOLD; + ctrl_reg &= ~CDNS_I2C_CR_CLR_FIFO; /* * In case of Xilinx Zynq SOC, clear the HOLD bit before transfer size * register reaches '0'. This is an IP bug which causes transfer size diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 2e079cf20b..78e2c47e3d 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = { void i2c_dw_configure_slave(struct dw_i2c_dev *dev) { - dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY; + dev->functionality = I2C_FUNC_SLAVE; dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED; diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 6d72e4e126..36e8f6196a 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -99,6 +99,7 @@ struct lpi2c_imx_struct { __u8 *rx_buf; __u8 *tx_buf; struct completion complete; + unsigned long rate_per; unsigned int msglen; unsigned int delivered; unsigned int block_data; @@ -212,9 +213,7 @@ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx) lpi2c_imx_set_mode(lpi2c_imx); - clk_rate = clk_get_rate(lpi2c_imx->clks[0].clk); - if (!clk_rate) - return -EINVAL; + clk_rate = lpi2c_imx->rate_per; if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST) filt = 0; @@ -611,6 +610,20 @@ static int lpi2c_imx_probe(struct platform_device *pdev) if (ret) return ret; + /* + * Lock the parent clock rate to avoid getting parent clock upon + * each transfer + */ + ret = devm_clk_rate_exclusive_get(&pdev->dev, lpi2c_imx->clks[0].clk); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "can't lock I2C peripheral clock rate\n"); + + lpi2c_imx->rate_per = clk_get_rate(lpi2c_imx->clks[0].clk); + if (!lpi2c_imx->rate_per) + return dev_err_probe(&pdev->dev, -EINVAL, + "can't get I2C peripheral clock rate\n"); + pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index e106af83ce..350ccfbe86 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -442,8 +442,8 @@ static int ocores_init(struct device *dev, struct ocores_i2c *i2c) oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8); /* Init the device */ - oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK); oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN); + oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK); return 0; } diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index bbea521b05..a73f5bb9a1 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -550,17 +550,13 @@ static int synquacer_i2c_probe(struct platform_device *pdev) 
device_property_read_u32(&pdev->dev, "socionext,pclk-rate", &i2c->pclkrate); - i2c->pclk = devm_clk_get(&pdev->dev, "pclk"); - if (PTR_ERR(i2c->pclk) == -EPROBE_DEFER) - return -EPROBE_DEFER; - if (!IS_ERR_OR_NULL(i2c->pclk)) { - dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk); - - ret = clk_prepare_enable(i2c->pclk); - if (ret) - return dev_err_probe(&pdev->dev, ret, "failed to enable clock\n"); - i2c->pclkrate = clk_get_rate(i2c->pclk); - } + i2c->pclk = devm_clk_get_enabled(&pdev->dev, "pclk"); + if (IS_ERR(i2c->pclk)) + return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk), + "failed to get and enable clock\n"); + + dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk); + i2c->pclkrate = clk_get_rate(i2c->pclk); if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE || i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE) @@ -615,8 +611,6 @@ static void synquacer_i2c_remove(struct platform_device *pdev) struct synquacer_i2c *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adapter); - if (!IS_ERR(i2c->pclk)) - clk_disable_unprepare(i2c->pclk); }; static const struct of_device_id synquacer_i2c_dt_ids[] __maybe_unused = { diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index d6037a3286..14ae0cfc32 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -445,6 +445,11 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev)); } +static struct i2c_adapter *i2c_acpi_find_adapter_by_adev(struct acpi_device *adev) +{ + return i2c_find_adapter_by_fwnode(acpi_fwnode_handle(adev)); +} + static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value, void *arg) { @@ -471,11 +476,17 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value, break; client = i2c_acpi_find_client_by_adev(adev); - if (!client) - break; + if (client) { + i2c_unregister_device(client); + put_device(&client->dev); + } + + adapter = i2c_acpi_find_adapter_by_adev(adev); + if (adapter) { + acpi_unbind_one(&adapter->dev); + put_device(&adapter->dev); + } - i2c_unregister_device(client); - put_device(&client->dev); break; } diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index 5ee4db6898..bb299ce02c 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -415,6 +415,19 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) int ret; mutex_lock(&master->lock); + /* + * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing + * readl_relaxed_poll_timeout() to return immediately. Consequently, + * ibitype will be 0 since it was last updated only after the 8th SCL + * cycle, leading to missed client IBI handlers. + * + * A typical scenario is when IBIWON occurs and bus arbitration is lost + * at svc_i3c_master_priv_xfers(). + * + * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI. 
+ */ + writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS); + /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */ writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI | SVC_I3C_MCTRL_IBIRESP_AUTO, @@ -429,9 +442,6 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) goto reenable_ibis; } - /* Clear the interrupt status */ - writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS); - status = readl(master->regs + SVC_I3C_MSTATUS); ibitype = SVC_I3C_MSTATUS_IBITYPE(status); ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status); @@ -1080,7 +1090,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master, * and yield the above events handler. */ if (SVC_I3C_MSTATUS_IBIWON(reg)) { - ret = -ENXIO; + ret = -EAGAIN; *actual_len = 0; goto emit_stop; } diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c index 7475ec2a56..13e1bba45b 100644 --- a/drivers/iio/adc/ad9467.c +++ b/drivers/iio/adc/ad9467.c @@ -225,11 +225,11 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index, } static const struct iio_chan_spec ad9434_channels[] = { - AD9467_CHAN(0, 0, 12, 'S'), + AD9467_CHAN(0, 0, 12, 's'), }; static const struct iio_chan_spec ad9467_channels[] = { - AD9467_CHAN(0, 0, 16, 'S'), + AD9467_CHAN(0, 0, 16, 's'), }; static const struct ad9467_chip_info ad9467_chip_tbl = { diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index 4156639b3c..e3b2158829 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -175,6 +175,7 @@ static int adi_axi_adc_probe(struct platform_device *pdev) struct adi_axi_adc_state *st; void __iomem *base; unsigned int ver; + struct clk *clk; int ret; st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL); @@ -195,6 +196,10 @@ static int adi_axi_adc_probe(struct platform_device *pdev) if (!expected_ver) return -ENODEV; + clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); + /* * Force disable the core. Up to the frontend to enable us. And we can * still read/write registers... @@ -207,9 +212,9 @@ static int adi_axi_adc_probe(struct platform_device *pdev) if (ret) return ret; - if (*expected_ver > ver) { + if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(*expected_ver)) { dev_err(&pdev->dev, - "IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n", + "Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n", ADI_AXI_PCORE_VER_MAJOR(*expected_ver), ADI_AXI_PCORE_VER_MINOR(*expected_ver), ADI_AXI_PCORE_VER_PATCH(*expected_ver), diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c index e0c2742da5..8a0c357422 100644 --- a/drivers/iio/adc/pac1934.c +++ b/drivers/iio/adc/pac1934.c @@ -787,6 +787,15 @@ static int pac1934_read_raw(struct iio_dev *indio_dev, s64 curr_energy; int ret, channel = chan->channel - 1; + /* + * For AVG the index should be between 5 and 8. + * To calculate the real index of PAC1934_CH_VOLTAGE_AVERAGE + * or PAC1934_CH_CURRENT, we need + * to remove the added offset (PAC1934_MAX_NUM_CHANNELS). 
+ */ + if (channel >= PAC1934_MAX_NUM_CHANNELS) + channel = channel - PAC1934_MAX_NUM_CHANNELS; + ret = pac1934_retrieve_data(info, PAC1934_MIN_UPDATE_WAIT_TIME_US); if (ret < 0) return ret; diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index b5d3c9cea5..283c207571 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -2234,6 +2234,7 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev, if (vin[0] != val || vin[1] >= adc_info->max_channels) { dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n", vin[0], vin[1]); + ret = -EINVAL; goto err; } } else if (ret != -EINVAL) { diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c index 3b0f9598a7..adceb1b7a0 100644 --- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c +++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c @@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP); int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts, uint32_t period, bool fifo) { + uint32_t mult; + /* when FIFO is on, prevent odr change if one is already pending */ if (fifo && ts->new_mult != 0) return -EAGAIN; - ts->new_mult = period / ts->chip.clock_period; + mult = period / ts->chip.clock_period; + if (mult != ts->mult) + ts->new_mult = mult; return 0; } @@ -101,6 +105,9 @@ static bool inv_update_chip_period(struct inv_sensors_timestamp *ts, static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts) { + const int64_t period_min = ts->min_period * ts->mult; + const int64_t period_max = ts->max_period * ts->mult; + int64_t add_max, sub_max; int64_t delta, jitter; int64_t adjust; @@ -108,11 +115,13 @@ static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts) delta = ts->it.lo - ts->timestamp; /* adjust timestamp while respecting jitter */ + add_max = period_max - (int64_t)ts->period; + sub_max = period_min - (int64_t)ts->period; jitter = INV_SENSORS_TIMESTAMP_JITTER((int64_t)ts->period, ts->chip.jitter); if (delta > jitter) - adjust = jitter; + adjust = add_max; else if (delta < -jitter) - adjust = -jitter; + adjust = sub_max; else adjust = 0; diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index 076bc9ecfb..4763402dbc 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, s64 tmp = *val * (3767897513LL / 25LL); *val = div_s64_rem(tmp, 1000000000LL, val2); - return IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT_PLUS_NANO; } mutex_lock(&st->lock); diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c index 5d42ab9b17..67d74a1a1b 100644 --- a/drivers/iio/imu/bmi323/bmi323_core.c +++ b/drivers/iio/imu/bmi323/bmi323_core.c @@ -1391,7 +1391,7 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p) &data->buffer.channels, ARRAY_SIZE(data->buffer.channels)); if (ret) - return IRQ_NONE; + goto out; } else { for_each_set_bit(bit, indio_dev->active_scan_mask, BMI323_CHAN_MAX) { @@ -1400,13 +1400,14 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p) &data->buffer.channels[index++], BMI323_BYTES_PER_SAMPLE); if (ret) - return IRQ_NONE; + goto out; } } iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, iio_get_time_ns(indio_dev)); +out: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c 
b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c index f67bd5a39b..6d9cb010f6 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c @@ -129,10 +129,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev, /* update data FIFO write */ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0); ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en); - if (ret) - goto out_unlock; - - ret = inv_icm42600_buffer_update_watermark(st); out_unlock: mutex_unlock(&st->lock); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c index 3df0a715e8..fc80b4a97f 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c @@ -129,10 +129,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev, /* update data FIFO write */ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0); ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en); - if (ret) - goto out_unlock; - - ret = inv_icm42600_buffer_update_watermark(st); out_unlock: mutex_unlock(&st->lock); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 4302093b92..8684ba2469 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1654,8 +1654,10 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) return NULL; indio_dev = &iio_dev_opaque->indio_dev; - indio_dev->priv = (char *)iio_dev_opaque + - ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN); + + if (sizeof_priv) + indio_dev->priv = (char *)iio_dev_opaque + + ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN); indio_dev->dev.parent = parent; indio_dev->dev.type = &iio_device_type; diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 62e9e93d91..0199d055e0 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -1394,12 +1394,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2) /* * Temperature is returned in Celsius degrees in fractional - * form down 2^16. We rescale by x1000 to return milli Celsius - * to respect IIO ABI. + * form down 2^16. We rescale by x1000 to return millidegrees + * Celsius to respect IIO ABI. 
*/ - *val = raw_temp * 1000; - *val2 = 16; - return IIO_VAL_FRACTIONAL_LOG2; + raw_temp = sign_extend32(raw_temp, 23); + *val = ((s64)raw_temp * 1000) / (1 << 16); + return IIO_VAL_INT; } static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2) diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c index 1ff091b2f7..d0a516d56d 100644 --- a/drivers/iio/pressure/dps310.c +++ b/drivers/iio/pressure/dps310.c @@ -730,7 +730,7 @@ static int dps310_read_pressure(struct dps310_data *data, int *val, int *val2, } } -static int dps310_calculate_temp(struct dps310_data *data) +static int dps310_calculate_temp(struct dps310_data *data, int *val) { s64 c0; s64 t; @@ -746,7 +746,9 @@ static int dps310_calculate_temp(struct dps310_data *data) t = c0 + ((s64)data->temp_raw * (s64)data->c1); /* Convert to milliCelsius and scale the temperature */ - return (int)div_s64(t * 1000LL, kt); + *val = (int)div_s64(t * 1000LL, kt); + + return 0; } static int dps310_read_temp(struct dps310_data *data, int *val, int *val2, @@ -768,11 +770,10 @@ static int dps310_read_temp(struct dps310_data *data, int *val, int *val2, if (rc) return rc; - rc = dps310_calculate_temp(data); - if (rc < 0) + rc = dps310_calculate_temp(data, val); + if (rc) return rc; - *val = rc; return IIO_VAL_INT; case IIO_CHAN_INFO_OVERSAMPLING_RATIO: diff --git a/drivers/iio/temperature/mcp9600.c b/drivers/iio/temperature/mcp9600.c index 4684580429..7a3eef5d5e 100644 --- a/drivers/iio/temperature/mcp9600.c +++ b/drivers/iio/temperature/mcp9600.c @@ -52,7 +52,8 @@ static int mcp9600_read(struct mcp9600_data *data, if (ret < 0) return ret; - *val = ret; + + *val = sign_extend32(ret, 15); return 0; } diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c index 1f5c962c18..f7f88498ba 100644 --- a/drivers/iio/temperature/mlx90635.c +++ b/drivers/iio/temperature/mlx90635.c @@ -947,9 +947,9 @@ static int mlx90635_probe(struct i2c_client *client) "failed to allocate regmap\n"); regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee); - if (IS_ERR(regmap)) - return dev_err_probe(&client->dev, PTR_ERR(regmap), - "failed to allocate regmap\n"); + if (IS_ERR(regmap_ee)) + return dev_err_probe(&client->dev, PTR_ERR(regmap_ee), + "failed to allocate EEPROM regmap\n"); mlx90635 = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index f253295795..be0743dac3 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -348,16 +348,10 @@ static int dst_fetch_ha(const struct dst_entry *dst, static bool has_gateway(const struct dst_entry *dst, sa_family_t family) { - struct rtable *rt; - struct rt6_info *rt6; - - if (family == AF_INET) { - rt = container_of(dst, struct rtable, dst); - return rt->rt_uses_gateway; - } + if (family == AF_INET) + return dst_rtable(dst)->rt_uses_gateway; - rt6 = container_of(dst, struct rt6_info, dst); - return rt6->rt6i_flags & RTF_GATEWAY; + return dst_rt6_info(dst)->rt6i_flags & RTF_GATEWAY; } static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr, diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 1e2cd7c871..64ace0b968 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -715,8 +715,10 @@ cma_validate_port(struct ib_device *device, u32 port, rcu_read_lock(); ndev = rcu_dereference(sgid_attr->ndev); if (!net_eq(dev_net(ndev), dev_addr->net) || - ndev->ifindex != 
bound_if_index) + ndev->ifindex != bound_if_index) { + rdma_put_gid_attr(sgid_attr); sgid_attr = ERR_PTR(-ENODEV); + } rcu_read_unlock(); goto out; } diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 9dca451ed5..6974922e56 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -107,8 +107,6 @@ struct bnxt_re_gsi_context { struct bnxt_re_sqp_entries *sqp_tbl; }; -#define BNXT_RE_MIN_MSIX 2 -#define BNXT_RE_MAX_MSIX 9 #define BNXT_RE_AEQ_IDX 0 #define BNXT_RE_NQ_IDX 1 #define BNXT_RE_GEN_P5_MAX_VF 64 @@ -168,7 +166,7 @@ struct bnxt_re_dev { struct bnxt_qplib_rcfw rcfw; /* NQ */ - struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX]; + struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX]; /* Device Resources */ struct bnxt_qplib_dev_attr dev_attr; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 439d0c7c5d..04258676d0 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -1013,7 +1013,8 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) hwq_attr.stride = sizeof(struct sq_sge); hwq_attr.depth = bnxt_qplib_get_depth(sq); hwq_attr.aux_stride = psn_sz; - hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode); + hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode) + : 0; /* Update msn tbl size */ if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) { hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)); diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 7250d0643b..68e22f368d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -149,7 +149,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) return ret; } - ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL)); + ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL)); if (ret) { ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret); goto err_put; @@ -163,7 +163,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) return 0; err_xa: - xa_erase(&cq_table->array, hr_cq->cqn); + xa_erase_irq(&cq_table->array, hr_cq->cqn); err_put: hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); @@ -182,7 +182,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret, hr_cq->cqn); - xa_erase(&cq_table->array, hr_cq->cqn); + xa_erase_irq(&cq_table->array, hr_cq->cqn); /* Waiting interrupt process procedure carried out */ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq); @@ -476,13 +476,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type) struct ib_event event; struct ib_cq *ibcq; - hr_cq = xa_load(&hr_dev->cq_table.array, - cqn & (hr_dev->caps.num_cqs - 1)); - if (!hr_cq) { - dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn); - return; - } - if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID && event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR && event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) { @@ -491,7 +484,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type) return; } - refcount_inc(&hr_cq->refcount); + xa_lock(&hr_dev->cq_table.array); + hr_cq = xa_load(&hr_dev->cq_table.array, + cqn & (hr_dev->caps.num_cqs - 1)); + if (hr_cq) + 
refcount_inc(&hr_cq->refcount); + xa_unlock(&hr_dev->cq_table.array); + if (!hr_cq) { + dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn); + return; + } ibcq = &hr_cq->ib_cq; if (ibcq->event_handler) { diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index c3cbd0a494..0b47c6d688 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -100,6 +100,9 @@ #define CQ_BANKID_SHIFT 2 #define CQ_BANKID_MASK GENMASK(1, 0) +#define HNS_ROCE_MAX_CQ_COUNT 0xFFFF +#define HNS_ROCE_MAX_CQ_PERIOD 0xFFFF + enum { SERV_TYPE_RC, SERV_TYPE_UC, diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index a4b3f19161..658c522be7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -281,7 +281,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, return hem; fail: - hns_roce_free_hem(hr_dev, hem); + kfree(hem); return NULL; } diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h index 6fb51db968..9c415b2541 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.h +++ b/drivers/infiniband/hw/hns/hns_roce_hem.h @@ -57,16 +57,16 @@ enum { }; #define check_whether_bt_num_3(type, hop_num) \ - (type < HEM_TYPE_MTT && hop_num == 2) + ((type) < HEM_TYPE_MTT && (hop_num) == 2) #define check_whether_bt_num_2(type, hop_num) \ - ((type < HEM_TYPE_MTT && hop_num == 1) || \ - (type >= HEM_TYPE_MTT && hop_num == 2)) + (((type) < HEM_TYPE_MTT && (hop_num) == 1) || \ + ((type) >= HEM_TYPE_MTT && (hop_num) == 2)) #define check_whether_bt_num_1(type, hop_num) \ - ((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \ - (type >= HEM_TYPE_MTT && hop_num == 1) || \ - (type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0)) + (((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \ + ((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \ + ((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0)) struct hns_roce_hem { void *buf; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index ba7ae792d2..8800464c9a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -2105,7 +2105,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev) caps->gmv_bt_num * (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz)); - caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE / + caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz); } else { u32 func_num = max_t(u32, 1, hr_dev->func_num); @@ -3711,8 +3711,9 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, wc->status == IB_WC_WR_FLUSH_ERR)) return; - ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status); - print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe, + ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n", + cqe_status); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe, cq->cqe_size, false); wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS); @@ -5802,7 +5803,7 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) dev_info(hr_dev->dev, "cq_period(%u) reached the upper limit, adjusted to 65.\n", cq_period); - cq_period = HNS_ROCE_MAX_CQ_PERIOD; + cq_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08; } cq_period *= HNS_ROCE_CLOCK_ADJUST; } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h 
b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index df04bc8ede..dfed6b4ddb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -1334,7 +1334,7 @@ struct fmea_ram_ecc { /* only for RNR timeout issue of HIP08 */ #define HNS_ROCE_CLOCK_ADJUST 1000 -#define HNS_ROCE_MAX_CQ_PERIOD 65 +#define HNS_ROCE_MAX_CQ_PERIOD_HIP08 65 #define HNS_ROCE_MAX_EQ_PERIOD 65 #define HNS_ROCE_RNR_TIMER_10NS 1 #define HNS_ROCE_1US_CFG 999 diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 1dc60c2b2b..d202258368 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -37,9 +37,11 @@ #include <rdma/ib_smi.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_cache.h> +#include "hnae3.h" #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_hem.h" +#include "hns_roce_hw_v2.h" static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, const u8 *addr) @@ -192,6 +194,12 @@ static int hns_roce_query_device(struct ib_device *ib_dev, IB_ATOMIC_HCA : IB_ATOMIC_NONE; props->max_pkeys = 1; props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay; + props->max_ah = INT_MAX; + props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD; + props->cq_caps.max_cq_moderation_count = HNS_ROCE_MAX_CQ_COUNT; + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) + props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08; + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { props->max_srq = hr_dev->caps.num_srqs; props->max_srq_wr = hr_dev->caps.max_srq_wrs; diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 9e05b57a2d..80c050d7d0 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_mr *mr = to_hr_mr(ibmr); struct hns_roce_mtr *mtr = &mr->pbl_mtr; - int ret = 0; + int ret, sg_num = 0; mr->npages = 0; mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count, sizeof(dma_addr_t), GFP_KERNEL); if (!mr->page_list) - return ret; + return sg_num; - ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); - if (ret < 1) { + sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); + if (sg_num < 1) { ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n", - mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret); + mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num); goto err_page_list; } @@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages); if (ret) { ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret); - ret = 0; + sg_num = 0; } else { mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size); - ret = mr->npages; } err_page_list: kvfree(mr->page_list); mr->page_list = NULL; - return ret; + return sg_num; } static void hns_roce_mw_free(struct hns_roce_dev *hr_dev, diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 4abae94778..8f48c6723e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -123,7 +123,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) return ret; } - ret = 
xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL)); + ret = xa_err(xa_store_irq(&srq_table->xa, srq->srqn, srq, GFP_KERNEL)); if (ret) { ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret); goto err_put; @@ -136,7 +136,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) return 0; err_xa: - xa_erase(&srq_table->xa, srq->srqn); + xa_erase_irq(&srq_table->xa, srq->srqn); err_put: hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); @@ -154,7 +154,7 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n", ret, srq->srqn); - xa_erase(&srq_table->xa, srq->srqn); + xa_erase_irq(&srq_table->xa, srq->srqn); if (refcount_dec_and_test(&srq->refcount)) complete(&srq->free); diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c index 4a71e678d0..89fcc09ded 100644 --- a/drivers/infiniband/hw/mana/cq.c +++ b/drivers/infiniband/hw/mana/cq.c @@ -39,37 +39,13 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, } cq->cqe = attr->cqe; - cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, - IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(cq->umem)) { - err = PTR_ERR(cq->umem); - ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n", - err); - return err; - } - - err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region); + err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue); if (err) { - ibdev_dbg(ibdev, - "Failed to create dma region for create cq, %d\n", - err); - goto err_release_umem; + ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err); + return err; } - ibdev_dbg(ibdev, - "create_dma_region ret %d gdma_region 0x%llx\n", - err, cq->gdma_region); - - /* - * The CQ ID is not known at this time. 
The ID is generated at create_qp - */ - cq->id = INVALID_QUEUE_ID; - return 0; - -err_release_umem: - ib_umem_release(cq->umem); - return err; } int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) @@ -78,24 +54,16 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) struct ib_device *ibdev = ibcq->device; struct mana_ib_dev *mdev; struct gdma_context *gc; - int err; mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); gc = mdev_to_gc(mdev); - err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region); - if (err) { - ibdev_dbg(ibdev, - "Failed to destroy dma region, %d\n", err); - return err; + if (cq->queue.id != INVALID_QUEUE_ID) { + kfree(gc->cq_table[cq->queue.id]); + gc->cq_table[cq->queue.id] = NULL; } - if (cq->id != INVALID_QUEUE_ID) { - kfree(gc->cq_table[cq->id]); - gc->cq_table[cq->id] = NULL; - } - - ib_umem_release(cq->umem); + mana_ib_destroy_queue(mdev, &cq->queue); return 0; } @@ -113,8 +81,10 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq) struct gdma_context *gc = mdev_to_gc(mdev); struct gdma_queue *gdma_cq; + if (cq->queue.id >= gc->max_num_cqs) + return -EINVAL; /* Create CQ table entry */ - WARN_ON(gc->cq_table[cq->id]); + WARN_ON(gc->cq_table[cq->queue.id]); gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL); if (!gdma_cq) return -ENOMEM; @@ -122,7 +92,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq) gdma_cq->cq.context = cq; gdma_cq->type = GDMA_CQ; gdma_cq->cq.callback = mana_ib_cq_handler; - gdma_cq->id = cq->id; - gc->cq_table[cq->id] = gdma_cq; + gdma_cq->id = cq->queue.id; + gc->cq_table[cq->queue.id] = gdma_cq; return 0; } diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c index 71e33feee6..4524c6b807 100644 --- a/drivers/infiniband/hw/mana/main.c +++ b/drivers/infiniband/hw/mana/main.c @@ -237,6 +237,49 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret); } +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size, + struct mana_ib_queue *queue) +{ + struct ib_umem *umem; + int err; + + queue->umem = NULL; + queue->id = INVALID_QUEUE_ID; + queue->gdma_region = GDMA_INVALID_DMA_REGION; + + umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(umem)) { + err = PTR_ERR(umem); + ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err); + return err; + } + + err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region); + if (err) { + ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err); + goto free_umem; + } + queue->umem = umem; + + ibdev_dbg(&mdev->ib_dev, + "create_dma_region ret %d gdma_region 0x%llx\n", + err, queue->gdma_region); + + return 0; +free_umem: + ib_umem_release(umem); + return err; +} + +void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue) +{ + /* Ignore return code as there is not much we can do about it. + * The error message is printed inside. 
+ */ + mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region); + ib_umem_release(queue->umem); +} + static int mana_ib_gd_first_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc, diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h index f83390eebb..6acb5c281c 100644 --- a/drivers/infiniband/hw/mana/mana_ib.h +++ b/drivers/infiniband/hw/mana/mana_ib.h @@ -45,6 +45,12 @@ struct mana_ib_adapter_caps { u32 max_inline_data_size; }; +struct mana_ib_queue { + struct ib_umem *umem; + u64 gdma_region; + u64 id; +}; + struct mana_ib_dev { struct ib_device ib_dev; struct gdma_dev *gdma_dev; @@ -82,10 +88,8 @@ struct mana_ib_mr { struct mana_ib_cq { struct ib_cq ibcq; - struct ib_umem *umem; + struct mana_ib_queue queue; int cqe; - u64 gdma_region; - u64 id; u32 comp_vector; }; @@ -169,6 +173,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem, int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, mana_handle_t gdma_region); +int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size, + struct mana_ib_queue *queue); +void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue); + struct ib_wq *mana_ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c index b70b13484f..13a49d8fd4 100644 --- a/drivers/infiniband/hw/mana/mr.c +++ b/drivers/infiniband/hw/mana/mr.c @@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x", start, iova, length, access_flags); + access_flags &= ~IB_ACCESS_OPTIONAL; if (access_flags & ~VALID_MR_FLAGS) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c index 6e7627745c..d7485ee6a6 100644 --- a/drivers/infiniband/hw/mana/qp.c +++ b/drivers/infiniband/hw/mana/qp.c @@ -197,7 +197,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd, wq_spec.gdma_region = wq->gdma_region; wq_spec.queue_size = wq->wq_buf_size; - cq_spec.gdma_region = cq->gdma_region; + cq_spec.gdma_region = cq->queue.gdma_region; cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE; cq_spec.modr_ctx_id = 0; eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues]; @@ -213,16 +213,16 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd, /* The GDMA regions are now owned by the WQ object */ wq->gdma_region = GDMA_INVALID_DMA_REGION; - cq->gdma_region = GDMA_INVALID_DMA_REGION; + cq->queue.gdma_region = GDMA_INVALID_DMA_REGION; wq->id = wq_spec.queue_index; - cq->id = cq_spec.queue_index; + cq->queue.id = cq_spec.queue_index; ibdev_dbg(&mdev->ib_dev, "ret %d rx_object 0x%llx wq id %llu cq id %llu\n", - ret, wq->rx_object, wq->id, cq->id); + ret, wq->rx_object, wq->id, cq->queue.id); - resp.entries[i].cqid = cq->id; + resp.entries[i].cqid = cq->queue.id; resp.entries[i].wqid = wq->id; mana_ind_table[i] = wq->rx_object; @@ -232,7 +232,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd, if (ret) goto fail; - gdma_cq_allocated[i] = gc->cq_table[cq->id]; + gdma_cq_allocated[i] = gc->cq_table[cq->queue.id]; } resp.num_entries = i; @@ -264,7 +264,7 @@ fail: wq = container_of(ibwq, struct mana_ib_wq, ibwq); cq = container_of(ibcq, struct mana_ib_cq, ibcq); - gc->cq_table[cq->id] = NULL; + gc->cq_table[cq->queue.id] = NULL; kfree(gdma_cq_allocated[i]); mana_destroy_wq_obj(mpc, GDMA_RQ, 
wq->rx_object); @@ -374,7 +374,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd, wq_spec.gdma_region = qp->sq_gdma_region; wq_spec.queue_size = ucmd.sq_buf_size; - cq_spec.gdma_region = send_cq->gdma_region; + cq_spec.gdma_region = send_cq->queue.gdma_region; cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE; cq_spec.modr_ctx_id = 0; eq_vec = send_cq->comp_vector % gc->max_num_queues; @@ -392,10 +392,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd, /* The GDMA regions are now owned by the WQ object */ qp->sq_gdma_region = GDMA_INVALID_DMA_REGION; - send_cq->gdma_region = GDMA_INVALID_DMA_REGION; + send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION; qp->sq_id = wq_spec.queue_index; - send_cq->id = cq_spec.queue_index; + send_cq->queue.id = cq_spec.queue_index; /* Create CQ table entry */ err = mana_ib_install_cq_cb(mdev, send_cq); @@ -404,10 +404,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd, ibdev_dbg(&mdev->ib_dev, "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err, - qp->tx_object, qp->sq_id, send_cq->id); + qp->tx_object, qp->sq_id, send_cq->queue.id); resp.sqid = qp->sq_id; - resp.cqid = send_cq->id; + resp.cqid = send_cq->queue.id; resp.tx_vp_offset = pd->tx_vp_offset; err = ib_copy_to_udata(udata, &resp, sizeof(resp)); @@ -422,7 +422,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd, err_release_gdma_cq: kfree(gdma_cq); - gc->cq_table[send_cq->id] = NULL; + gc->cq_table[send_cq->queue.id] = NULL; err_destroy_wq_obj: mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c2b557e642..9fb8a54423 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3760,10 +3760,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) spin_lock_init(&dev->dm.lock); dev->dm.dev = mdev; return 0; -err: - mlx5r_macsec_dealloc_gids(dev); err_mp: mlx5_ib_cleanup_multiport_master(dev); +err: + mlx5r_macsec_dealloc_gids(dev); return err; } diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 96ffbbaf0a..5a22be14d9 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -30,6 +30,7 @@ * SOFTWARE. */ +#include <linux/io.h> #include <rdma/ib_umem_odp.h> #include "mlx5_ib.h" #include <linux/jiffies.h> @@ -108,7 +109,6 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id, __be32 mmio_wqe[16] = {}; unsigned long flags; unsigned int idx; - int i; if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)) return -EIO; @@ -148,10 +148,8 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id, * we hit doorbell */ wmb(); - for (i = 0; i < 8; i++) - mlx5_write64(&mmio_wqe[i * 2], - bf->bfreg->map + bf->offset + i * 8); - io_stop_wc(); + __iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe, + sizeof(mmio_wqe) / 8); bf->offset ^= bf->buf_size; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index a8de35c07c..f255a12e26 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -643,9 +643,10 @@ struct mlx5_ib_mkey { unsigned int ndescs; struct wait_queue_head wait; refcount_t usecount; - /* User Mkey must hold either a rb_key or a cache_ent. */ + /* Cacheable user Mkey must hold either a rb_key or a cache_ent. 
*/ struct mlx5r_cache_rb_key rb_key; struct mlx5_cache_ent *cache_ent; + u8 cacheable : 1; }; #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE) diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index a8ee2ca1f4..d3c1f63791 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); MLX5_SET(mkc, mkc, access_mode_4_2, (ent->rb_key.access_mode >> 2) & 0x7); + MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats); MLX5_SET(mkc, mkc, translations_octword_size, get_mkc_octo_size(ent->rb_key.access_mode, @@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache, new = &((*new)->rb_left); if (cmp < 0) new = &((*new)->rb_right); - if (cmp == 0) { - mutex_unlock(&cache->rb_lock); + if (cmp == 0) return -EEXIST; - } } /* Add new node and rebalance tree. */ @@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, } mr->mmkey.cache_ent = ent; mr->mmkey.type = MLX5_MKEY_MR; + mr->mmkey.rb_key = ent->rb_key; + mr->mmkey.cacheable = true; init_waitqueue_head(&mr->mmkey.wait); return mr; } @@ -1158,6 +1159,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, if (IS_ERR(mr)) return mr; mr->mmkey.rb_key = rb_key; + mr->mmkey.cacheable = true; return mr; } @@ -1570,7 +1572,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev, unsigned int diffs = current_access_flags ^ target_access_flags; if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING)) + IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING | + IB_ACCESS_REMOTE_ATOMIC)) return false; return mlx5r_umr_can_reconfig(dev, current_access_flags, target_access_flags); @@ -1835,6 +1838,23 @@ end: return ret; } +static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) +{ + struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); + struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; + + if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) + return 0; + + if (ent) { + spin_lock_irq(&ent->mkeys_queue.lock); + ent->in_use--; + mr->mmkey.cache_ent = NULL; + spin_unlock_irq(&ent->mkeys_queue.lock); + } + return destroy_mkey(dev, mr); +} + int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct mlx5_ib_mr *mr = to_mmr(ibmr); @@ -1880,16 +1900,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) } /* Stop DMA */ - if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length)) - if (mlx5r_umr_revoke_mr(mr) || - cache_ent_find_and_store(dev, mr)) - mr->mmkey.cache_ent = NULL; - - if (!mr->mmkey.cache_ent) { - rc = destroy_mkey(to_mdev(mr->ibmr.device), mr); - if (rc) - return rc; - } + rc = mlx5_revoke_mr(mr); + if (rc) + return rc; if (mr->umem) { bool is_odp = is_odp_mr(mr); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index a056ea835d..84be0c3d56 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq, int err; struct mlx5_srq_attr in = {}; __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); + __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) / + sizeof(struct mlx5_wqe_data_seg); if (init_attr->srq_type != IB_SRQT_BASIC && init_attr->srq_type != IB_SRQT_XRC && init_attr->srq_type != 
IB_SRQT_TM) return -EOPNOTSUPP; - /* Sanity check SRQ size before proceeding */ - if (init_attr->attr.max_wr >= max_srq_wqes) { - mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", - init_attr->attr.max_wr, - max_srq_wqes); + /* Sanity check SRQ and sge size before proceeding */ + if (init_attr->attr.max_wr >= max_srq_wqes || + init_attr->attr.max_sge > max_sge_sz) { + mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n", + init_attr->attr.max_wr, max_srq_wqes, + init_attr->attr.max_sge, max_sge_sz); return -EINVAL; } diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index b78b8c0856..c997b7cbf2 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -131,12 +131,12 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) { int must_sched; - skb_queue_tail(&qp->resp_pkts, skb); - - must_sched = skb_queue_len(&qp->resp_pkts) > 1; + must_sched = skb_queue_len(&qp->resp_pkts) > 0; if (must_sched != 0) rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED); + skb_queue_tail(&qp->resp_pkts, skb); + if (must_sched) rxe_sched_task(&qp->comp.task); else diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index cd59666158..e5827064ab 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -366,18 +366,10 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) rxe_get(pkt->qp); atomic_inc(&pkt->qp->skb_out); - if (skb->protocol == htons(ETH_P_IP)) { + if (skb->protocol == htons(ETH_P_IP)) err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); - } else if (skb->protocol == htons(ETH_P_IPV6)) { + else err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); - } else { - rxe_dbg_qp(pkt->qp, "Unknown layer 3 protocol: %d\n", - skb->protocol); - atomic_dec(&pkt->qp->skb_out); - rxe_put(pkt->qp); - kfree_skb(skb); - return -EINVAL; - } if (unlikely(net_xmit_eval(err))) { rxe_dbg_qp(pkt->qp, "error sending packet: %d\n", err); diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 963382f625..fa2b87c749 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -354,6 +354,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp, * receive buffer later. For rmda operations additional * length checks are performed in check_rkey. 
*/ + if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) { + unsigned int payload = payload_size(pkt); + unsigned int recv_buffer_len = 0; + int i; + + for (i = 0; i < qp->resp.wqe->dma.num_sge; i++) + recv_buffer_len += qp->resp.wqe->dma.sge[i].length; + if (payload + 40 > recv_buffer_len) { + rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n"); + return RESPST_ERR_LENGTH; + } + } + if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))) { unsigned int mtu = qp->mtu; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 614581989b..a7e9510666 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe, int i; for (i = 0; i < ibwr->num_sge; i++, sge++) { - memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length); + memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length); p += sge->length; } } @@ -888,6 +888,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, { int err = 0; unsigned long flags; + int good = 0; spin_lock_irqsave(&qp->sq.sq_lock, flags); while (ibwr) { @@ -895,12 +896,15 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, if (err) { *bad_wr = ibwr; break; + } else { + good++; } ibwr = ibwr->next; } spin_unlock_irqrestore(&qp->sq.sq_lock, flags); - if (!err) + /* kickoff processing of any posted wqes */ + if (good) rxe_sched_task(&qp->req.task); spin_lock_irqsave(&qp->state_lock, flags); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 4bd161e86f..562df2b3ef 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -184,8 +184,12 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) ppriv = ipoib_priv(pdev); - snprintf(intf_name, sizeof(intf_name), "%s.%04x", - ppriv->dev->name, pkey); + /* If you increase IFNAMSIZ, update snprintf below + * to allow longer names. 
+ */ + BUILD_BUG_ON(IFNAMSIZ != 16); + snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", ppriv->dev->name, + pkey); ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); if (IS_ERR(ndev)) { diff --git a/drivers/input/input.c b/drivers/input/input.c index 7114854375..fd4997ba26 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1378,19 +1378,19 @@ static int input_print_modalias_bits(char *buf, int size, char name, const unsigned long *bm, unsigned int min_bit, unsigned int max_bit) { - int len = 0, i; + int bit = min_bit; + int len = 0; len += snprintf(buf, max(size, 0), "%c", name); - for (i = min_bit; i < max_bit; i++) - if (bm[BIT_WORD(i)] & BIT_MASK(i)) - len += snprintf(buf + len, max(size - len, 0), "%X,", i); + for_each_set_bit_from(bit, bm, max_bit) + len += snprintf(buf + len, max(size - len, 0), "%X,", bit); return len; } -static int input_print_modalias(char *buf, int size, const struct input_dev *id, - int add_cr) +static int input_print_modalias_parts(char *buf, int size, int full_len, + const struct input_dev *id) { - int len; + int len, klen, remainder, space; len = snprintf(buf, max(size, 0), "input:b%04Xv%04Xp%04Xe%04X-", @@ -1399,8 +1399,48 @@ static int input_print_modalias(char *buf, int size, const struct input_dev *id, len += input_print_modalias_bits(buf + len, size - len, 'e', id->evbit, 0, EV_MAX); - len += input_print_modalias_bits(buf + len, size - len, + + /* + * Calculate the remaining space in the buffer making sure we + * have place for the terminating 0. + */ + space = max(size - (len + 1), 0); + + klen = input_print_modalias_bits(buf + len, size - len, 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX); + len += klen; + + /* + * If we have more data than we can fit in the buffer, check + * if we can trim key data to fit in the rest. We will indicate + * that key data is incomplete by adding "+" sign at the end, like + * this: * "k1,2,3,45,+,". + * + * Note that the shortest key info (if present) is "k+," so we + * can only try to trim if key data is longer than that. + */ + if (full_len && size < full_len + 1 && klen > 3) { + remainder = full_len - len; + /* + * We can only trim if we have space for the remainder + * and also for at least "k+," which is 3 more characters. + */ + if (remainder <= space - 3) { + /* + * We are guaranteed to have 'k' in the buffer, so + * we need at least 3 additional bytes for storing + * "+," in addition to the remainder. + */ + for (int i = size - 1 - remainder - 3; i >= 0; i--) { + if (buf[i] == 'k' || buf[i] == ',') { + strcpy(buf + i + 1, "+,"); + len = i + 3; /* Not counting '\0' */ + break; + } + } + } + } + len += input_print_modalias_bits(buf + len, size - len, 'r', id->relbit, 0, REL_MAX); len += input_print_modalias_bits(buf + len, size - len, @@ -1416,12 +1456,25 @@ static int input_print_modalias(char *buf, int size, const struct input_dev *id, len += input_print_modalias_bits(buf + len, size - len, 'w', id->swbit, 0, SW_MAX); - if (add_cr) - len += snprintf(buf + len, max(size - len, 0), "\n"); - return len; } +static int input_print_modalias(char *buf, int size, const struct input_dev *id) +{ + int full_len; + + /* + * Printing is done in 2 passes: first one figures out total length + * needed for the modalias string, second one will try to trim key + * data in case the buffer is too small for the entire modalias. 
+ * If the buffer is too small regardless, it will fill as much as it + * can (without trimming key data) into the buffer and leave it to + * the caller to figure out what to do with the result. + */ + full_len = input_print_modalias_parts(NULL, 0, 0, id); + return input_print_modalias_parts(buf, size, full_len, id); +} + static ssize_t input_dev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf) @@ -1429,7 +1482,9 @@ static ssize_t input_dev_show_modalias(struct device *dev, struct input_dev *id = to_input_dev(dev); ssize_t len; - len = input_print_modalias(buf, PAGE_SIZE, id, 1); + len = input_print_modalias(buf, PAGE_SIZE, id); + if (len < PAGE_SIZE - 2) + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); return min_t(int, len, PAGE_SIZE); } @@ -1641,6 +1696,23 @@ static int input_add_uevent_bm_var(struct kobj_uevent_env *env, return 0; } +/* + * This is a pretty gross hack. When building uevent data the driver core + * may try adding more environment variables to kobj_uevent_env without + * telling us, so we have no idea how much of the buffer we can use to + * avoid overflows/-ENOMEM elsewhere. To work around this let's artificially + * reduce amount of memory we will use for the modalias environment variable. + * + * The potential additions are: + * + * SEQNUM=18446744073709551615 - (%llu - 28 bytes) + * HOME=/ (6 bytes) + * PATH=/sbin:/bin:/usr/sbin:/usr/bin (34 bytes) + * + * 68 bytes total. Allow extra buffer - 96 bytes + */ +#define UEVENT_ENV_EXTRA_LEN 96 + static int input_add_uevent_modalias_var(struct kobj_uevent_env *env, const struct input_dev *dev) { @@ -1650,9 +1722,11 @@ static int input_add_uevent_modalias_var(struct kobj_uevent_env *env, return -ENOMEM; len = input_print_modalias(&env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen, - dev, 0); - if (len >= (sizeof(env->buf) - env->buflen)) + (int)sizeof(env->buf) - env->buflen - + UEVENT_ENV_EXTRA_LEN, + dev); + if (len >= ((int)sizeof(env->buf) - env->buflen - + UEVENT_ENV_EXTRA_LEN)) return -ENOMEM; env->buflen += len; diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 6e8cc28deb..80d16c92a0 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -42,8 +42,8 @@ struct ims_pcu_backlight { #define IMS_PCU_PART_NUMBER_LEN 15 #define IMS_PCU_SERIAL_NUMBER_LEN 8 #define IMS_PCU_DOM_LEN 8 -#define IMS_PCU_FW_VERSION_LEN (9 + 1) -#define IMS_PCU_BL_VERSION_LEN (9 + 1) +#define IMS_PCU_FW_VERSION_LEN 16 +#define IMS_PCU_BL_VERSION_LEN 16 #define IMS_PCU_BL_RESET_REASON_LEN (2 + 1) #define IMS_PCU_PCU_B_DEVICE_ID 5 diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c index 5c288fe7ac..79f478d3a9 100644 --- a/drivers/input/misc/pm8xxx-vibrator.c +++ b/drivers/input/misc/pm8xxx-vibrator.c @@ -13,7 +13,8 @@ #define VIB_MAX_LEVEL_mV (3100) #define VIB_MIN_LEVEL_mV (1200) -#define VIB_MAX_LEVELS (VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV) +#define VIB_PER_STEP_mV (100) +#define VIB_MAX_LEVELS (VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV + VIB_PER_STEP_mV) #define MAX_FF_SPEED 0xff @@ -117,10 +118,10 @@ static void pm8xxx_work_handler(struct work_struct *work) vib->active = true; vib->level = ((VIB_MAX_LEVELS * vib->speed) / MAX_FF_SPEED) + VIB_MIN_LEVEL_mV; - vib->level /= 100; + vib->level /= VIB_PER_STEP_mV; } else { vib->active = false; - vib->level = VIB_MIN_LEVEL_mV / 100; + vib->level = VIB_MIN_LEVEL_mV / VIB_PER_STEP_mV; } pm8xxx_vib_set(vib, vib->active); diff --git a/drivers/input/mouse/cyapa.c 
b/drivers/input/mouse/cyapa.c index 5979deabe2..256f757a13 100644 --- a/drivers/input/mouse/cyapa.c +++ b/drivers/input/mouse/cyapa.c @@ -1347,10 +1347,16 @@ static int cyapa_suspend(struct device *dev) u8 power_mode; int error; - error = mutex_lock_interruptible(&cyapa->state_sync_lock); + error = mutex_lock_interruptible(&cyapa->input->mutex); if (error) return error; + error = mutex_lock_interruptible(&cyapa->state_sync_lock); + if (error) { + mutex_unlock(&cyapa->input->mutex); + return error; + } + /* * Runtime PM is enable only when device is in operational mode and * users in use, so need check it before disable it to @@ -1385,6 +1391,8 @@ static int cyapa_suspend(struct device *dev) cyapa->irq_wake = (enable_irq_wake(client->irq) == 0); mutex_unlock(&cyapa->state_sync_lock); + mutex_unlock(&cyapa->input->mutex); + return 0; } @@ -1394,6 +1402,7 @@ static int cyapa_resume(struct device *dev) struct cyapa *cyapa = i2c_get_clientdata(client); int error; + mutex_lock(&cyapa->input->mutex); mutex_lock(&cyapa->state_sync_lock); if (device_may_wakeup(dev) && cyapa->irq_wake) { @@ -1412,6 +1421,7 @@ static int cyapa_resume(struct device *dev) enable_irq(client->irq); mutex_unlock(&cyapa->state_sync_lock); + mutex_unlock(&cyapa->input->mutex); return 0; } diff --git a/drivers/input/serio/ioc3kbd.c b/drivers/input/serio/ioc3kbd.c index 50552dc7b4..676b0bda3d 100644 --- a/drivers/input/serio/ioc3kbd.c +++ b/drivers/input/serio/ioc3kbd.c @@ -200,9 +200,16 @@ static void ioc3kbd_remove(struct platform_device *pdev) serio_unregister_port(d->aux); } +static const struct platform_device_id ioc3kbd_id_table[] = { + { "ioc3-kbd", }, + { } +}; +MODULE_DEVICE_TABLE(platform, ioc3kbd_id_table); + static struct platform_driver ioc3kbd_driver = { .probe = ioc3kbd_probe, .remove_new = ioc3kbd_remove, + .id_table = ioc3kbd_id_table, .driver = { .name = "ioc3-kbd", }, diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c index 96735800b1..ba4cc08684 100644 --- a/drivers/interconnect/qcom/qcm2290.c +++ b/drivers/interconnect/qcom/qcm2290.c @@ -164,7 +164,7 @@ static struct qcom_icc_node mas_snoc_bimc = { .name = "mas_snoc_bimc", .buswidth = 16, .qos.ap_owned = true, - .qos.qos_port = 2, + .qos.qos_port = 6, .qos.qos_mode = NOC_QOS_MODE_BYPASS, .mas_rpm_id = 164, .slv_rpm_id = -1, diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index ac6754a85f..e740dc54c4 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -1678,8 +1678,17 @@ static void __init free_pci_segments(void) } } +static void __init free_sysfs(struct amd_iommu *iommu) +{ + if (iommu->iommu.dev) { + iommu_device_unregister(&iommu->iommu); + iommu_device_sysfs_remove(&iommu->iommu); + } +} + static void __init free_iommu_one(struct amd_iommu *iommu) { + free_sysfs(iommu); free_cwwb_sem(iommu); free_command_buffer(iommu); free_event_buffer(iommu); @@ -2097,6 +2106,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) amd_iommu_max_glx_val = glxval; else amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); + + iommu_enable_gt(iommu); } if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu)) @@ -2773,7 +2784,6 @@ static void early_enable_iommu(struct amd_iommu *iommu) iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); iommu_set_exclusion_range(iommu); - iommu_enable_gt(iommu); iommu_enable_ga(iommu); iommu_enable_xt(iommu); iommu_enable_irtcachedis(iommu); @@ -2830,7 +2840,6 @@ static void early_enable_iommus(void) iommu_disable_irtcachedis(iommu); 
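/*
 * A minimal sketch (not part of the merged patches) of the locking shape
 * introduced by the cyapa_suspend() hunk above: two mutexes are taken in a
 * fixed order, both interruptibly, and the first one is released again if
 * the second acquisition fails, so no lock is left held on the error path;
 * unlocking happens in reverse order. The function and parameter names
 * below are illustrative only.
 */
#include <linux/mutex.h>

static int suspend_with_two_locks(struct mutex *outer, struct mutex *inner)
{
        int error;

        error = mutex_lock_interruptible(outer);
        if (error)
                return error;

        error = mutex_lock_interruptible(inner);
        if (error) {
                /* Undo the first lock so nothing stays held on failure. */
                mutex_unlock(outer);
                return error;
        }

        /* ... suspend work protected by both locks goes here ... */

        mutex_unlock(inner);
        mutex_unlock(outer);
        return 0;
}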
iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); - iommu_enable_gt(iommu); iommu_enable_ga(iommu); iommu_enable_xt(iommu); iommu_enable_irtcachedis(iommu); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 41f93c3ab1..3afec8714c 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3402,7 +3402,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX); /* Add callback to free MSIs on teardown */ - devm_add_action(dev, arm_smmu_free_msis, dev); + devm_add_action_or_reset(dev, arm_smmu_free_msis, dev); } static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index a7ecd90303..e4a03588a8 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -221,12 +221,11 @@ int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON); int intel_iommu_enabled = 0; EXPORT_SYMBOL_GPL(intel_iommu_enabled); -static int dmar_map_gfx = 1; static int intel_iommu_superpage = 1; static int iommu_identity_mapping; static int iommu_skip_te_disable; +static int disable_igfx_iommu; -#define IDENTMAP_GFX 2 #define IDENTMAP_AZALIA 4 const struct iommu_ops intel_iommu_ops; @@ -265,7 +264,7 @@ static int __init intel_iommu_setup(char *str) no_platform_optin = 1; pr_info("IOMMU disabled\n"); } else if (!strncmp(str, "igfx_off", 8)) { - dmar_map_gfx = 0; + disable_igfx_iommu = 1; pr_info("Disable GFX device mapping\n"); } else if (!strncmp(str, "forcedac", 8)) { pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n"); @@ -2402,9 +2401,6 @@ static int device_def_domain_type(struct device *dev) if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) return IOMMU_DOMAIN_IDENTITY; - - if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) - return IOMMU_DOMAIN_IDENTITY; } return 0; @@ -2705,9 +2701,6 @@ static int __init init_dmars(void) iommu_set_root_entry(iommu); } - if (!dmar_map_gfx) - iommu_identity_mapping |= IDENTMAP_GFX; - check_tylersburg_isoch(); ret = si_domain_init(hw_pass_through); @@ -2798,7 +2791,7 @@ static void __init init_no_remapping_devices(void) /* This IOMMU has *only* gfx devices. Either bypass it or set the gfx_mapped flag, as appropriate */ drhd->gfx_dedicated = 1; - if (!dmar_map_gfx) + if (disable_igfx_iommu) drhd->ignored = 1; } } @@ -4875,7 +4868,7 @@ static void quirk_iommu_igfx(struct pci_dev *dev) return; pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); - dmar_map_gfx = 0; + disable_igfx_iommu = 1; } /* G4x/GM45 integrated gfx dmar support is totally busted. 
*/ @@ -4956,8 +4949,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) if (!(ggc & GGC_MEMORY_VT_ENABLED)) { pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); - dmar_map_gfx = 0; - } else if (dmar_map_gfx) { + disable_igfx_iommu = 1; + } else if (!disable_igfx_iommu) { /* we have to ensure the gfx device is idle before we flush */ pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n"); iommu_set_dma_strict(); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index a95a483def..659a77f7bb 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -3317,15 +3317,26 @@ EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); static int __iommu_set_group_pasid(struct iommu_domain *domain, struct iommu_group *group, ioasid_t pasid) { - struct group_device *device; - int ret = 0; + struct group_device *device, *last_gdev; + int ret; for_each_group_device(group, device) { ret = domain->ops->set_dev_pasid(domain, device->dev, pasid); if (ret) - break; + goto err_revert; } + return 0; + +err_revert: + last_gdev = device; + for_each_group_device(group, device) { + const struct iommu_ops *ops = dev_iommu_ops(device->dev); + + if (device == last_gdev) + break; + ops->remove_dev_pasid(device->dev, pasid); + } return ret; } @@ -3383,10 +3394,8 @@ int iommu_attach_device_pasid(struct iommu_domain *domain, } ret = __iommu_set_group_pasid(domain, group, pasid); - if (ret) { - __iommu_remove_group_pasid(group, pasid); + if (ret) xa_erase(&group->pasid_array, pasid); - } out_unlock: mutex_unlock(&group->mutex); return ret; diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c index 9c8b1349ee..a1430ab60a 100644 --- a/drivers/irqchip/irq-alpine-msi.c +++ b/drivers/irqchip/irq-alpine-msi.c @@ -165,7 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, return 0; err_sgi: - irq_domain_free_irqs_parent(domain, virq, i - 1); + irq_domain_free_irqs_parent(domain, virq, i); alpine_msix_free_sgi(priv, sgi, nr_irqs); return err; } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 5f7d3db3af..6dbac6ec77 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1846,28 +1846,22 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - int ret = 0; if (!info->map) return -EINVAL; - raw_spin_lock(&its_dev->event_map.vlpi_lock); - if (!its_dev->event_map.vm) { struct its_vlpi_map *maps; maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), GFP_ATOMIC); - if (!maps) { - ret = -ENOMEM; - goto out; - } + if (!maps) + return -ENOMEM; its_dev->event_map.vm = info->map->vm; its_dev->event_map.vlpi_maps = maps; } else if (its_dev->event_map.vm != info->map->vm) { - ret = -EINVAL; - goto out; + return -EINVAL; } /* Get our private copy of the mapping information */ @@ -1899,46 +1893,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) its_dev->event_map.nr_vlpis++; } -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_vlpi_map *map; - int ret = 0; - - raw_spin_lock(&its_dev->event_map.vlpi_lock); map = get_vlpi_map(d); - if (!its_dev->event_map.vm || !map) { - ret = -EINVAL; - goto out; - } + if (!its_dev->event_map.vm || !map) + return 
-EINVAL; /* Copy our mapping information to the incoming request */ *info->map = *map; -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_unmap(struct irq_data *d) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - int ret = 0; - - raw_spin_lock(&its_dev->event_map.vlpi_lock); - if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { - ret = -EINVAL; - goto out; - } + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; /* Drop the virtual mapping */ its_send_discard(its_dev, event); @@ -1962,9 +1942,7 @@ static int its_vlpi_unmap(struct irq_data *d) kfree(its_dev->event_map.vlpi_maps); } -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) @@ -1992,6 +1970,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) if (!is_v4(its_dev->its)) return -EINVAL; + guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock); + /* Unmap request? */ if (!info) return its_vlpi_unmap(d); diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c index 6e1e1f011b..dd4d699170 100644 --- a/drivers/irqchip/irq-loongson-pch-msi.c +++ b/drivers/irqchip/irq-loongson-pch-msi.c @@ -136,7 +136,7 @@ static int pch_msi_middle_domain_alloc(struct irq_domain *domain, err_hwirq: pch_msi_free_hwirq(priv, hwirq, nr_irqs); - irq_domain_free_irqs_parent(domain, virq, i - 1); + irq_domain_free_irqs_parent(domain, virq, i); return err; } diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c index 9e71c44288..4f3a12383a 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -253,8 +253,9 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init); static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { - struct fwnode_handle *fn; struct acpi_madt_rintc *rintc; + struct fwnode_handle *fn; + int rc; rintc = (struct acpi_madt_rintc *)header; @@ -273,7 +274,11 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, return -ENOMEM; } - return riscv_intc_init_common(fn, &riscv_intc_chip); + rc = riscv_intc_init_common(fn, &riscv_intc_chip); + if (rc) + irq_domain_free_fwnode(fn); + + return rc; } IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL, diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index f3d4cb9e34..d246b7230b 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -85,7 +85,7 @@ struct plic_handler { struct plic_priv *priv; }; static int plic_parent_irq __ro_after_init; -static bool plic_cpuhp_setup_done __ro_after_init; +static bool plic_global_setup_done __ro_after_init; static DEFINE_PER_CPU(struct plic_handler, plic_handlers); static int plic_irq_set_type(struct irq_data *d, unsigned int type); @@ -490,10 +490,8 @@ static int plic_probe(struct platform_device *pdev) unsigned long plic_quirks = 0; struct plic_handler *handler; u32 nr_irqs, parent_hwirq; - struct irq_domain *domain; struct plic_priv *priv; irq_hw_number_t hwirq; - bool cpuhp_setup; if (is_of_node(dev->fwnode)) { const struct of_device_id *id; @@ -552,14 +550,6 @@ static int plic_probe(struct platform_device *pdev) continue; } - /* Find parent domain and register chained handler */ - domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), 
DOMAIN_BUS_ANY); - if (!plic_parent_irq && domain) { - plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); - if (plic_parent_irq) - irq_set_chained_handler(plic_parent_irq, plic_handle_irq); - } - /* * When running in M-mode we need to ignore the S-mode handler. * Here we assume it always comes later, but that might be a @@ -600,25 +590,35 @@ done: goto fail_cleanup_contexts; /* - * We can have multiple PLIC instances so setup cpuhp state + * We can have multiple PLIC instances so setup global state * and register syscore operations only once after context * handlers of all online CPUs are initialized. */ - if (!plic_cpuhp_setup_done) { - cpuhp_setup = true; + if (!plic_global_setup_done) { + struct irq_domain *domain; + bool global_setup = true; + for_each_online_cpu(cpu) { handler = per_cpu_ptr(&plic_handlers, cpu); if (!handler->present) { - cpuhp_setup = false; + global_setup = false; break; } } - if (cpuhp_setup) { + + if (global_setup) { + /* Find parent domain and register chained handler */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); + if (domain) + plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); + if (plic_parent_irq) + irq_set_chained_handler(plic_parent_irq, plic_handle_irq); + cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, "irqchip/sifive/plic:starting", plic_starting_cpu, plic_dying_cpu); register_syscore_ops(&plic_irq_syscore_ops); - plic_cpuhp_setup_done = true; + plic_global_setup_done = true; } } diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 24fcff682b..ba1be15cfd 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -552,12 +552,6 @@ int led_classdev_register_ext(struct device *parent, led_init_core(led_cdev); #ifdef CONFIG_LEDS_TRIGGERS - /* - * If no default trigger was given and hw_control_trigger is set, - * make it the default trigger. - */ - if (!led_cdev->default_trigger && led_cdev->hw_control_trigger) - led_cdev->default_trigger = led_cdev->hw_control_trigger; led_trigger_set_default(led_cdev); #endif diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index 4e3936a39d..e1b414b403 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -53,7 +53,13 @@ static int led_pwm_set(struct led_classdev *led_cdev, duty = led_dat->pwmstate.period - duty; led_dat->pwmstate.duty_cycle = duty; - led_dat->pwmstate.enabled = true; + /* + * Disabling a PWM doesn't guarantee that it emits the inactive level. + * So keep it on. Only for suspending the PWM should be disabled because + * otherwise it refuses to suspend. The possible downside is that the + * LED might stay (or even go) on. 
+ */ + led_dat->pwmstate.enabled = !(led_cdev->flags & LED_SUSPENDED); return pwm_apply_might_sleep(led_dat->pwm, &led_dat->pwmstate); } diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index db9270da5b..b6ddf1d47c 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -140,24 +140,19 @@ static int macii_probe(void) /* Initialize the driver */ static int macii_init(void) { - unsigned long flags; int err; - local_irq_save(flags); - err = macii_init_via(); if (err) - goto out; + return err; err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB", macii_interrupt); if (err) - goto out; + return err; macii_state = idle; -out: - local_irq_restore(flags); - return err; + return 0; } /* initialize the hardware */ diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index ead2200f39..033aff11f8 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -465,7 +465,7 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan) struct cmdq_task *task, *tmp; unsigned long flags; - WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev)); + WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0); spin_lock_irqsave(&thread->chan->lock, flags); if (list_empty(&thread->task_busy_list)) diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 2bba4d6aaa..463eb13bd0 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -54,7 +54,7 @@ void bch_dump_bucket(struct btree_keys *b) int __bch_count_data(struct btree_keys *b) { unsigned int ret = 0; - struct btree_iter iter; + struct btree_iter_stack iter; struct bkey *k; if (b->ops->is_extents) @@ -67,7 +67,7 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) { va_list args; struct bkey *k, *p = NULL; - struct btree_iter iter; + struct btree_iter_stack iter; const char *err; for_each_key(b, k, &iter) { @@ -879,7 +879,7 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, unsigned int status = BTREE_INSERT_STATUS_NO_INSERT; struct bset *i = bset_tree_last(b)->data; struct bkey *m, *prev = NULL; - struct btree_iter iter; + struct btree_iter_stack iter; struct bkey preceding_key_on_stack = ZERO_KEY; struct bkey *preceding_key_p = &preceding_key_on_stack; @@ -895,9 +895,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, else preceding_key(k, &preceding_key_p); - m = bch_btree_iter_init(b, &iter, preceding_key_p); + m = bch_btree_iter_stack_init(b, &iter, preceding_key_p); - if (b->ops->insert_fixup(b, k, &iter, replace_key)) + if (b->ops->insert_fixup(b, k, &iter.iter, replace_key)) return status; status = BTREE_INSERT_STATUS_INSERT; @@ -1100,33 +1100,33 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, btree_iter_cmp)); } -static struct bkey *__bch_btree_iter_init(struct btree_keys *b, - struct btree_iter *iter, - struct bkey *search, - struct bset_tree *start) +static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b, + struct btree_iter_stack *iter, + struct bkey *search, + struct bset_tree *start) { struct bkey *ret = NULL; - iter->size = ARRAY_SIZE(iter->data); - iter->used = 0; + iter->iter.size = ARRAY_SIZE(iter->stack_data); + iter->iter.used = 0; #ifdef CONFIG_BCACHE_DEBUG - iter->b = b; + iter->iter.b = b; #endif for (; start <= bset_tree_last(b); start++) { ret = bch_bset_search(b, start, search); - bch_btree_iter_push(iter, ret, bset_bkey_last(start->data)); + bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data)); } 
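/*
 * A stand-alone sketch (not part of the merged patches) of the layout the
 * bcache hunks in this region switch to: struct btree_iter now ends in a
 * flexible array member, so it can no longer carry its own element storage
 * when declared on the stack; struct btree_iter_stack wraps it and places a
 * fixed-size array directly behind it to serve as that storage. The toy
 * names below are illustrative, and the demo builds as an ordinary
 * user-space C program with gcc or clang (it relies on the same
 * embedded-flexible-array layout the kernel code uses).
 */
#include <stdio.h>

#define MAX_SLOTS 4

struct iter {
        unsigned int size;      /* capacity of data[] */
        unsigned int used;      /* number of valid entries */
        int data[];             /* flexible array member: no storage of its own */
};

struct iter_stack {
        struct iter iter;
        int stack_data[MAX_SLOTS];      /* overlays iter.data[] in memory */
};

int main(void)
{
        struct iter_stack it = { .iter = { .size = MAX_SLOTS, .used = 0 } };

        it.iter.data[it.iter.used++] = 42;      /* lands in stack_data[0] */
        printf("%d %u/%u\n", it.iter.data[0], it.iter.used, it.iter.size);
        return 0;
}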
return ret; } -struct bkey *bch_btree_iter_init(struct btree_keys *b, - struct btree_iter *iter, +struct bkey *bch_btree_iter_stack_init(struct btree_keys *b, + struct btree_iter_stack *iter, struct bkey *search) { - return __bch_btree_iter_init(b, iter, search, b->set); + return __bch_btree_iter_stack_init(b, iter, search, b->set); } static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter, @@ -1293,10 +1293,10 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start, struct bset_sort_state *state) { size_t order = b->page_order, keys = 0; - struct btree_iter iter; + struct btree_iter_stack iter; int oldsize = bch_count_data(b); - __bch_btree_iter_init(b, &iter, NULL, &b->set[start]); + __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]); if (start) { unsigned int i; @@ -1307,7 +1307,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start, order = get_order(__set_bytes(b->set->data, keys)); } - __btree_sort(b, &iter, start, order, false, state); + __btree_sort(b, &iter.iter, start, order, false, state); EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize); } @@ -1323,11 +1323,11 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new, struct bset_sort_state *state) { uint64_t start_time = local_clock(); - struct btree_iter iter; + struct btree_iter_stack iter; - bch_btree_iter_init(b, &iter, NULL); + bch_btree_iter_stack_init(b, &iter, NULL); - btree_mergesort(b, new->set->data, &iter, false, true); + btree_mergesort(b, new->set->data, &iter.iter, false, true); bch_time_stats_update(&state->time, start_time); diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index d795c84246..011f6062c4 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -321,7 +321,14 @@ struct btree_iter { #endif struct btree_iter_set { struct bkey *k, *end; - } data[MAX_BSETS]; + } data[]; +}; + +/* Fixed-size btree_iter that can be allocated on the stack */ + +struct btree_iter_stack { + struct btree_iter iter; + struct btree_iter_set stack_data[MAX_BSETS]; }; typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k); @@ -333,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter, void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, struct bkey *end); -struct bkey *bch_btree_iter_init(struct btree_keys *b, - struct btree_iter *iter, - struct bkey *search); +struct bkey *bch_btree_iter_stack_init(struct btree_keys *b, + struct btree_iter_stack *iter, + struct bkey *search); struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t, const struct bkey *search); @@ -350,13 +357,14 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b, return search ? 
__bch_bset_search(b, t, search) : t->data->start; } -#define for_each_key_filter(b, k, iter, filter) \ - for (bch_btree_iter_init((b), (iter), NULL); \ - ((k) = bch_btree_iter_next_filter((iter), (b), filter));) +#define for_each_key_filter(b, k, stack_iter, filter) \ + for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \ + ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \ + filter));) -#define for_each_key(b, k, iter) \ - for (bch_btree_iter_init((b), (iter), NULL); \ - ((k) = bch_btree_iter_next(iter));) +#define for_each_key(b, k, stack_iter) \ + for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \ + ((k) = bch_btree_iter_next(&((stack_iter)->iter)));) /* Sorting */ diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 196cdacce3..d011a7154d 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1309,7 +1309,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) uint8_t stale = 0; unsigned int keys = 0, good_keys = 0; struct bkey *k; - struct btree_iter iter; + struct btree_iter_stack iter; struct bset_tree *t; gc->nodes++; @@ -1570,7 +1570,7 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, static unsigned int btree_gc_count_keys(struct btree *b) { struct bkey *k; - struct btree_iter iter; + struct btree_iter_stack iter; unsigned int ret = 0; for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) @@ -1611,17 +1611,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op, int ret = 0; bool should_rewrite; struct bkey *k; - struct btree_iter iter; + struct btree_iter_stack iter; struct gc_merge_info r[GC_MERGE_NODES]; struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; - bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); + bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done); for (i = r; i < r + ARRAY_SIZE(r); i++) i->b = ERR_PTR(-EINTR); while (1) { - k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); + k = bch_btree_iter_next_filter(&iter.iter, &b->keys, + bch_ptr_bad); if (k) { r->b = bch_btree_node_get(b->c, op, k, b->level - 1, true, b); @@ -1911,7 +1912,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) { int ret = 0; struct bkey *k, *p = NULL; - struct btree_iter iter; + struct btree_iter_stack iter; for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) bch_initial_mark_key(b->c, b->level, k); @@ -1919,10 +1920,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) bch_initial_mark_key(b->c, b->level + 1, &b->key); if (b->level) { - bch_btree_iter_init(&b->keys, &iter, NULL); + bch_btree_iter_stack_init(&b->keys, &iter, NULL); do { - k = bch_btree_iter_next_filter(&iter, &b->keys, + k = bch_btree_iter_next_filter(&iter.iter, &b->keys, bch_ptr_bad); if (k) { btree_node_prefetch(b, k); @@ -1950,7 +1951,7 @@ static int bch_btree_check_thread(void *arg) struct btree_check_info *info = arg; struct btree_check_state *check_state = info->state; struct cache_set *c = check_state->c; - struct btree_iter iter; + struct btree_iter_stack iter; struct bkey *k, *p; int cur_idx, prev_idx, skip_nr; @@ -1959,8 +1960,8 @@ static int bch_btree_check_thread(void *arg) ret = 0; /* root node keys are checked before thread created */ - bch_btree_iter_init(&c->root->keys, &iter, NULL); - k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); + bch_btree_iter_stack_init(&c->root->keys, &iter, NULL); + k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); BUG_ON(!k); p = k; @@ 
-1978,7 +1979,7 @@ static int bch_btree_check_thread(void *arg) skip_nr = cur_idx - prev_idx; while (skip_nr) { - k = bch_btree_iter_next_filter(&iter, + k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); if (k) @@ -2051,7 +2052,7 @@ int bch_btree_check(struct cache_set *c) int ret = 0; int i; struct bkey *k = NULL; - struct btree_iter iter; + struct btree_iter_stack iter; struct btree_check_state check_state; /* check and mark root node keys */ @@ -2547,11 +2548,11 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op, if (b->level) { struct bkey *k; - struct btree_iter iter; + struct btree_iter_stack iter; - bch_btree_iter_init(&b->keys, &iter, from); + bch_btree_iter_stack_init(&b->keys, &iter, from); - while ((k = bch_btree_iter_next_filter(&iter, &b->keys, + while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, bch_ptr_bad))) { ret = bcache_btree(map_nodes_recurse, k, b, op, from, fn, flags); @@ -2580,11 +2581,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op, { int ret = MAP_CONTINUE; struct bkey *k; - struct btree_iter iter; + struct btree_iter_stack iter; - bch_btree_iter_init(&b->keys, &iter, from); + bch_btree_iter_stack_init(&b->keys, &iter, from); - while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { + while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, + bch_ptr_bad))) { ret = !b->level ? fn(op, b, k) : bcache_btree(map_keys_recurse, k, diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 330bcd9ea4..b0819be048 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1914,8 +1914,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) INIT_LIST_HEAD(&c->btree_cache_freed); INIT_LIST_HEAD(&c->data_buckets); - iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) * - sizeof(struct btree_iter_set); + iter_size = sizeof(struct btree_iter) + + ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) * + sizeof(struct btree_iter_set); c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL); if (!c->devices) diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 6956beb553..826b14cae4 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -660,7 +660,7 @@ static unsigned int bch_root_usage(struct cache_set *c) unsigned int bytes = 0; struct bkey *k; struct btree *b; - struct btree_iter iter; + struct btree_iter_stack iter; goto lock_root; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 8827a6f130..792e070ccf 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -908,15 +908,15 @@ static int bch_dirty_init_thread(void *arg) struct dirty_init_thrd_info *info = arg; struct bch_dirty_init_state *state = info->state; struct cache_set *c = state->c; - struct btree_iter iter; + struct btree_iter_stack iter; struct bkey *k, *p; int cur_idx, prev_idx, skip_nr; k = p = NULL; prev_idx = 0; - bch_btree_iter_init(&c->root->keys, &iter, NULL); - k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); + bch_btree_iter_stack_init(&c->root->keys, &iter, NULL); + k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); BUG_ON(!k); p = k; @@ -930,7 +930,7 @@ static int bch_dirty_init_thread(void *arg) skip_nr = cur_idx - prev_idx; while (skip_nr) { - k = bch_btree_iter_next_filter(&iter, + k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); if (k) @@ -979,7 +979,7 @@ void 
bch_sectors_dirty_init(struct bcache_device *d) int i; struct btree *b = NULL; struct bkey *k = NULL; - struct btree_iter iter; + struct btree_iter_stack iter; struct sectors_dirty_init op; struct cache_set *c = d->c; struct bch_dirty_init_state state; diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 5eabdb06c6..2ac43d1f1b 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -154,8 +154,10 @@ static void delay_dtr(struct dm_target *ti) { struct delay_c *dc = ti->private; - if (dc->kdelayd_wq) + if (dc->kdelayd_wq) { + timer_shutdown_sync(&dc->delay_timer); destroy_workqueue(dc->kdelayd_wq); + } if (dc->read.dev) dm_put_device(ti, dc->read.dev); @@ -240,19 +242,18 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) ret = delay_class_ctr(ti, &dc->flush, argv); if (ret) goto bad; - max_delay = max(max_delay, dc->write.delay); - max_delay = max(max_delay, dc->flush.delay); goto out; } ret = delay_class_ctr(ti, &dc->write, argv + 3); if (ret) goto bad; + max_delay = max(max_delay, dc->write.delay); + if (argc == 6) { ret = delay_class_ctr(ti, &dc->flush, argv + 3); if (ret) goto bad; - max_delay = max(max_delay, dc->flush.delay); goto out; } @@ -267,8 +268,7 @@ out: * In case of small requested delays, use kthread instead of * timers and workqueue to achieve better latency. */ - dc->worker = kthread_create(&flush_worker_fn, dc, - "dm-delay-flush-worker"); + dc->worker = kthread_run(&flush_worker_fn, dc, "dm-delay-flush-worker"); if (IS_ERR(dc->worker)) { ret = PTR_ERR(dc->worker); dc->worker = NULL; @@ -335,7 +335,7 @@ static void delay_presuspend(struct dm_target *ti) mutex_unlock(&delayed_bios_lock); if (!delay_is_fast(dc)) - del_timer_sync(&dc->delay_timer); + timer_delete(&dc->delay_timer); flush_delayed_bios(dc, true); } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 7f3dc8ee6a..417fddebe3 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -3492,6 +3492,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); limits->dma_alignment = limits->logical_block_size - 1; + limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT; } limits->max_integrity_segments = USHRT_MAX; } diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 059afc24c0..0a2d37eb38 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -1424,7 +1424,7 @@ __acquires(bitmap->lock) sector_t chunk = offset >> bitmap->chunkshift; unsigned long page = chunk >> PAGE_COUNTER_SHIFT; unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT; - sector_t csize; + sector_t csize = ((sector_t)1) << bitmap->chunkshift; int err; if (page >= bitmap->pages) { @@ -1433,6 +1433,7 @@ __acquires(bitmap->lock) * End-of-device while looking for a whole page or * user set a huge number to sysfs bitmap_set_bits. 
*/ + *blocks = csize - (offset & (csize - 1)); return NULL; } err = md_bitmap_checkpage(bitmap, page, create, 0); @@ -1441,8 +1442,7 @@ __acquires(bitmap->lock) bitmap->bp[page].map == NULL) csize = ((sector_t)1) << (bitmap->chunkshift + PAGE_COUNTER_SHIFT); - else - csize = ((sector_t)1) << bitmap->chunkshift; + *blocks = csize - (offset & (csize - 1)); if (err < 0) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index d874abfc18..2bd1ce9b39 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -36,7 +36,6 @@ */ #include <linux/blkdev.h> -#include <linux/delay.h> #include <linux/kthread.h> #include <linux/raid/pq.h> #include <linux/async_tx.h> @@ -6734,6 +6733,9 @@ static void raid5d(struct md_thread *thread) int batch_size, released; unsigned int offset; + if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) + break; + released = release_stripe_list(conf, conf->temp_inactive_list); if (released) clear_bit(R5_DID_ALLOC, &conf->cache_state); @@ -6770,18 +6772,7 @@ static void raid5d(struct md_thread *thread) spin_unlock_irq(&conf->device_lock); md_check_recovery(mddev); spin_lock_irq(&conf->device_lock); - - /* - * Waiting on MD_SB_CHANGE_PENDING below may deadlock - * seeing md_check_recovery() is needed to clear - * the flag when using mdmon. - */ - continue; } - - wait_event_lock_irq(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), - conf->device_lock); } pr_debug("%d stripes handled\n", handled); diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 559a172ebc..da09834990 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -490,6 +490,15 @@ int cec_thread_func(void *_adap) goto unlock; } + if (adap->transmit_in_progress && + adap->transmit_in_progress_aborted) { + if (adap->transmitting) + cec_data_cancel(adap->transmitting, + CEC_TX_STATUS_ABORTED, 0); + adap->transmit_in_progress = false; + adap->transmit_in_progress_aborted = false; + goto unlock; + } if (adap->transmit_in_progress && timeout) { /* * If we timeout, then log that. Normally this does @@ -771,6 +780,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, { struct cec_data *data; bool is_raw = msg_is_raw(msg); + int err; if (adap->devnode.unregistered) return -ENODEV; @@ -935,11 +945,13 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, * Release the lock and wait, retake the lock afterwards. 
*/ mutex_unlock(&adap->lock); - wait_for_completion_killable(&data->c); - if (!data->completed) - cancel_delayed_work_sync(&data->work); + err = wait_for_completion_killable(&data->c); + cancel_delayed_work_sync(&data->work); mutex_lock(&adap->lock); + if (err) + adap->transmit_in_progress_aborted = true; + /* Cancel the transmit if it was interrupted */ if (!data->completed) { if (data->msg.tx_status & CEC_TX_STATUS_OK) @@ -1575,9 +1587,12 @@ unconfigure: */ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block) { - if (WARN_ON(adap->is_configuring || adap->is_configured)) + if (WARN_ON(adap->is_claiming_log_addrs || + adap->is_configuring || adap->is_configured)) return; + adap->is_claiming_log_addrs = true; + init_completion(&adap->config_completion); /* Ready to kick off the thread */ @@ -1592,6 +1607,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block) wait_for_completion(&adap->config_completion); mutex_lock(&adap->lock); } + adap->is_claiming_log_addrs = false; } /* diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c index 67dc79ef17..3ef9153443 100644 --- a/drivers/media/cec/core/cec-api.c +++ b/drivers/media/cec/core/cec-api.c @@ -178,7 +178,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh, CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU | CEC_LOG_ADDRS_FL_CDC_ONLY; mutex_lock(&adap->lock); - if (!adap->is_configuring && + if (!adap->is_claiming_log_addrs && !adap->is_configuring && (!log_addrs.num_log_addrs || !adap->is_configured) && !cec_is_busy(adap, fh)) { err = __cec_s_log_addrs(adap, &log_addrs, block); @@ -664,6 +664,8 @@ static int cec_release(struct inode *inode, struct file *filp) list_del_init(&data->xfer_list); } mutex_unlock(&adap->lock); + + mutex_lock(&fh->lock); while (!list_empty(&fh->msgs)) { struct cec_msg_entry *entry = list_first_entry(&fh->msgs, struct cec_msg_entry, list); @@ -681,6 +683,7 @@ static int cec_release(struct inode *inode, struct file *filp) kfree(entry); } } + mutex_unlock(&fh->lock); kfree(fh); cec_put_device(devnode); diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index 2638875924..231b45632a 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -2176,6 +2176,11 @@ static int lgdt3306a_probe(struct i2c_client *client) struct dvb_frontend *fe; int ret; + if (!client->dev.platform_data) { + dev_err(&client->dev, "platform data is mandatory\n"); + return -EINVAL; + } + config = kmemdup(client->dev.platform_data, sizeof(struct lgdt3306a_config), GFP_KERNEL); if (config == NULL) { diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c index 4ebbcf05cc..91e9c37839 100644 --- a/drivers/media/dvb-frontends/mxl5xx.c +++ b/drivers/media/dvb-frontends/mxl5xx.c @@ -1381,57 +1381,57 @@ static int config_ts(struct mxl *state, enum MXL_HYDRA_DEMOD_ID_E demod_id, u32 nco_count_min = 0; u32 clk_type = 0; - struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = { {0x90700010, 8, 1}, {0x90700010, 9, 1}, {0x90700010, 10, 1}, {0x90700010, 11, 1}, {0x90700010, 12, 1}, {0x90700010, 13, 1}, {0x90700010, 14, 1}, {0x90700010, 15, 1} }; - struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = { {0x90700010, 16, 1}, {0x90700010, 17, 1}, {0x90700010, 18, 1}, {0x90700010, 19, 1}, 
{0x90700010, 20, 1}, {0x90700010, 21, 1}, {0x90700010, 22, 1}, {0x90700010, 23, 1} }; - struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = { {0x90700014, 0, 1}, {0x90700014, 1, 1}, {0x90700014, 2, 1}, {0x90700014, 3, 1}, {0x90700014, 4, 1}, {0x90700014, 5, 1}, {0x90700014, 6, 1}, {0x90700014, 7, 1} }; - struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = { {0x90700018, 0, 3}, {0x90700018, 4, 3}, {0x90700018, 8, 3}, {0x90700018, 12, 3}, {0x90700018, 16, 3}, {0x90700018, 20, 3}, {0x90700018, 24, 3}, {0x90700018, 28, 3} }; - struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = { {0x9070000C, 16, 1}, {0x9070000C, 17, 1}, {0x9070000C, 18, 1}, {0x9070000C, 19, 1}, {0x9070000C, 20, 1}, {0x9070000C, 21, 1}, {0x9070000C, 22, 1}, {0x9070000C, 23, 1} }; - struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = { {0x90700010, 0, 1}, {0x90700010, 1, 1}, {0x90700010, 2, 1}, {0x90700010, 3, 1}, {0x90700010, 4, 1}, {0x90700010, 5, 1}, {0x90700010, 6, 1}, {0x90700010, 7, 1} }; - struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = { {0x9070000C, 0, 1}, {0x9070000C, 1, 1}, {0x9070000C, 2, 1}, {0x9070000C, 3, 1}, {0x9070000C, 4, 1}, {0x9070000C, 5, 1}, {0x9070000C, 6, 1}, {0x9070000C, 7, 1} }; - struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = { {0x9070000C, 24, 1}, {0x9070000C, 25, 1}, {0x9070000C, 26, 1}, {0x9070000C, 27, 1}, {0x9070000C, 28, 1}, {0x9070000C, 29, 1}, {0x9070000C, 30, 1}, {0x9070000C, 31, 1} }; - struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = { {0x90700014, 8, 1}, {0x90700014, 9, 1}, {0x90700014, 10, 1}, {0x90700014, 11, 1}, {0x90700014, 12, 1}, {0x90700014, 13, 1}, {0x90700014, 14, 1}, {0x90700014, 15, 1} }; - struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = { {0x907001D4, 0, 1}, {0x907001D4, 1, 1}, {0x907001D4, 2, 1}, {0x907001D4, 3, 1}, {0x907001D4, 4, 1}, {0x907001D4, 5, 1}, {0x907001D4, 6, 1}, {0x907001D4, 7, 1} }; - struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = { + static const struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = { {0x90700044, 16, 80}, {0x90700044, 16, 81}, {0x90700044, 16, 82}, {0x90700044, 16, 83}, {0x90700044, 16, 84}, {0x90700044, 16, 85}, diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c index f548b1bb75..e932d25ca7 100644 --- a/drivers/media/i2c/et8ek8/et8ek8_driver.c +++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c @@ -1475,7 +1475,7 @@ err_mutex: return ret; } -static void __exit et8ek8_remove(struct i2c_client *client) +static void et8ek8_remove(struct i2c_client *client) { struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev); @@ -1517,7 +1517,7 @@ static struct i2c_driver et8ek8_i2c_driver = { .of_match_table = et8ek8_of_table, }, .probe = et8ek8_probe, - .remove = 
__exit_p(et8ek8_remove), + .remove = et8ek8_remove, .id_table = et8ek8_id_table, }; diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c index 39d321e2b7..4577a8977c 100644 --- a/drivers/media/i2c/ov2680.c +++ b/drivers/media/i2c/ov2680.c @@ -1116,25 +1116,24 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor) sensor->pixel_rate = sensor->link_freq[0] * 2; do_div(sensor->pixel_rate, 10); - /* Verify bus cfg */ - if (bus_cfg.bus.mipi_csi2.num_data_lanes != 1) { - ret = dev_err_probe(dev, -EINVAL, - "only a 1-lane CSI2 config is supported"); - goto out_free_bus_cfg; + if (!bus_cfg.nr_of_link_frequencies) { + dev_warn(dev, "Consider passing 'link-frequencies' in DT\n"); + goto skip_link_freq_validation; } for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) if (bus_cfg.link_frequencies[i] == sensor->link_freq[0]) break; - if (bus_cfg.nr_of_link_frequencies == 0 || - bus_cfg.nr_of_link_frequencies == i) { + if (bus_cfg.nr_of_link_frequencies == i) { ret = dev_err_probe(dev, -EINVAL, "supported link freq %lld not found\n", sensor->link_freq[0]); goto out_free_bus_cfg; } +skip_link_freq_validation: + ret = 0; out_free_bus_cfg: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c index 552935ccb4..57906df7be 100644 --- a/drivers/media/i2c/ov2740.c +++ b/drivers/media/i2c/ov2740.c @@ -768,14 +768,15 @@ static int ov2740_init_controls(struct ov2740 *ov2740) cur_mode = ov2740->cur_mode; size = ARRAY_SIZE(link_freq_menu_items); - ov2740->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops, - V4L2_CID_LINK_FREQ, - size - 1, 0, - link_freq_menu_items); + ov2740->link_freq = + v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops, + V4L2_CID_LINK_FREQ, size - 1, + ov2740->supported_modes->link_freq_index, + link_freq_menu_items); if (ov2740->link_freq) ov2740->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; - pixel_rate = to_pixel_rate(OV2740_LINK_FREQ_360MHZ_INDEX); + pixel_rate = to_pixel_rate(ov2740->supported_modes->link_freq_index); ov2740->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, pixel_rate, 1, pixel_rate); diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c index 7f67825c87..318e267e79 100644 --- a/drivers/media/mc/mc-devnode.c +++ b/drivers/media/mc/mc-devnode.c @@ -245,15 +245,14 @@ int __must_check media_devnode_register(struct media_device *mdev, kobject_set_name(&devnode->cdev.kobj, "media%d", devnode->minor); /* Part 3: Add the media and char device */ + set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); ret = cdev_device_add(&devnode->cdev, &devnode->dev); if (ret < 0) { + clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); pr_err("%s: cdev_device_add failed\n", __func__); goto cdev_add_error; } - /* Part 4: Activate this minor. The char device can now be used. 
*/ - set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags); - return 0; cdev_add_error: diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c index 0e28b9a793..96dd0f6ccd 100644 --- a/drivers/media/mc/mc-entity.c +++ b/drivers/media/mc/mc-entity.c @@ -619,6 +619,12 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, link = list_entry(entry->links, typeof(*link), list); last_link = media_pipeline_walk_pop(walk); + if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) { + dev_dbg(walk->mdev->dev, + "media pipeline: skipping link (not data-link)\n"); + return 0; + } + dev_dbg(walk->mdev->dev, "media pipeline: exploring link '%s':%u -> '%s':%u\n", link->source->entity->name, link->source->index, diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index e994db4f4d..61750cc98d 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -15,6 +15,8 @@ #include <media/ipu-bridge.h> #include <media/v4l2-fwnode.h> +#define ADEV_DEV(adev) ACPI_PTR(&((adev)->dev)) + /* * 92335fcf-3203-4472-af93-7b4453ac29da * @@ -87,6 +89,7 @@ static const char * const ipu_vcm_types[] = { "lc898212axb", }; +#if IS_ENABLED(CONFIG_ACPI) /* * Used to figure out IVSC acpi device by ipu_bridge_get_ivsc_acpi_dev() * instead of device and driver match to probe IVSC device. @@ -100,13 +103,13 @@ static const struct acpi_device_id ivsc_acpi_ids[] = { static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev) { - acpi_handle handle = acpi_device_handle(adev); - struct acpi_device *consumer, *ivsc_adev; unsigned int i; for (i = 0; i < ARRAY_SIZE(ivsc_acpi_ids); i++) { const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i]; + struct acpi_device *consumer, *ivsc_adev; + acpi_handle handle = acpi_device_handle(adev); for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1) /* camera sensor depends on IVSC in DSDT if exist */ for_each_acpi_consumer_dev(ivsc_adev, consumer) @@ -118,6 +121,12 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev return NULL; } +#else +static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev) +{ + return NULL; +} +#endif static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev) { @@ -163,7 +172,7 @@ static int ipu_bridge_check_ivsc_dev(struct ipu_sensor *sensor, csi_dev = ipu_bridge_get_ivsc_csi_dev(adev); if (!csi_dev) { acpi_dev_put(adev); - dev_err(&adev->dev, "Failed to find MEI CSI dev\n"); + dev_err(ADEV_DEV(adev), "Failed to find MEI CSI dev\n"); return -ENODEV; } @@ -182,24 +191,25 @@ static int ipu_bridge_read_acpi_buffer(struct acpi_device *adev, char *id, acpi_status status; int ret = 0; - status = acpi_evaluate_object(adev->handle, id, NULL, &buffer); + status = acpi_evaluate_object(ACPI_PTR(adev->handle), + id, NULL, &buffer); if (ACPI_FAILURE(status)) return -ENODEV; obj = buffer.pointer; if (!obj) { - dev_err(&adev->dev, "Couldn't locate ACPI buffer\n"); + dev_err(ADEV_DEV(adev), "Couldn't locate ACPI buffer\n"); return -ENODEV; } if (obj->type != ACPI_TYPE_BUFFER) { - dev_err(&adev->dev, "Not an ACPI buffer\n"); + dev_err(ADEV_DEV(adev), "Not an ACPI buffer\n"); ret = -ENODEV; goto out_free_buff; } if (obj->buffer.length > size) { - dev_err(&adev->dev, "Given buffer is too small\n"); + dev_err(ADEV_DEV(adev), "Given buffer is too small\n"); ret = -EINVAL; goto out_free_buff; } @@ -220,7 +230,7 @@ static u32 ipu_bridge_parse_rotation(struct acpi_device *adev, case 
IPU_SENSOR_ROTATION_INVERTED: return 180; default: - dev_warn(&adev->dev, + dev_warn(ADEV_DEV(adev), "Unknown rotation %d. Assume 0 degree rotation\n", ssdb->degree); return 0; @@ -230,12 +240,14 @@ static u32 ipu_bridge_parse_rotation(struct acpi_device *adev, static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_device *adev) { enum v4l2_fwnode_orientation orientation; - struct acpi_pld_info *pld; - acpi_status status; + struct acpi_pld_info *pld = NULL; + acpi_status status = AE_ERROR; +#if IS_ENABLED(CONFIG_ACPI) status = acpi_get_physical_device_location(adev->handle, &pld); +#endif if (ACPI_FAILURE(status)) { - dev_warn(&adev->dev, "_PLD call failed, using default orientation\n"); + dev_warn(ADEV_DEV(adev), "_PLD call failed, using default orientation\n"); return V4L2_FWNODE_ORIENTATION_EXTERNAL; } @@ -253,7 +265,8 @@ static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_dev orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL; break; default: - dev_warn(&adev->dev, "Unknown _PLD panel val %d\n", pld->panel); + dev_warn(ADEV_DEV(adev), "Unknown _PLD panel val %d\n", + pld->panel); orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL; break; } @@ -272,12 +285,12 @@ int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor) return ret; if (ssdb.vcmtype > ARRAY_SIZE(ipu_vcm_types)) { - dev_warn(&adev->dev, "Unknown VCM type %d\n", ssdb.vcmtype); + dev_warn(ADEV_DEV(adev), "Unknown VCM type %d\n", ssdb.vcmtype); ssdb.vcmtype = 0; } if (ssdb.lanes > IPU_MAX_LANES) { - dev_err(&adev->dev, "Number of lanes in SSDB is invalid\n"); + dev_err(ADEV_DEV(adev), "Number of lanes in SSDB is invalid\n"); return -EINVAL; } @@ -465,8 +478,14 @@ static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge, sensor->ipu_properties); if (sensor->csi_dev) { + const char *device_hid = ""; + +#if IS_ENABLED(CONFIG_ACPI) + device_hid = acpi_device_hid(sensor->ivsc_adev); +#endif + snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u", - acpi_device_hid(sensor->ivsc_adev), sensor->link); + device_hid, sensor->link); nodes[SWNODE_IVSC_HID] = NODE_SENSOR(sensor->ivsc_name, sensor->ivsc_properties); @@ -631,11 +650,15 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg, { struct fwnode_handle *fwnode, *primary; struct ipu_sensor *sensor; - struct acpi_device *adev; + struct acpi_device *adev = NULL; int ret; +#if IS_ENABLED(CONFIG_ACPI) for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) { - if (!adev->status.enabled) +#else + while (true) { +#endif + if (!ACPI_PTR(adev->status.enabled)) continue; if (bridge->n_sensors >= IPU_MAX_PORTS) { @@ -671,7 +694,7 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg, goto err_free_swnodes; } - sensor->adev = acpi_dev_get(adev); + sensor->adev = ACPI_PTR(acpi_dev_get(adev)); primary = acpi_fwnode_handle(adev); primary->secondary = fwnode; @@ -727,11 +750,16 @@ static int ipu_bridge_ivsc_is_ready(void) unsigned int i; for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) { +#if IS_ENABLED(CONFIG_ACPI) const struct ipu_sensor_config *cfg = &ipu_supported_sensors[i]; for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) { - if (!sensor_adev->status.enabled) +#else + while (true) { + sensor_adev = NULL; +#endif + if (!ACPI_PTR(sensor_adev->status.enabled)) continue; adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev); diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index c42adc5a40..00090e7f5f 100644 --- 
a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -1752,11 +1752,6 @@ static int cio2_pci_probe(struct pci_dev *pci_dev, v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev); - /* Register notifier for subdevices we care */ - r = cio2_parse_firmware(cio2); - if (r) - goto fail_clean_notifier; - r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED, CIO2_NAME, cio2); if (r) { @@ -1764,6 +1759,11 @@ static int cio2_pci_probe(struct pci_dev *pci_dev, goto fail_clean_notifier; } + /* Register notifier for subdevices we care */ + r = cio2_parse_firmware(cio2); + if (r) + goto fail_clean_notifier; + pm_runtime_put_noidle(dev); pm_runtime_allow(dev); diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c index 9bcf10a77f..97c833a8de 100644 --- a/drivers/media/pci/mgb4/mgb4_core.c +++ b/drivers/media/pci/mgb4/mgb4_core.c @@ -642,9 +642,6 @@ static void mgb4_remove(struct pci_dev *pdev) struct mgb4_dev *mgbdev = pci_get_drvdata(pdev); int i; -#ifdef CONFIG_DEBUG_FS - debugfs_remove_recursive(mgbdev->debugfs); -#endif #if IS_REACHABLE(CONFIG_HWMON) hwmon_device_unregister(mgbdev->hwmon_dev); #endif @@ -659,6 +656,10 @@ static void mgb4_remove(struct pci_dev *pdev) if (mgbdev->vin[i]) mgb4_vin_free(mgbdev->vin[i]); +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(mgbdev->debugfs); +#endif + device_remove_groups(&mgbdev->pdev->dev, mgb4_pci_groups); free_spi(mgbdev); free_i2c(mgbdev); diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index 7481f553f9..24ec576dc3 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -1488,7 +1488,9 @@ static int init_channel(struct ngene_channel *chan) } if (dev->ci.en && (io & NGENE_IO_TSOUT)) { - dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1); + ret = dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1); + if (ret != 0) + goto err; set_transfer(chan, 1); chan->dev->channel[2].DataFormatFlags = DF_SWAP32; set_transfer(&chan->dev->channel[2], 1); diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c index 2d7b0508cc..6f7d27a48e 100644 --- a/drivers/media/platform/cadence/cdns-csi2rx.c +++ b/drivers/media/platform/cadence/cdns-csi2rx.c @@ -239,10 +239,6 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx) writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG); - ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true); - if (ret) - goto err_disable_pclk; - /* Enable DPHY clk and data lanes. 
*/ if (csi2rx->dphy) { reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST; @@ -252,6 +248,13 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx) } writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG); + + ret = csi2rx_configure_ext_dphy(csi2rx); + if (ret) { + dev_err(csi2rx->dev, + "Failed to configure external DPHY: %d\n", ret); + goto err_disable_pclk; + } } /* @@ -291,14 +294,9 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx) reset_control_deassert(csi2rx->sys_rst); - if (csi2rx->dphy) { - ret = csi2rx_configure_ext_dphy(csi2rx); - if (ret) { - dev_err(csi2rx->dev, - "Failed to configure external DPHY: %d\n", ret); - goto err_disable_sysclk; - } - } + ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true); + if (ret) + goto err_disable_sysclk; clk_disable_unprepare(csi2rx->p_clk); @@ -312,6 +310,10 @@ err_disable_pixclk: clk_disable_unprepare(csi2rx->pixel_clk[i - 1]); } + if (csi2rx->dphy) { + writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG); + phy_power_off(csi2rx->dphy); + } err_disable_pclk: clk_disable_unprepare(csi2rx->p_clk); diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c index 6bbe55de6c..ff23b225db 100644 --- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c +++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c @@ -79,6 +79,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use } fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL); + if (!fw) + return ERR_PTR(-ENOMEM); fw->type = SCP; fw->ops = &mtk_vcodec_rproc_msg; fw->scp = scp; diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c index a22b7dfc65..1a2b14a3e2 100644 --- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c +++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c @@ -58,13 +58,15 @@ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *mtkdev) return 0; } -void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm) +int mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm) { int ret; ret = pm_runtime_resume_and_get(pm->dev); if (ret) dev_err(pm->dev, "pm_runtime_resume_and_get fail: %d", ret); + + return ret; } void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm) diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h index 157ea08ba9..2e28f25e36 100644 --- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h +++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h @@ -10,7 +10,7 @@ #include "mtk_vcodec_enc_drv.h" int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *dev); -void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm); +int mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm); void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm); void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm); void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm); diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c index c402a686f3..e83747b8d6 100644 --- a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c +++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c @@ -64,7 +64,9 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx, ctx->dev->curr_ctx = ctx; spin_unlock_irqrestore(&ctx->dev->irqlock, 
flags); - mtk_vcodec_enc_pw_on(&ctx->dev->pm); + ret = mtk_vcodec_enc_pw_on(&ctx->dev->pm); + if (ret) + goto venc_if_encode_pw_on_err; mtk_vcodec_enc_clock_on(&ctx->dev->pm); ret = ctx->enc_if->encode(ctx->drv_handle, opt, frm_buf, bs_buf, result); @@ -75,6 +77,7 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx, ctx->dev->curr_ctx = NULL; spin_unlock_irqrestore(&ctx->dev->irqlock, flags); +venc_if_encode_pw_on_err: mtk_venc_unlock(ctx); return ret; } diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h index 792336dada..997a66318a 100644 --- a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h +++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h @@ -59,7 +59,7 @@ enum rvin_isp_id { #define RVIN_REMOTES_MAX \ (((unsigned int)RVIN_CSI_MAX) > ((unsigned int)RVIN_ISP_MAX) ? \ - RVIN_CSI_MAX : RVIN_ISP_MAX) + (unsigned int)RVIN_CSI_MAX : (unsigned int)RVIN_ISP_MAX) /** * enum rvin_dma_state - DMA states diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig index 47a8c0fb7e..99c401e653 100644 --- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig +++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig @@ -8,6 +8,7 @@ config VIDEO_SUN8I_A83T_MIPI_CSI2 select VIDEO_V4L2_SUBDEV_API select V4L2_FWNODE select REGMAP_MMIO + select GENERIC_PHY select GENERIC_PHY_MIPI_DPHY help Support for the Allwinner A83T MIPI CSI-2 controller and D-PHY. diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c index 6da83d0cff..22442fce76 100644 --- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c +++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c @@ -786,15 +786,14 @@ static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb) dev_warn(csi->dev, "Failed to drain DMA. 
Next frame might be bogus\n"); + spin_lock_irqsave(&dma->lock, flags); ret = ti_csi2rx_start_dma(csi, buf); if (ret) { - dev_err(csi->dev, "Failed to start DMA: %d\n", ret); - spin_lock_irqsave(&dma->lock, flags); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); dma->state = TI_CSI2RX_DMA_IDLE; spin_unlock_irqrestore(&dma->lock, flags); + dev_err(csi->dev, "Failed to start DMA: %d\n", ret); } else { - spin_lock_irqsave(&dma->lock, flags); list_add_tail(&buf->list, &dma->submitted); spin_unlock_irqrestore(&dma->lock, flags); } diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index f1c5c0a6a3..e3e6aa87fe 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c @@ -62,7 +62,7 @@ struct shark_device { #ifdef SHARK_USE_LEDS struct work_struct led_work; struct led_classdev leds[NO_LEDS]; - char led_names[NO_LEDS][32]; + char led_names[NO_LEDS][64]; atomic_t brightness[NO_LEDS]; unsigned long brightness_new; #endif diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 790787f0eb..bcb24d8964 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -515,7 +515,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb) alt = fc_usb->uintf->cur_altsetting; - if (alt->desc.bNumEndpoints < 1) + if (alt->desc.bNumEndpoints < 2) return -ENODEV; if (!usb_endpoint_is_isoc_in(&alt->endpoint[0].desc)) return -ENODEV; diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c index 366f0e4a5d..e79c45db60 100644 --- a/drivers/media/usb/stk1160/stk1160-video.c +++ b/drivers/media/usb/stk1160/stk1160-video.c @@ -99,7 +99,7 @@ void stk1160_buffer_done(struct stk1160 *dev) static inline void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len) { - int linesdone, lineoff, lencopy; + int linesdone, lineoff, lencopy, offset; int bytesperline = dev->width * 2; struct stk1160_buffer *buf = dev->isoc_ctl.buf; u8 *dst = buf->mem; @@ -139,8 +139,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len) * Check if we have enough space left in the buffer. * In that case, we force loop exit after copy. */ - if (lencopy > buf->bytesused - buf->length) { - lencopy = buf->bytesused - buf->length; + offset = dst - (u8 *)buf->mem; + if (offset > buf->length) { + dev_warn_ratelimited(dev->dev, "out of bounds offset\n"); + return; + } + if (lencopy > buf->length - offset) { + lencopy = buf->length - offset; remain = lencopy; } @@ -182,8 +187,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len) * Check if we have enough space left in the buffer. * In that case, we force loop exit after copy. 
*/ - if (lencopy > buf->bytesused - buf->length) { - lencopy = buf->bytesused - buf->length; + offset = dst - (u8 *)buf->mem; + if (offset > buf->length) { + dev_warn_ratelimited(dev->dev, "offset out of bounds\n"); + return; + } + if (lencopy > buf->length - offset) { + lencopy = buf->length - offset; remain = lencopy; } diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index bbd90123a4..91a41aa3ce 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> +#include <linux/usb/quirks.h> #include <linux/usb/uvc.h> #include <linux/videodev2.h> #include <linux/vmalloc.h> @@ -2232,6 +2233,9 @@ static int uvc_probe(struct usb_interface *intf, goto error; } + if (dev->quirks & UVC_QUIRK_NO_RESET_RESUME) + udev->quirks &= ~USB_QUIRK_RESET_RESUME; + uvc_dbg(dev, PROBE, "UVC device initialized\n"); usb_enable_autosuspend(udev); return 0; @@ -2574,6 +2578,33 @@ static const struct usb_device_id uvc_ids[] = { .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTORE_CTRLS_ON_INIT) }, + /* Logitech Rally Bar Huddle */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x046d, + .idProduct = 0x087c, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) }, + /* Logitech Rally Bar */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x046d, + .idProduct = 0x089b, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) }, + /* Logitech Rally Bar Mini */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x046d, + .idProduct = 0x08d3, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) }, /* Chicony CNF7129 (Asus EEE 100HE) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index 6fb0a78b1b..88218693f6 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -73,6 +73,7 @@ #define UVC_QUIRK_FORCE_Y8 0x00000800 #define UVC_QUIRK_FORCE_BPP 0x00001000 #define UVC_QUIRK_WAKE_AUTOSUSPEND 0x00002000 +#define UVC_QUIRK_NO_RESET_RESUME 0x00004000 /* Format flags */ #define UVC_FMT_FLAG_COMPRESSED 0x00000001 diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c index 3ec323bd52..4bb0735878 100644 --- a/drivers/media/v4l2-core/v4l2-async.c +++ b/drivers/media/v4l2-core/v4l2-async.c @@ -563,6 +563,7 @@ void v4l2_async_nf_init(struct v4l2_async_notifier *notifier, { INIT_LIST_HEAD(¬ifier->waiting_list); INIT_LIST_HEAD(¬ifier->done_list); + INIT_LIST_HEAD(¬ifier->notifier_entry); notifier->v4l2_dev = v4l2_dev; } EXPORT_SYMBOL(v4l2_async_nf_init); @@ -572,6 +573,7 @@ void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier, { INIT_LIST_HEAD(¬ifier->waiting_list); INIT_LIST_HEAD(¬ifier->done_list); + INIT_LIST_HEAD(¬ifier->notifier_entry); notifier->sd = sd; } EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init); @@ -618,16 +620,10 @@ err_unlock: int v4l2_async_nf_register(struct v4l2_async_notifier *notifier) { - int ret; - if 
(WARN_ON(!notifier->v4l2_dev == !notifier->sd)) return -EINVAL; - ret = __v4l2_async_nf_register(notifier); - if (ret) - notifier->v4l2_dev = NULL; - - return ret; + return __v4l2_async_nf_register(notifier); } EXPORT_SYMBOL(v4l2_async_nf_register); @@ -639,7 +635,7 @@ __v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier) v4l2_async_nf_unbind_all_subdevs(notifier); - list_del(¬ifier->notifier_entry); + list_del_init(¬ifier->notifier_entry); } void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier) diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index d13954bd31..bae73b8c52 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -1036,8 +1036,10 @@ int __video_register_device(struct video_device *vdev, vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor); vdev->dev.parent = vdev->dev_parent; dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num); + mutex_lock(&videodev_lock); ret = device_register(&vdev->dev); if (ret < 0) { + mutex_unlock(&videodev_lock); pr_err("%s: device_register failed\n", __func__); goto cleanup; } @@ -1057,6 +1059,7 @@ int __video_register_device(struct video_device *vdev, /* Part 6: Activate this minor. The char device can now be used. */ set_bit(V4L2_FL_REGISTERED, &vdev->flags); + mutex_unlock(&videodev_lock); return 0; diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 4c6198c48d..19d20871af 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -412,15 +412,6 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable) if (WARN_ON(!!sd->enabled_streams == !!enable)) return 0; -#if IS_REACHABLE(CONFIG_LEDS_CLASS) - if (!IS_ERR_OR_NULL(sd->privacy_led)) { - if (enable) - led_set_brightness(sd->privacy_led, - sd->privacy_led->max_brightness); - else - led_set_brightness(sd->privacy_led, 0); - } -#endif ret = sd->ops->video->s_stream(sd, enable); if (!enable && ret < 0) { @@ -428,9 +419,20 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable) ret = 0; } - if (!ret) + if (!ret) { sd->enabled_streams = enable ? 
BIT(0) : 0; +#if IS_REACHABLE(CONFIG_LEDS_CLASS) + if (!IS_ERR_OR_NULL(sd->privacy_led)) { + if (enable) + led_set_brightness(sd->privacy_led, + sd->privacy_led->max_brightness); + else + led_set_brightness(sd->privacy_led, 0); + } +#endif + } + return ret; } @@ -732,6 +734,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg, memset(&sel, 0, sizeof(sel)); sel.which = crop->which; sel.pad = crop->pad; + sel.stream = crop->stream; sel.target = V4L2_SEL_TGT_CROP; rval = v4l2_subdev_call( @@ -756,6 +759,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg, memset(&sel, 0, sizeof(sel)); sel.which = crop->which; sel.pad = crop->pad; + sel.stream = crop->stream; sel.target = V4L2_SEL_TGT_CROP; sel.r = crop->rect; diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 95ef971b5e..b28701138b 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -19,7 +19,7 @@ KASAN_SANITIZE_rodata.o := n KCSAN_SANITIZE_rodata.o := n KCOV_INSTRUMENT_rodata.o := n OBJECT_FILES_NON_STANDARD_rodata.o := y -CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) +CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) $(CC_FLAGS_CFI) OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c index b93404d656..5b861dbff2 100644 --- a/drivers/misc/lkdtm/perms.c +++ b/drivers/misc/lkdtm/perms.c @@ -61,7 +61,7 @@ static void *setup_function_descriptor(func_desc_t *fdesc, void *dst) return fdesc; } -static noinline void execute_location(void *dst, bool write) +static noinline __nocfi void execute_location(void *dst, bool write) { void (*func)(void); func_desc_t fdesc; diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c index 32af2b14ff..34c9be4374 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c @@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]), GFP_KERNEL); - if (!aux_bus->aux_device_wrapper[1]) - return -ENOMEM; + if (!aux_bus->aux_device_wrapper[1]) { + retval = -ENOMEM; + goto err_aux_dev_add_0; + } retval = ida_alloc(&gp_client_ida, GFP_KERNEL); if (retval < 0) @@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id err_aux_dev_add_1: auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev); + goto err_aux_dev_add_0; err_aux_dev_init_1: ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id); @@ -120,6 +123,7 @@ err_ida_alloc_1: err_aux_dev_add_0: auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev); + goto err_ret; err_aux_dev_init_0: ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id); @@ -127,6 +131,7 @@ err_aux_dev_init_0: err_ida_alloc_0: kfree(aux_bus->aux_device_wrapper[0]); +err_ret: return retval; } diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 7f59dd38c3..6589635f8b 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -385,8 +385,10 @@ static int mei_me_pci_resume(struct device *device) } err = mei_restart(dev); - if (err) + if (err) { + free_irq(pdev->irq, dev); return err; + } /* Start timer if stopped in suspend */ schedule_delayed_work(&dev->timer_work, HZ); diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c index 
b543e6b9f3..1ec65d8748 100644 --- a/drivers/misc/mei/platform-vsc.c +++ b/drivers/misc/mei/platform-vsc.c @@ -399,41 +399,32 @@ static void mei_vsc_remove(struct platform_device *pdev) static int mei_vsc_suspend(struct device *dev) { - struct mei_device *mei_dev = dev_get_drvdata(dev); - struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + struct mei_device *mei_dev; + int ret = 0; - mei_stop(mei_dev); + mei_dev = dev_get_drvdata(dev); + if (!mei_dev) + return -ENODEV; - mei_disable_interrupts(mei_dev); + mutex_lock(&mei_dev->device_lock); - vsc_tp_free_irq(hw->tp); + if (!mei_write_is_idle(mei_dev)) + ret = -EAGAIN; - return 0; + mutex_unlock(&mei_dev->device_lock); + + return ret; } static int mei_vsc_resume(struct device *dev) { - struct mei_device *mei_dev = dev_get_drvdata(dev); - struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); - int ret; - - ret = vsc_tp_request_irq(hw->tp); - if (ret) - return ret; - - ret = mei_restart(mei_dev); - if (ret) - goto err_free; + struct mei_device *mei_dev; - /* start timer if stopped in suspend */ - schedule_delayed_work(&mei_dev->timer_work, HZ); + mei_dev = dev_get_drvdata(dev); + if (!mei_dev) + return -ENODEV; return 0; - -err_free: - vsc_tp_free_irq(hw->tp); - - return ret; } static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume); diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c index ffa4ccd96a..596a9d695d 100644 --- a/drivers/misc/mei/vsc-fw-loader.c +++ b/drivers/misc/mei/vsc-fw-loader.c @@ -252,7 +252,7 @@ static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader, { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER }; union acpi_object obj = { - .type = ACPI_TYPE_INTEGER, + .integer.type = ACPI_TYPE_INTEGER, .integer.value = 1, }; struct acpi_object_list arg_list = { diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c index 5d7ac07623..9a41ab6537 100644 --- a/drivers/misc/vmw_vmci/vmci_event.c +++ b/drivers/misc/vmw_vmci/vmci_event.c @@ -9,6 +9,7 @@ #include <linux/vmw_vmci_api.h> #include <linux/list.h> #include <linux/module.h> +#include <linux/nospec.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/rculist.h> @@ -86,9 +87,12 @@ static void event_deliver(struct vmci_event_msg *event_msg) { struct vmci_subscription *cur; struct list_head *subscriber_list; + u32 sanitized_event, max_vmci_event; rcu_read_lock(); - subscriber_list = &subscriber_array[event_msg->event_data.event]; + max_vmci_event = ARRAY_SIZE(subscriber_array); + sanitized_event = array_index_nospec(event_msg->event_data.event, max_vmci_event); + subscriber_list = &subscriber_array[sanitized_event]; list_for_each_entry_rcu(cur, subscriber_list, node) { cur->callback(cur->id, &event_msg->event_data, cur->callback_data); diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index 4f8d962bb5..1300ccab3d 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -625,7 +625,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, if (!vmci_dev) { dev_err(&pdev->dev, "Can't allocate memory for VMCI device\n"); - return -ENOMEM; + error = -ENOMEM; + goto err_unmap_mmio_base; } vmci_dev->dev = &pdev->dev; @@ -642,7 +643,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, if (!vmci_dev->tx_buffer) { dev_err(&pdev->dev, "Can't allocate memory for datagram tx buffer\n"); - return -ENOMEM; + error = -ENOMEM; + goto err_unmap_mmio_base; } vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, 
VMCI_DMA_DG_BUFFER_SIZE, @@ -893,6 +895,10 @@ err_free_notification_bitmap: err_free_data_buffers: vmci_free_dg_buffers(vmci_dev); +err_unmap_mmio_base: + if (mmio_base != NULL) + pci_iounmap(pdev, mmio_base); + /* The rest are managed resources and will be freed by PCI core */ return error; } diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 39f45c2b6d..8791656e9e 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -221,6 +221,26 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, } EXPORT_SYMBOL(mmc_gpiod_request_cd); +/** + * mmc_gpiod_set_cd_config - set config for card-detection GPIO + * @host: mmc host + * @config: Generic pinconf config (from pinconf_to_config_packed()) + * + * This can be used by mmc host drivers to fixup a card-detection GPIO's config + * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor + * through mmc_gpiod_request_cd(). + * + * Returns: + * 0 on success, or a negative errno value on error. + */ +int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config) +{ + struct mmc_gpio *ctx = host->slot.handler_priv; + + return gpiod_set_config(ctx->cd_gpio, config); +} +EXPORT_SYMBOL(mmc_gpiod_set_cd_config); + bool mmc_can_gpio_cd(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 8bd9389196..d7427894e0 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1337,7 +1337,7 @@ ioremap_fail: return ret; } -static void __exit davinci_mmcsd_remove(struct platform_device *pdev) +static void davinci_mmcsd_remove(struct platform_device *pdev) { struct mmc_davinci_host *host = platform_get_drvdata(pdev); @@ -1392,7 +1392,7 @@ static struct platform_driver davinci_mmcsd_driver = { .of_match_table = davinci_mmc_dt_ids, }, .probe = davinci_mmcsd_probe, - .remove_new = __exit_p(davinci_mmcsd_remove), + .remove_new = davinci_mmcsd_remove, .id_table = davinci_mmc_devtype, }; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index acf5fc3ad7..eb8f427f97 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -10,6 +10,7 @@ #include <linux/export.h> #include <linux/module.h> #include <linux/device.h> +#include <linux/pinctrl/pinconf-generic.h> #include <linux/platform_device.h> #include <linux/ioport.h> #include <linux/io.h> @@ -80,6 +81,8 @@ struct sdhci_acpi_host { enum { DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = BIT(0), DMI_QUIRK_SD_NO_WRITE_PROTECT = BIT(1), + DMI_QUIRK_SD_CD_ACTIVE_HIGH = BIT(2), + DMI_QUIRK_SD_CD_ENABLE_PULL_UP = BIT(3), }; static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c) @@ -719,9 +722,30 @@ static const struct acpi_device_id sdhci_acpi_ids[] = { }; MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); +/* Please keep this list sorted alphabetically */ static const struct dmi_system_id sdhci_acpi_quirks[] = { { /* + * The Acer Aspire Switch 10 (SW5-012) microSD slot always + * reports the card being write-protected even though microSD + * cards do not have a write-protect switch at all. 
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), + }, + .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT, + }, + { + /* Asus T100TA, needs pull-up for cd but DSDT GpioInt has NoPull set */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"), + }, + .driver_data = (void *)DMI_QUIRK_SD_CD_ENABLE_PULL_UP, + }, + { + /* * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of * the SHC1 ACPI device, this bug causes it to reprogram the * wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the @@ -736,15 +760,23 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = { }, { /* - * The Acer Aspire Switch 10 (SW5-012) microSD slot always - * reports the card being write-protected even though microSD - * cards do not have a write-protect switch at all. + * Lenovo Yoga Tablet 2 Pro 1380F/L (13" Android version) this + * has broken WP reporting and an inverted CD signal. + * Note this has more or less the same BIOS as the Lenovo Yoga + * Tablet 2 830F/L or 1050F/L (8" and 10" Android), but unlike + * the 830 / 1050 models which share the same mainboard this + * model has a different mainboard and the inverted CD and + * broken WP are unique to this board. */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), + DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), + DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), + /* Full match so as to NOT match the 830/1050 BIOS */ + DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"), }, - .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT, + .driver_data = (void *)(DMI_QUIRK_SD_NO_WRITE_PROTECT | + DMI_QUIRK_SD_CD_ACTIVE_HIGH), }, { /* @@ -757,6 +789,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = { }, .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT, }, + { + /* + * The Toshiba WT10-A's microSD slot always reports the card being + * write-protected. 
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A"), + }, + .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT, + }, {} /* Terminating entry */ }; @@ -866,12 +909,18 @@ static int sdhci_acpi_probe(struct platform_device *pdev) if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); + if (quirks & DMI_QUIRK_SD_CD_ACTIVE_HIGH) + host->mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; + err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0); if (err) { if (err == -EPROBE_DEFER) goto err_free; dev_warn(dev, "failed to setup card detect gpio\n"); c->use_runtime_pm = false; + } else if (quirks & DMI_QUIRK_SD_CD_ENABLE_PULL_UP) { + mmc_gpiod_set_cd_config(host->mmc, + PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000)); } if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index c79f734599..746f4cf7ab 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -3439,12 +3439,18 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) host->data->error = -EILSEQ; if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) sdhci_err_stats_inc(host, DAT_CRC); - } else if ((intmask & SDHCI_INT_DATA_CRC) && + } else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) && SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) != MMC_BUS_TEST_R) { host->data->error = -EILSEQ; if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) sdhci_err_stats_inc(host, DAT_CRC); + if (intmask & SDHCI_INT_TUNING_ERROR) { + u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + ctrl2 &= ~SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); + } } else if (intmask & SDHCI_INT_ADMA_ERROR) { pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), intmask); @@ -3979,7 +3985,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, } else *cmd_error = 0; - if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) { + if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) { *data_error = -EILSEQ; if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) sdhci_err_stats_inc(host, DAT_CRC); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index a20864fc06..957c7a917f 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -158,6 +158,7 @@ #define SDHCI_INT_BUS_POWER 0x00800000 #define SDHCI_INT_AUTO_CMD_ERR 0x01000000 #define SDHCI_INT_ADMA_ERROR 0x02000000 +#define SDHCI_INT_TUNING_ERROR 0x04000000 #define SDHCI_INT_NORMAL_MASK 0x00007FFF #define SDHCI_INT_ERROR_MASK 0xFFFF8000 @@ -169,7 +170,7 @@ SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \ - SDHCI_INT_BLK_GAP) + SDHCI_INT_BLK_GAP | SDHCI_INT_TUNING_ERROR) #define SDHCI_INT_ALL_MASK ((unsigned int)-1) #define SDHCI_CQE_INT_ERR_MASK ( \ diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index d659c59422..562034af65 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -143,16 +143,24 @@ struct sdhci_am654_data { struct regmap *base; int otap_del_sel[ARRAY_SIZE(td)]; int itap_del_sel[ARRAY_SIZE(td)]; + u32 itap_del_ena[ARRAY_SIZE(td)]; int clkbuf_sel; int trm_icp; int drv_strength; int strb_sel; u32 flags; u32 quirks; + bool dll_enable; #define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0) }; +struct window { 
+ u8 start; + u8 end; + u8 length; +}; + struct sdhci_am654_driver_data { const struct sdhci_pltfm_data *pdata; u32 flags; @@ -232,11 +240,13 @@ static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock) } static void sdhci_am654_write_itapdly(struct sdhci_am654_data *sdhci_am654, - u32 itapdly) + u32 itapdly, u32 enable) { /* Set ITAPCHGWIN before writing to ITAPDLY */ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 1 << ITAPCHGWIN_SHIFT); + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK, + enable << ITAPDLYENA_SHIFT); regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYSEL_MASK, itapdly << ITAPDLYSEL_SHIFT); regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0); @@ -253,8 +263,8 @@ static void sdhci_am654_setup_delay_chain(struct sdhci_am654_data *sdhci_am654, mask = SELDLYTXCLK_MASK | SELDLYRXCLK_MASK; regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val); - sdhci_am654_write_itapdly(sdhci_am654, - sdhci_am654->itap_del_sel[timing]); + sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing], + sdhci_am654->itap_del_ena[timing]); } static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock) @@ -263,7 +273,6 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock) struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); unsigned char timing = host->mmc->ios.timing; u32 otap_del_sel; - u32 otap_del_ena; u32 mask, val; regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0); @@ -272,10 +281,9 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock) /* Setup DLL Output TAP delay */ otap_del_sel = sdhci_am654->otap_del_sel[timing]; - otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 
1 : 0; mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; - val = (otap_del_ena << OTAPDLYENA_SHIFT) | + val = (0x1 << OTAPDLYENA_SHIFT) | (otap_del_sel << OTAPDLYSEL_SHIFT); /* Write to STRBSEL for HS400 speed mode */ @@ -290,10 +298,21 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock) regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val); - if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ) + if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ) { sdhci_am654_setup_dll(host, clock); - else + sdhci_am654->dll_enable = true; + + if (timing == MMC_TIMING_MMC_HS400) { + sdhci_am654->itap_del_ena[timing] = 0x1; + sdhci_am654->itap_del_sel[timing] = sdhci_am654->itap_del_sel[timing - 1]; + } + + sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing], + sdhci_am654->itap_del_ena[timing]); + } else { sdhci_am654_setup_delay_chain(sdhci_am654, timing); + sdhci_am654->dll_enable = false; + } regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK, sdhci_am654->clkbuf_sel); @@ -306,6 +325,8 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host, struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); unsigned char timing = host->mmc->ios.timing; u32 otap_del_sel; + u32 itap_del_ena; + u32 itap_del_sel; u32 mask, val; /* Setup DLL Output TAP delay */ @@ -314,8 +335,19 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host, mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; val = (0x1 << OTAPDLYENA_SHIFT) | (otap_del_sel << OTAPDLYSEL_SHIFT); - regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val); + /* Setup Input TAP delay */ + itap_del_ena = sdhci_am654->itap_del_ena[timing]; + itap_del_sel = sdhci_am654->itap_del_sel[timing]; + + mask |= ITAPDLYENA_MASK | ITAPDLYSEL_MASK; + val |= (itap_del_ena << ITAPDLYENA_SHIFT) | + (itap_del_sel << ITAPDLYSEL_SHIFT); + + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, + 1 << ITAPCHGWIN_SHIFT); + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val); + regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0); regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK, sdhci_am654->clkbuf_sel); @@ -408,40 +440,105 @@ static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask) return 0; } -#define ITAP_MAX 32 +#define ITAPDLY_LENGTH 32 +#define ITAPDLY_LAST_INDEX (ITAPDLY_LENGTH - 1) + +static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window + *fail_window, u8 num_fails, bool circular_buffer) +{ + u8 itap = 0, start_fail = 0, end_fail = 0, pass_length = 0; + u8 first_fail_start = 0, last_fail_end = 0; + struct device *dev = mmc_dev(host->mmc); + struct window pass_window = {0, 0, 0}; + int prev_fail_end = -1; + u8 i; + + if (!num_fails) + return ITAPDLY_LAST_INDEX >> 1; + + if (fail_window->length == ITAPDLY_LENGTH) { + dev_err(dev, "No passing ITAPDLY, return 0\n"); + return 0; + } + + first_fail_start = fail_window->start; + last_fail_end = fail_window[num_fails - 1].end; + + for (i = 0; i < num_fails; i++) { + start_fail = fail_window[i].start; + end_fail = fail_window[i].end; + pass_length = start_fail - (prev_fail_end + 1); + + if (pass_length > pass_window.length) { + pass_window.start = prev_fail_end + 1; + pass_window.length = pass_length; + } + prev_fail_end = end_fail; + } + + if (!circular_buffer) + pass_length = ITAPDLY_LAST_INDEX - last_fail_end; + else + pass_length = ITAPDLY_LAST_INDEX - last_fail_end + first_fail_start; + + if (pass_length > pass_window.length) { + 
pass_window.start = last_fail_end + 1; + pass_window.length = pass_length; + } + + if (!circular_buffer) + itap = pass_window.start + (pass_window.length >> 1); + else + itap = (pass_window.start + (pass_window.length >> 1)) % ITAPDLY_LENGTH; + + return (itap > ITAPDLY_LAST_INDEX) ? ITAPDLY_LAST_INDEX >> 1 : itap; +} + static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host, u32 opcode) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); - int cur_val, prev_val = 1, fail_len = 0, pass_window = 0, pass_len; - u32 itap; + unsigned char timing = host->mmc->ios.timing; + struct window fail_window[ITAPDLY_LENGTH]; + u8 curr_pass, itap; + u8 fail_index = 0; + u8 prev_pass = 1; + + memset(fail_window, 0, sizeof(fail_window)); /* Enable ITAPDLY */ - regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK, - 1 << ITAPDLYENA_SHIFT); + sdhci_am654->itap_del_ena[timing] = 0x1; + + for (itap = 0; itap < ITAPDLY_LENGTH; itap++) { + sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]); - for (itap = 0; itap < ITAP_MAX; itap++) { - sdhci_am654_write_itapdly(sdhci_am654, itap); + curr_pass = !mmc_send_tuning(host->mmc, opcode, NULL); - cur_val = !mmc_send_tuning(host->mmc, opcode, NULL); - if (cur_val && !prev_val) - pass_window = itap; + if (!curr_pass && prev_pass) + fail_window[fail_index].start = itap; - if (!cur_val) - fail_len++; + if (!curr_pass) { + fail_window[fail_index].end = itap; + fail_window[fail_index].length++; + } + + if (curr_pass && !prev_pass) + fail_index++; - prev_val = cur_val; + prev_pass = curr_pass; } - /* - * Having determined the length of the failing window and start of - * the passing window calculate the length of the passing window and - * set the final value halfway through it considering the range as a - * circular buffer - */ - pass_len = ITAP_MAX - fail_len; - itap = (pass_window + (pass_len >> 1)) % ITAP_MAX; - sdhci_am654_write_itapdly(sdhci_am654, itap); + + if (fail_window[fail_index].length != 0) + fail_index++; + + itap = sdhci_am654_calculate_itap(host, fail_window, fail_index, + sdhci_am654->dll_enable); + + sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]); + + /* Save ITAPDLY */ + sdhci_am654->itap_del_sel[timing] = itap; return 0; } @@ -590,9 +687,12 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host, host->mmc->caps2 &= ~td[i].capability; } - if (td[i].itap_binding) - device_property_read_u32(dev, td[i].itap_binding, - &sdhci_am654->itap_del_sel[i]); + if (td[i].itap_binding) { + ret = device_property_read_u32(dev, td[i].itap_binding, + &sdhci_am654->itap_del_sel[i]); + if (!ret) + sdhci_am654->itap_del_ena[i] = 0x1; + } } return 0; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 0de87bc638..6d5c755411 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -956,8 +956,10 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd) if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) { size = mtd_otp_size(mtd, true); - if (size < 0) - return size; + if (size < 0) { + err = size; + goto err; + } if (size > 0) { nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size, diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c index a74e64e0cf..c02e506088 100644 --- a/drivers/mtd/nand/raw/nand_hynix.c +++ b/drivers/mtd/nand/raw/nand_hynix.c @@ -401,7 +401,7 @@ static int hynix_nand_rr_init(struct nand_chip *chip) if (ret) pr_warn("failed to 
initialize read-retry infrastructure"); - return 0; + return ret; } static void hynix_nand_extract_oobsize(struct nand_chip *chip, diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 7cab36f947..db55ffdb57 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -48,7 +48,9 @@ obj-$(CONFIG_MHI_NET) += mhi_net.o obj-$(CONFIG_ARCNET) += arcnet/ obj-$(CONFIG_CAIF) += caif/ obj-$(CONFIG_CAN) += can/ -obj-$(CONFIG_NET_DSA) += dsa/ +ifdef CONFIG_NET_DSA +obj-y += dsa/ +endif obj-$(CONFIG_ETHERNET) += ethernet/ obj-$(CONFIG_FDDI) += fddi/ obj-$(CONFIG_HIPPI) += hippi/ diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2c5ed0a7cb..bceda85f0d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -6477,16 +6477,16 @@ static int __init bonding_init(void) if (res) goto out; + bond_create_debugfs(); + res = register_pernet_subsys(&bond_net_ops); if (res) - goto out; + goto err_net_ops; res = bond_netlink_init(); if (res) goto err_link; - bond_create_debugfs(); - for (i = 0; i < max_bonds; i++) { res = bond_create(&init_net, NULL); if (res) @@ -6501,10 +6501,11 @@ static int __init bonding_init(void) out: return res; err: - bond_destroy_debugfs(); bond_netlink_fini(); err_link: unregister_pernet_subsys(&bond_net_ops); +err_net_ops: + bond_destroy_debugfs(); goto out; } @@ -6513,11 +6514,11 @@ static void __exit bonding_exit(void) { unregister_netdevice_notifier(&bond_netdev_notifier); - bond_destroy_debugfs(); - bond_netlink_fini(); unregister_pernet_subsys(&bond_net_ops); + bond_destroy_debugfs(); + #ifdef CONFIG_NET_POLL_CONTROLLER /* Make sure we don't have an imbalance on our netpoll blocking */ WARN_ON(atomic_read(&netpoll_block_tx)); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 2b510f150d..2a5861a88d 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -3050,7 +3050,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit) else interface = PHY_INTERFACE_MODE_MII; } else if (val == bitval[P_RMII_SEL]) { - interface = PHY_INTERFACE_MODE_RGMII; + interface = PHY_INTERFACE_MODE_RMII; } else { interface = PHY_INTERFACE_MODE_RGMII; if (data8 & P_RGMII_ID_EG_ENABLE) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 0918bd6fa8..5a202edfec 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3146,6 +3146,7 @@ static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip) static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) { struct gpio_desc *gpiod = chip->reset; + int err; /* If there is a GPIO connected to the reset pin, toggle it */ if (gpiod) { @@ -3154,17 +3155,26 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) * mid-byte, causing the first EEPROM read after the reset * from the wrong location resulting in the switch booting * to wrong mode and inoperable. + * For this reason, switch families with EEPROM support + * generally wait for EEPROM loads to complete as their pre- + * and post-reset handlers. 
*/ - if (chip->info->ops->get_eeprom) - mv88e6xxx_g2_eeprom_wait(chip); + if (chip->info->ops->hardware_reset_pre) { + err = chip->info->ops->hardware_reset_pre(chip); + if (err) + dev_err(chip->dev, "pre-reset error: %d\n", err); + } gpiod_set_value_cansleep(gpiod, 1); usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); usleep_range(10000, 20000); - if (chip->info->ops->get_eeprom) - mv88e6xxx_g2_eeprom_wait(chip); + if (chip->info->ops->hardware_reset_post) { + err = chip->info->ops->hardware_reset_post(chip); + if (err) + dev_err(chip->dev, "post-reset error: %d\n", err); + } } } @@ -4394,6 +4404,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4584,6 +4596,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4684,6 +4698,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4778,6 +4794,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4836,6 +4854,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4892,6 +4912,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4951,6 +4973,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5004,6 +5028,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = { 
.watchdog_ops = &mv88e6250_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6250_g1_wait_eeprom_done_prereset, + .hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done, .reset = mv88e6250_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, @@ -5051,6 +5077,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5110,6 +5138,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, @@ -5156,6 +5186,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, @@ -5206,6 +5238,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5361,6 +5395,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5423,6 +5459,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5485,6 +5523,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5550,6 +5590,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = { .watchdog_ops = &mv88e6393x_watchdog_ops, .mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, 
.reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 85eb293381..c34caf9815 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -487,6 +487,12 @@ struct mv88e6xxx_ops { int (*ppu_enable)(struct mv88e6xxx_chip *chip); int (*ppu_disable)(struct mv88e6xxx_chip *chip); + /* Additional handlers to run before and after hard reset, to make sure + * that the switch and EEPROM are in a good state. + */ + int (*hardware_reset_pre)(struct mv88e6xxx_chip *chip); + int (*hardware_reset_post)(struct mv88e6xxx_chip *chip); + /* Switch Software Reset */ int (*reset)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 49444a72ff..9820cd5967 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -75,6 +75,95 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1); } +static int mv88e6250_g1_eeprom_reload(struct mv88e6xxx_chip *chip) +{ + /* MV88E6185_G1_CTL1_RELOAD_EEPROM is also valid for 88E6250 */ + int bit = __bf_shf(MV88E6185_G1_CTL1_RELOAD_EEPROM); + u16 val; + int err; + + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val); + if (err) + return err; + + val |= MV88E6185_G1_CTL1_RELOAD_EEPROM; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val); + if (err) + return err; + + return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_CTL1, bit, 0); +} + +/* Returns 0 when done, -EBUSY when waiting, other negative codes on error */ +static int mv88e6xxx_g1_is_eeprom_done(struct mv88e6xxx_chip *chip) +{ + u16 val; + int err; + + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val); + if (err < 0) { + dev_err(chip->dev, "Error reading status"); + return err; + } + + /* If the switch is still resetting, it may not + * respond on the bus, and so MDIO read returns + * 0xffff. Differentiate between that, and waiting for + * the EEPROM to be done by bit 0 being set. + */ + if (val == 0xffff || !(val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))) + return -EBUSY; + + return 0; +} + +/* As the EEInt (EEPROM done) flag clears on read if the status register, this + * function must be called directly after a hard reset or EEPROM ReLoad request, + * or the done condition may have been missed + */ +int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip) +{ + const unsigned long timeout = jiffies + 1 * HZ; + int ret; + + /* Wait up to 1 second for the switch to finish reading the + * EEPROM. + */ + while (time_before(jiffies, timeout)) { + ret = mv88e6xxx_g1_is_eeprom_done(chip); + if (ret != -EBUSY) + return ret; + } + + dev_err(chip->dev, "Timeout waiting for EEPROM done"); + return -ETIMEDOUT; +} + +int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip) +{ + int ret; + + ret = mv88e6xxx_g1_is_eeprom_done(chip); + if (ret != -EBUSY) + return ret; + + /* Pre-reset, we don't know the state of the switch - when + * mv88e6xxx_g1_is_eeprom_done() returns -EBUSY, that may be because + * the switch is actually busy reading the EEPROM, or because + * MV88E6XXX_G1_STS_IRQ_EEPROM_DONE has been cleared by an unrelated + * status register read already. + * + * To account for the latter case, trigger another EEPROM reload for + * another chance at seeing the done flag. 
+ */ + ret = mv88e6250_g1_eeprom_reload(chip); + if (ret) + return ret; + + return mv88e6xxx_g1_wait_eeprom_done(chip); +} + /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1 * Offset 0x02: Switch MAC Address Register Bytes 2 & 3 * Offset 0x03: Switch MAC Address Register Bytes 4 & 5 diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 1095261f5b..3dbb7a1b8f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -282,6 +282,8 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip); +int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip); +int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c index 811ebeeff4..43ac68052b 100644 --- a/drivers/net/dsa/qca/qca8k-leds.c +++ b/drivers/net/dsa/qca/qca8k-leds.c @@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", priv->internal_mdio_bus->id, port_num); - if (!init_data.devicename) + if (!init_data.devicename) { + fwnode_handle_put(led); + fwnode_handle_put(leds); return -ENOMEM; + } ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data); if (ret) @@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p kfree(init_data.devicename); } + fwnode_handle_put(leds); return 0; } @@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv) * the correct port for LED setup. */ ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num)); - if (ret) + if (ret) { + fwnode_handle_put(port); + fwnode_handle_put(ports); return ret; + } } + fwnode_handle_put(ports); return 0; } diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index e10ae94cf7..5ccb1a3a14 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -185,7 +185,12 @@ #define RTL8366RB_LED_BLINKRATE_222MS 0x0004 #define RTL8366RB_LED_BLINKRATE_446MS 0x0005 +/* LED trigger event for each group */ #define RTL8366RB_LED_CTRL_REG 0x0431 +#define RTL8366RB_LED_CTRL_OFFSET(led_group) \ + (4 * (led_group)) +#define RTL8366RB_LED_CTRL_MASK(led_group) \ + (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group)) #define RTL8366RB_LED_OFF 0x0 #define RTL8366RB_LED_DUP_COL 0x1 #define RTL8366RB_LED_LINK_ACT 0x2 @@ -202,6 +207,11 @@ #define RTL8366RB_LED_LINK_TX 0xd #define RTL8366RB_LED_MASTER 0xe #define RTL8366RB_LED_FORCE 0xf + +/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only + * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is + * RTL8366RB_LED_FORCE. Otherwise, it is ignored. 
+ */ #define RTL8366RB_LED_0_1_CTRL_REG 0x0432 #define RTL8366RB_LED_1_OFFSET 6 #define RTL8366RB_LED_2_3_CTRL_REG 0x0433 @@ -1002,27 +1012,19 @@ static int rtl8366rb_setup(struct dsa_switch *ds) if (priv->leds_disabled) { /* Turn everything off */ regmap_update_bits(priv->map, - RTL8366RB_LED_0_1_CTRL_REG, - 0x0FFF, 0); - regmap_update_bits(priv->map, - RTL8366RB_LED_2_3_CTRL_REG, - 0x0FFF, 0); - regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_CONTROL_REG, RTL8366RB_P4_RGMII_LED, 0); - val = RTL8366RB_LED_OFF; - } else { - /* TODO: make this configurable per LED */ - val = RTL8366RB_LED_FORCE; - } - for (i = 0; i < 4; i++) { - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_CTRL_REG, - 0xf << (i * 4), - val << (i * 4)); - if (ret) - return ret; + + for (i = 0; i < RTL8366RB_NUM_LEDGROUPS; i++) { + val = RTL8366RB_LED_OFF << RTL8366RB_LED_CTRL_OFFSET(i); + ret = regmap_update_bits(priv->map, + RTL8366RB_LED_CTRL_REG, + RTL8366RB_LED_CTRL_MASK(i), + val); + if (ret) + return ret; + } } ret = rtl8366_reset_vlan(priv); @@ -1167,52 +1169,6 @@ rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, } } -static void rb8366rb_set_port_led(struct realtek_priv *priv, - int port, bool enable) -{ - u16 val = enable ? 0x3f : 0; - int ret; - - if (priv->leds_disabled) - return; - - switch (port) { - case 0: - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_0_1_CTRL_REG, - 0x3F, val); - break; - case 1: - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_0_1_CTRL_REG, - 0x3F << RTL8366RB_LED_1_OFFSET, - val << RTL8366RB_LED_1_OFFSET); - break; - case 2: - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_2_3_CTRL_REG, - 0x3F, val); - break; - case 3: - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_2_3_CTRL_REG, - 0x3F << RTL8366RB_LED_3_OFFSET, - val << RTL8366RB_LED_3_OFFSET); - break; - case 4: - ret = regmap_update_bits(priv->map, - RTL8366RB_INTERRUPT_CONTROL_REG, - RTL8366RB_P4_RGMII_LED, - enable ? RTL8366RB_P4_RGMII_LED : 0); - break; - default: - dev_err(priv->dev, "no LED for port %d\n", port); - return; - } - if (ret) - dev_err(priv->dev, "error updating LED on port %d\n", port); -} - static int rtl8366rb_port_enable(struct dsa_switch *ds, int port, struct phy_device *phy) @@ -1226,7 +1182,6 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port, if (ret) return ret; - rb8366rb_set_port_led(priv, port, true); return 0; } @@ -1241,8 +1196,6 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port) BIT(port)); if (ret) return; - - rb8366rb_set_port_led(priv, port, false); } static int diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c index d2e8768053..a9c1702431 100644 --- a/drivers/net/dsa/realtek/rtl83xx.c +++ b/drivers/net/dsa/realtek/rtl83xx.c @@ -290,16 +290,13 @@ EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA); * rtl83xx_remove() - Cleanup a realtek switch driver * @priv: realtek_priv pointer * - * If a method is provided, this function asserts the hard reset of the switch - * in order to avoid leaking traffic when the driver is gone. + * Placehold for common cleanup procedures. * - * Context: Might sleep if priv->gdev->chip->can_sleep. 
+ * Context: Any * Return: nothing */ void rtl83xx_remove(struct realtek_priv *priv) { - /* leave the device reset asserted */ - rtl83xx_reset_assert(priv); } EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA); diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 2d8a66ea82..713a595370 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -312,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, struct ena_com_io_sq *io_sq) { size_t size; - int dev_node = 0; memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); @@ -325,12 +324,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, size = io_sq->desc_entry_size * io_sq->q_depth; if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { - dev_node = dev_to_node(ena_dev->dmadev); - set_dev_node(ena_dev->dmadev, ctx->numa_node); io_sq->desc_addr.virt_addr = dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr, GFP_KERNEL); - set_dev_node(ena_dev->dmadev, dev_node); if (!io_sq->desc_addr.virt_addr) { io_sq->desc_addr.virt_addr = dma_alloc_coherent(ena_dev->dmadev, size, @@ -354,10 +350,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, size = (size_t)io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num; - dev_node = dev_to_node(ena_dev->dmadev); - set_dev_node(ena_dev->dmadev, ctx->numa_node); io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); - set_dev_node(ena_dev->dmadev, dev_node); if (!io_sq->bounce_buf_ctrl.base_buffer) io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); @@ -397,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, struct ena_com_io_cq *io_cq) { size_t size; - int prev_node = 0; memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); @@ -409,11 +401,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; - prev_node = dev_to_node(ena_dev->dmadev); - set_dev_node(ena_dev->dmadev, ctx->numa_node); io_cq->cdesc_addr.virt_addr = dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); - set_dev_node(ena_dev->dmadev, prev_node); if (!io_cq->cdesc_addr.virt_addr) { io_cq->cdesc_addr.virt_addr = dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 933e619b3a..4c6e07aa4b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -229,30 +229,43 @@ static struct ena_eth_io_rx_cdesc_base * idx * io_cq->cdesc_entry_size_in_bytes); } -static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, - u16 *first_cdesc_idx) +static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, + u16 *first_cdesc_idx, + u16 *num_descs) { + u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked; struct ena_eth_io_rx_cdesc_base *cdesc; - u16 count = 0, head_masked; u32 last = 0; do { + u32 status; + cdesc = ena_com_get_next_rx_cdesc(io_cq); if (!cdesc) break; + status = READ_ONCE(cdesc->status); ena_com_cq_inc_head(io_cq); + if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) { + struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq); + + netdev_err(dev->net_device, + "First bit is on in descriptor #%d on q_id: %d, req_id: 
%u\n", + count, io_cq->qid, cdesc->req_id); + return -EFAULT; + } count++; - last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> - ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; + last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; } while (!last); if (last) { *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx; - count += io_cq->cur_rx_pkt_cdesc_count; head_masked = io_cq->head & (io_cq->q_depth - 1); + *num_descs = count; io_cq->cur_rx_pkt_cdesc_count = 0; io_cq->cur_rx_pkt_cdesc_start_idx = head_masked; @@ -260,11 +273,11 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n", io_cq->qid, *first_cdesc_idx, count); } else { - io_cq->cur_rx_pkt_cdesc_count += count; - count = 0; + io_cq->cur_rx_pkt_cdesc_count = count; + *num_descs = 0; } - return count; + return 0; } static int ena_com_create_meta(struct ena_com_io_sq *io_sq, @@ -539,10 +552,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, u16 cdesc_idx = 0; u16 nb_hw_desc; u16 i = 0; + int rc; WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); - nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx); + rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc); + if (unlikely(rc != 0)) + return -EFAULT; + if (nb_hw_desc == 0) { ena_rx_ctx->descs = nb_hw_desc; return 0; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index be5acfa41e..8db05f7544 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1347,6 +1347,8 @@ error: if (rc == -ENOSPC) { ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp); ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS); + } else if (rc == -EFAULT) { + ena_reset_device(adapter, ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED); } else { ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index 2c3d6a77ea..a2efebafd6 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -22,6 +22,7 @@ enum ena_regs_reset_reason_types { ENA_REGS_RESET_GENERIC = 13, ENA_REGS_RESET_MISS_INTERRUPT = 14, ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15, + ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED = 16, }; /* ena_registers offsets */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2c2ee79c4d..0fab62a56f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -730,9 +730,6 @@ tx_done: return NETDEV_TX_OK; tx_dma_error: - if (BNXT_TX_PTP_IS_SET(lflags)) - atomic_inc(&bp->ptp_cfg->tx_avail); - last_frag = i; /* start back at beginning and unmap skb */ @@ -754,6 +751,8 @@ tx_dma_error: tx_free: dev_kfree_skb_any(skb); tx_kick_pending: + if (BNXT_TX_PTP_IS_SET(lflags)) + atomic_inc(&bp->ptp_cfg->tx_avail); if (txr->kick_pending) bnxt_txr_db_kick(bp, txr, txr->tx_prod); txr->tx_buf_ring[txr->tx_prod].skb = NULL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index dd849e715c..c46abcccfc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1419,6 +1419,57 @@ struct bnxt_l2_filter { atomic_t refcnt; }; +/* Compat version of hwrm_port_phy_qcfg_output 
capped at 96 bytes. The + * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h. + * The last valid byte in the compat version is different. + */ +struct hwrm_port_phy_qcfg_output_compat { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + u8 active_fec_signal_mode; + __le16 link_speed; + u8 duplex_cfg; + u8 pause; + __le16 support_speeds; + __le16 force_link_speed; + u8 auto_mode; + u8 auto_pause; + __le16 auto_link_speed; + __le16 auto_link_speed_mask; + u8 wirespeed; + u8 lpbk; + u8 force_pause; + u8 module_status; + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + u8 media_type; + u8 xcvr_pkg_type; + u8 eee_config_phy_addr; + u8 parallel_detect; + __le16 link_partner_adv_speeds; + u8 link_partner_adv_auto_mode; + u8 link_partner_adv_pause; + __le16 adv_eee_link_speed_mask; + __le16 link_partner_adv_eee_link_speed_mask; + __le32 xcvr_identifier_type_tx_lpi_timer; + __le16 fec_cfg; + u8 duplex_state; + u8 option_flags; + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + __le16 support_pam4_speeds; + __le16 force_pam4_link_speed; + __le16 auto_pam4_link_speed_mask; + u8 link_partner_pam4_adv_speeds; + u8 valid; +}; + struct bnxt_link_info { u8 phy_type; u8 media_type; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c index 1df3d56cc4..d2fd2d04ed 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) req_type); else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE) hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", - req_type, token->seq_id, rc); + req_type, le16_to_cpu(ctx->req->seq_id), rc); rc = __hwrm_to_stderr(rc); exit: if (token) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 175192ebaa..22898d3d08 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -950,8 +950,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, struct hwrm_fwd_resp_input *req; int rc; - if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) + if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) { + netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n", + msg_size); return -EINVAL; + } rc = hwrm_req_init(bp, req, HWRM_FWD_RESP); if (!rc) { @@ -1085,7 +1088,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) rc = bnxt_hwrm_exec_fwd_resp( bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); } else { - struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0}; + struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {}; struct hwrm_port_phy_qcfg_input *phy_qcfg_req; phy_qcfg_req = @@ -1096,6 +1099,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) mutex_unlock(&bp->link_lock); phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; + /* New SPEEDS2 fields are beyond the legacy structure, so + * clear the SPEEDS2_SUPPORTED flag. 
+ */ + phy_qcfg_resp.option_flags &= + ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED; phy_qcfg_resp.valid = 1; if (vf->flags & BNXT_VF_LINK_UP) { diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index aa6c0dfb6f..e26b4ed33d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct, pg_info->page_offset; memcpy(skb->data, va, MIN_SKB_SIZE); skb_put(skb, MIN_SKB_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - pg_info->page, - pg_info->page_offset + MIN_SKB_SIZE, - len - MIN_SKB_SIZE, - LIO_RXBUFFER_SZ); } else { struct octeon_skb_page_info *pg_info = ((struct octeon_skb_page_info *)(skb->cb)); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index d266a87297..54798df8e2 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]); if (port[IFLA_PORT_PROFILE]) { + if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) { + memcpy(pp, &prev_pp, sizeof(*pp)); + return -EINVAL; + } pp->set |= ENIC_SET_NAME; memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]), PORT_PROFILE_MAX); } if (port[IFLA_PORT_INSTANCE_UUID]) { + if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) { + memcpy(pp, &prev_pp, sizeof(*pp)); + return -EINVAL; + } pp->set |= ENIC_SET_INSTANCE; memcpy(pp->instance_uuid, nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); } if (port[IFLA_PORT_HOST_UUID]) { + if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) { + memcpy(pp, &prev_pp, sizeof(*pp)); + return -EINVAL; + } pp->set |= ENIC_SET_HOST; memcpy(pp->host_uuid, nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 705c3eb19c..d1fbadbf86 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -1107,10 +1107,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev, { struct gemini_ethernet_port *port = netdev_priv(netdev); struct gemini_ethernet *geth = port->geth; + unsigned long flags; u32 val, mask; netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id); + spin_lock_irqsave(&geth->irq_lock, flags); + mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq); if (en) @@ -1119,6 +1122,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev, val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); val = en ? 
val | mask : val & ~mask; writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); + + spin_unlock_irqrestore(&geth->irq_lock, flags); } static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num) @@ -1415,15 +1420,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget) union gmac_rxdesc_3 word3; struct page *page = NULL; unsigned int page_offs; + unsigned long flags; unsigned short r, w; union dma_rwptr rw; dma_addr_t mapping; int frag_nr = 0; + spin_lock_irqsave(&geth->irq_lock, flags); rw.bits32 = readl(ptr_reg); /* Reset interrupt as all packages until here are taken into account */ writel(DEFAULT_Q0_INT_BIT << netdev->dev_id, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG); + spin_unlock_irqrestore(&geth->irq_lock, flags); + r = rw.bits.rptr; w = rw.bits.wptr; @@ -1726,10 +1735,9 @@ static irqreturn_t gmac_irq(int irq, void *data) gmac_update_hw_stats(netdev); if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) { + spin_lock(&geth->irq_lock); writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8), geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); - - spin_lock(&geth->irq_lock); u64_stats_update_begin(&port->ir_stats_syncp); ++port->stats.rx_fifo_errors; u64_stats_update_end(&port->ir_stats_syncp); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 9f07f4947b..5c45f42232 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, if (priv->min_num_stack_tx_queues + num_xdp_tx_queues > priv->num_tx_rings) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)", + "Reserving %d XDP TXQs leaves under %d for stack (total %d)", num_xdp_tx_queues, priv->min_num_stack_tx_queues, priv->num_tx_rings); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 8bd213da8f..881ece735d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3674,29 +3674,6 @@ fec_set_mac_address(struct net_device *ndev, void *p) return 0; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/** - * fec_poll_controller - FEC Poll controller function - * @dev: The FEC network adapter - * - * Polled functionality used by netconsole and others in non interrupt mode - * - */ -static void fec_poll_controller(struct net_device *dev) -{ - int i; - struct fec_enet_private *fep = netdev_priv(dev); - - for (i = 0; i < FEC_IRQ_NUM; i++) { - if (fep->irq[i] > 0) { - disable_irq(fep->irq[i]); - fec_enet_interrupt(fep->irq[i], dev); - enable_irq(fep->irq[i]); - } - } -} -#endif - static inline void fec_enet_set_netdev_features(struct net_device *netdev, netdev_features_t features) { @@ -4003,9 +3980,6 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, .ndo_eth_ioctl = phy_do_ioctl_running, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = fec_poll_controller, -#endif .ndo_set_features = fec_set_features, .ndo_bpf = fec_enet_bpf, .ndo_xdp_xmit = fec_enet_xdp_xmit, @@ -4156,6 +4130,14 @@ free_queue_mem: return ret; } +static void fec_enet_deinit(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + netif_napi_del(&fep->napi); + fec_enet_free_queue(ndev); +} + #ifdef CONFIG_OF static int fec_reset_phy(struct platform_device *pdev) { @@ 
-4550,6 +4532,7 @@ failed_register: fec_enet_mii_remove(fep); failed_mii_init: failed_irq: + fec_enet_deinit(ndev); failed_init: fec_ptp_stop(pdev); failed_reset: @@ -4613,6 +4596,7 @@ fec_drv_remove(struct platform_device *pdev) pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + fec_enet_deinit(ndev); free_netdev(ndev); } diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 181d9bfbee..e32f6724f5 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -104,14 +104,13 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) struct timespec64 ts; u64 ns; - if (fep->pps_enable == enable) - return 0; - - fep->pps_channel = DEFAULT_PPS_CHANNEL; - fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; - spin_lock_irqsave(&fep->tmreg_lock, flags); + if (fep->pps_enable == enable) { + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + return 0; + } + if (enable) { /* clear capture or output compare interrupt status if have. */ @@ -532,6 +531,9 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, int ret = 0; if (rq->type == PTP_CLK_REQ_PPS) { + fep->pps_channel = DEFAULT_PPS_CHANNEL; + fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; + ret = fec_ptp_enable_pps(fep, on); return ret; diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 8e8071308a..d165a999d3 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -581,11 +581,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb, skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type); } -static void gve_rx_free_skb(struct gve_rx_ring *rx) +static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx) { if (!rx->ctx.skb_head) return; + if (rx->ctx.skb_head == napi->skb) + napi->skb = NULL; dev_kfree_skb_any(rx->ctx.skb_head); rx->ctx.skb_head = NULL; rx->ctx.skb_tail = NULL; @@ -884,7 +886,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num); if (err < 0) { - gve_rx_free_skb(rx); + gve_rx_free_skb(napi, rx); u64_stats_update_begin(&rx->statss); if (err == -ENOMEM) rx->rx_skb_alloc_fail++; @@ -927,7 +929,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) /* gve_rx_complete_skb() will consume skb if successful */ if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) { - gve_rx_free_skb(rx); + gve_rx_free_skb(napi, rx); u64_stats_update_begin(&rx->statss); rx->rx_desc_err_dropped_pkt++; u64_stats_update_end(&rx->statss); diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index bc34b6cd3a..917a79a47e 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb) if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO)) return -1; + if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) + return -EINVAL; + /* Needed because we will modify header. */ err = skb_cow_head(skb, 0); if (err < 0) return err; tcp = tcp_hdr(skb); - - /* Remove payload length from checksum. */ paylen = skb->len - skb_transport_offset(skb); - - switch (skb_shinfo(skb)->gso_type) { - case SKB_GSO_TCPV4: - case SKB_GSO_TCPV6: - csum_replace_by_diff(&tcp->check, - (__force __wsum)htonl(paylen)); - - /* Compute length of segmentation header. 
*/ - header_len = skb_tcp_all_headers(skb); - break; - default: - return -EINVAL; - } + csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen)); + header_len = skb_tcp_all_headers(skb); if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO)) return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 19668a8d22..c9258b1b2b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3539,6 +3539,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) ret = hns3_alloc_and_attach_buffer(ring, i); if (ret) goto out_buffer_fail; + + if (!(i % HNS3_RESCHED_BD_NUM)) + cond_resched(); } return 0; @@ -5111,6 +5114,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) } u64_stats_init(&priv->ring[i].syncp); + cond_resched(); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index acd756b0c7..d36c4ed16d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -214,6 +214,8 @@ enum hns3_nic_state { #define HNS3_CQ_MODE_EQE 1U #define HNS3_CQ_MODE_CQE 0U +#define HNS3_RESCHED_BD_NUM 1024 + enum hns3_pkt_l2t_type { HNS3_L2_TYPE_UNICAST, HNS3_L2_TYPE_MULTICAST, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index ce60332d83..990d7bd397 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3042,9 +3042,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev) static void hclge_update_link_status(struct hclge_dev *hdev) { - struct hnae3_handle *rhandle = &hdev->vport[0].roce; struct hnae3_handle *handle = &hdev->vport[0].nic; - struct hnae3_client *rclient = hdev->roce_client; struct hnae3_client *client = hdev->nic_client; int state; int ret; @@ -3068,8 +3066,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev) client->ops->link_status_change(handle, state); hclge_config_mac_tnl_int(hdev, state); - if (rclient && rclient->ops->link_status_change) - rclient->ops->link_status_change(rhandle, state); + + if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) { + struct hnae3_handle *rhandle = &hdev->vport[0].roce; + struct hnae3_client *rclient = hdev->roce_client; + + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, + state); + } hclge_push_link_status(hdev); } @@ -11245,6 +11250,12 @@ clear_roce: return ret; } +static bool hclge_uninit_need_wait(struct hclge_dev *hdev) +{ + return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); +} + static void hclge_uninit_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -11253,7 +11264,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, if (hdev->roce_client) { clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); - while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + while (hclge_uninit_need_wait(hdev)) msleep(HCLGE_WAIT_RESET_DONE); hdev->roce_client->ops->uninit_instance(&vport->roce, 0); diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 365c03d1c4..8e40f26aa5 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -412,7 +412,6 @@ struct ice_vsi { struct ice_tc_cfg 
tc_cfg; struct bpf_prog *xdp_prog; struct ice_tx_ring **xdp_rings; /* XDP ring array */ - unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ u16 num_xdp_txq; /* Used XDP queues */ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ @@ -749,6 +748,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) } /** + * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID + * @vsi: pointer to VSI + * @qid: index of a queue to look at XSK buff pool presence + * + * Return: A pointer to xsk_buff_pool structure if there is a buffer pool + * attached and configured as zero-copy, NULL otherwise. + */ +static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi, + u16 qid) +{ + struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid); + + if (!ice_is_xdp_ena_vsi(vsi)) + return NULL; + + return (pool && pool->dev) ? pool : NULL; +} + +/** * ice_xsk_pool - get XSK buffer pool bound to a ring * @ring: Rx ring to use * @@ -760,10 +778,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring) struct ice_vsi *vsi = ring->vsi; u16 qid = ring->q_index; - if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) - return NULL; - - return xsk_get_pool_from_qid(vsi->netdev, qid); + return ice_get_xp_from_qid(vsi, qid); } /** @@ -788,12 +803,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid) if (!ring) return; - if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) { - ring->xsk_pool = NULL; - return; - } - - ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid); + ring->xsk_pool = ice_get_xp_from_qid(vsi, qid); } /** @@ -922,9 +932,17 @@ int ice_down(struct ice_vsi *vsi); int ice_down_up(struct ice_vsi *vsi); int ice_vsi_cfg_lan(struct ice_vsi *vsi); struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); + +enum ice_xdp_cfg { + ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */ + ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */ +}; + int ice_vsi_determine_xdp_res(struct ice_vsi *vsi); -int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); -int ice_destroy_xdp_rings(struct ice_vsi *vsi); +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, + enum ice_xdp_cfg cfg_type); +int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type); +void ice_map_xdp_rings(struct ice_vsi *vsi); int ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index a545a7917e..9d23a436d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -860,6 +860,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) } rx_rings_rem -= rx_rings_per_v; } + + if (ice_is_xdp_ena_vsi(vsi)) + ice_map_xdp_rings(vsi); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index d9f6cc71d9..e7d28432ba 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -3135,6 +3135,16 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) case ICE_PHY_TYPE_HIGH_100G_AUI2: speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; break; + case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4: + case ICE_PHY_TYPE_HIGH_200G_SR4: + case ICE_PHY_TYPE_HIGH_200G_FR4: + case ICE_PHY_TYPE_HIGH_200G_LR4: + case ICE_PHY_TYPE_HIGH_200G_DR4: + case 
ICE_PHY_TYPE_HIGH_200G_KR4_PAM4: + case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC: + case ICE_PHY_TYPE_HIGH_200G_AUI4: + speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB; + break; default: speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; break; diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index fc91c4d411..6e7d58243c 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -1329,6 +1329,7 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start, for (i = 0; i < count; i++) { bool last = false; + int try_cnt = 0; int status; bh = (struct ice_buf_hdr *)(bufs + start + i); @@ -1336,8 +1337,26 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start, if (indicate_last) last = ice_is_last_download_buffer(bh, i, count); - status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, - &offset, &info, NULL); + while (1) { + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, + last, &offset, &info, + NULL); + if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC && + hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG) + break; + + try_cnt++; + + if (try_cnt == 5) + break; + + msleep(20); + } + + if (try_cnt) + dev_dbg(ice_hw_to_dev(hw), + "ice_aq_download_pkg number of retries: %d\n", + try_cnt); /* Save AQ status from download package */ if (status) { @@ -1424,14 +1443,14 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, goto exit; } - conf_idx = le32_to_cpu(seg->signed_seg_idx); - start = le32_to_cpu(seg->signed_buf_start); count = le32_to_cpu(seg->signed_buf_count); - state = ice_download_pkg_sig_seg(hw, seg); - if (state) + if (state || !count) goto exit; + conf_idx = le32_to_cpu(seg->signed_seg_idx); + start = le32_to_cpu(seg->signed_buf_start); + state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, count); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 78b833b3e1..62c8205fce 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3593,7 +3593,6 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) struct ice_pf *pf = vsi->back; int new_rx = 0, new_tx = 0; bool locked = false; - u32 curr_combined; int ret = 0; /* do not support changing channels in Safe Mode */ @@ -3615,22 +3614,8 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) return -EOPNOTSUPP; } - curr_combined = ice_get_combined_cnt(vsi); - - /* these checks are for cases where user didn't specify a particular - * value on cmd line but we get non-zero value anyway via - * get_channels(); look at ethtool.c in ethtool repository (the user - * space part), particularly, do_schannels() routine - */ - if (ch->rx_count == vsi->num_rxq - curr_combined) - ch->rx_count = 0; - if (ch->tx_count == vsi->num_txq - curr_combined) - ch->tx_count = 0; - if (ch->combined_count == curr_combined) - ch->combined_count = 0; - - if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) { - netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n"); + if (ch->rx_count && ch->tx_count) { + netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 5584221203..acf732ce04 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ 
b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -117,14 +117,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->q_vectors) goto err_vectors; - vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL); - if (!vsi->af_xdp_zc_qps) - goto err_zc_qps; - return 0; -err_zc_qps: - devm_kfree(dev, vsi->q_vectors); err_vectors: devm_kfree(dev, vsi->rxq_map); err_rxq_map: @@ -328,8 +322,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) dev = ice_pf_to_dev(pf); - bitmap_free(vsi->af_xdp_zc_qps); - vsi->af_xdp_zc_qps = NULL; /* free the ring and vector containers */ devm_kfree(dev, vsi->q_vectors); vsi->q_vectors = NULL; @@ -2331,22 +2323,23 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) if (ret) goto unroll_vector_base; - ice_vsi_map_rings_to_vectors(vsi); - - /* Associate q_vector rings to napi */ - ice_vsi_set_napi_queues(vsi); - - vsi->stat_offsets_loaded = false; - if (ice_is_xdp_ena_vsi(vsi)) { ret = ice_vsi_determine_xdp_res(vsi); if (ret) goto unroll_vector_base; - ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); + ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog, + ICE_XDP_CFG_PART); if (ret) goto unroll_vector_base; } + ice_vsi_map_rings_to_vectors(vsi); + + /* Associate q_vector rings to napi */ + ice_vsi_set_napi_queues(vsi); + + vsi->stat_offsets_loaded = false; + /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ if (vsi->type != ICE_VSI_CTRL) /* Do not exit if configuring RSS had an issue, at @@ -2493,7 +2486,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi) /* return value check can be skipped here, it always returns * 0 if reset is in progress */ - ice_destroy_xdp_rings(vsi); + ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART); ice_vsi_clear_rings(vsi); ice_vsi_free_q_vectors(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 33a164fa32..61eef3259c 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -803,6 +803,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) } switch (vsi->port_info->phy.link_info.link_speed) { + case ICE_AQ_LINK_SPEED_200GB: + speed = "200 G"; + break; case ICE_AQ_LINK_SPEED_100GB: speed = "100 G"; break; @@ -2670,17 +2673,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) bpf_prog_put(old_prog); } +static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid) +{ + struct ice_q_vector *q_vector; + struct ice_tx_ring *ring; + + if (static_key_enabled(&ice_xdp_locking_key)) + return vsi->xdp_rings[qid % vsi->num_xdp_txq]; + + q_vector = vsi->rx_rings[qid]->q_vector; + ice_for_each_tx_ring(ring, q_vector->tx) + if (ice_ring_is_xdp(ring)) + return ring; + + return NULL; +} + +/** + * ice_map_xdp_rings - Map XDP rings to interrupt vectors + * @vsi: the VSI with XDP rings being configured + * + * Map XDP rings to interrupt vectors and perform the configuration steps + * dependent on the mapping. 
+ */ +void ice_map_xdp_rings(struct ice_vsi *vsi) +{ + int xdp_rings_rem = vsi->num_xdp_txq; + int v_idx, q_idx; + + /* follow the logic from ice_vsi_map_rings_to_vectors */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + int xdp_rings_per_v, q_id, q_base; + + xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, + vsi->num_q_vectors - v_idx); + q_base = vsi->num_xdp_txq - xdp_rings_rem; + + for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { + struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; + + xdp_ring->q_vector = q_vector; + xdp_ring->next = q_vector->tx.tx_ring; + q_vector->tx.tx_ring = xdp_ring; + } + xdp_rings_rem -= xdp_rings_per_v; + } + + ice_for_each_rxq(vsi, q_idx) { + vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi, + q_idx); + ice_tx_xsk_pool(vsi, q_idx); + } +} + /** * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP * @vsi: VSI to bring up Tx rings used by XDP * @prog: bpf program that will be assigned to VSI + * @cfg_type: create from scratch or restore the existing configuration * * Return 0 on success and negative value on error */ -int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, + enum ice_xdp_cfg cfg_type) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - int xdp_rings_rem = vsi->num_xdp_txq; struct ice_pf *pf = vsi->back; struct ice_qs_cfg xdp_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, @@ -2693,8 +2751,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) .mapping_mode = ICE_VSI_MAP_CONTIG }; struct device *dev; - int i, v_idx; - int status; + int status, i; dev = ice_pf_to_dev(pf); vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, @@ -2713,49 +2770,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) if (ice_xdp_alloc_setup_rings(vsi)) goto clear_xdp_rings; - /* follow the logic from ice_vsi_map_rings_to_vectors */ - ice_for_each_q_vector(vsi, v_idx) { - struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; - int xdp_rings_per_v, q_id, q_base; - - xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, - vsi->num_q_vectors - v_idx); - q_base = vsi->num_xdp_txq - xdp_rings_rem; - - for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { - struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; - - xdp_ring->q_vector = q_vector; - xdp_ring->next = q_vector->tx.tx_ring; - q_vector->tx.tx_ring = xdp_ring; - } - xdp_rings_rem -= xdp_rings_per_v; - } - - ice_for_each_rxq(vsi, i) { - if (static_key_enabled(&ice_xdp_locking_key)) { - vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; - } else { - struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector; - struct ice_tx_ring *ring; - - ice_for_each_tx_ring(ring, q_vector->tx) { - if (ice_ring_is_xdp(ring)) { - vsi->rx_rings[i]->xdp_ring = ring; - break; - } - } - } - ice_tx_xsk_pool(vsi, i); - } - /* omit the scheduler update if in reset path; XDP queues will be * taken into account at the end of ice_vsi_rebuild, where * ice_cfg_vsi_lan is being called */ - if (ice_is_reset_in_progress(pf->state)) + if (cfg_type == ICE_XDP_CFG_PART) return 0; + ice_map_xdp_rings(vsi); + /* tell the Tx scheduler that right now we have * additional queues */ @@ -2805,22 +2828,21 @@ err_map_xdp: /** * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings * @vsi: VSI to remove XDP rings + * @cfg_type: disable XDP permanently or allow it to be restored later * * Detach XDP rings from 
irq vectors, clean up the PF bitmap and free * resources */ -int ice_destroy_xdp_rings(struct ice_vsi *vsi) +int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; int i, v_idx; /* q_vectors are freed in reset path so there's no point in detaching - * rings; in case of rebuild being triggered not from reset bits - * in pf->state won't be set, so additionally check first q_vector - * against NULL + * rings */ - if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + if (cfg_type == ICE_XDP_CFG_PART) goto free_qmap; ice_for_each_q_vector(vsi, v_idx) { @@ -2861,7 +2883,7 @@ free_qmap: if (static_key_enabled(&ice_xdp_locking_key)) static_branch_dec(&ice_xdp_locking_key); - if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + if (cfg_type == ICE_XDP_CFG_PART) return 0; ice_vsi_assign_bpf_prog(vsi, NULL); @@ -2972,7 +2994,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (xdp_ring_err) { NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); } else { - xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); + xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, + ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); } @@ -2983,7 +3006,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_features_clear_redirect_target(vsi->netdev); - xdp_ring_err = ice_destroy_xdp_rings(vsi); + xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); /* reallocate Rx queues that were used for zero-copy */ @@ -5431,7 +5454,7 @@ static int __maybe_unused ice_suspend(struct device *dev) */ disabled = ice_service_task_stop(pf); - ice_unplug_aux_dev(pf); + ice_deinit_rdma(pf); /* Already suspended?, then there is nothing to do */ if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { @@ -5511,6 +5534,11 @@ static int __maybe_unused ice_resume(struct device *dev) if (ret) dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); + ret = ice_init_rdma(pf); + if (ret) + dev_err(dev, "Reinitialize RDMA during resume failed: %d\n", + ret); + clear_bit(ICE_DOWN, pf->state); /* Now perform PF reset and rebuild */ reset_type = ICE_RESET_PFR; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index d4e05d2cb3..8510a02afe 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -375,11 +375,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1 * * Read the specified word from the copy of the Shadow RAM found in the * specified NVM module. + * + * Note that the Shadow RAM copy is always located after the CSS header, and + * is aligned to 64-byte (32-word) offsets. 
*/ static int ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); + u32 sr_copy; + + switch (bank) { + case ICE_ACTIVE_FLASH_BANK: + sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32); + break; + case ICE_INACTIVE_FLASH_BANK: + sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32); + break; + } + + return ice_read_nvm_module(hw, bank, sr_copy + offset, data); } /** @@ -441,8 +455,7 @@ int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { - u16 pfa_len, pfa_ptr; - u16 next_tlv; + u16 pfa_len, pfa_ptr, next_tlv, max_tlv; int status; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); @@ -455,11 +468,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); return status; } + + /* The Preserved Fields Area contains a sequence of Type-Length-Value + * structures which define its contents. The PFA length includes all + * of the TLVs, plus the initial length word itself, *and* one final + * word at the end after all of the TLVs. + */ + if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) { + dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n", + pfa_ptr, pfa_len); + return -EINVAL; + } + /* Starting with first TLV after PFA length, iterate through the list * of TLVs to find the requested one. */ next_tlv = pfa_ptr + 1; - while (next_tlv < pfa_ptr + pfa_len) { + while (next_tlv < max_tlv) { u16 tlv_sub_module_type; u16 tlv_len; @@ -483,10 +508,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, } return -EINVAL; } - /* Check next TLV, i.e. current TLV pointer + length + 2 words - * (for current TLV's type and length) - */ - next_tlv = next_tlv + tlv_len + 2; + + if (check_add_overflow(next_tlv, 2, &next_tlv) || + check_add_overflow(next_tlv, tlv_len, &next_tlv)) { + dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n", + tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len); + return -EINVAL; + } } /* Module does not exist */ return -ENOENT; @@ -1011,6 +1039,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw) } /** + * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash bank + * @hdr_len: storage for header length in words + * + * Read the CSS header length from the NVM CSS header and add the Authentication + * header size, and then convert to words. + * + * Return: zero on success, or a negative error code on failure. 
+ */ +static int +ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank, + u32 *hdr_len) +{ + u16 hdr_len_l, hdr_len_h; + u32 hdr_len_dword; + int status; + + status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L, + &hdr_len_l); + if (status) + return status; + + status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H, + &hdr_len_h); + if (status) + return status; + + /* CSS header length is in DWORD, so convert to words and add + * authentication header size + */ + hdr_len_dword = hdr_len_h << 16 | hdr_len_l; + *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN; + + return 0; +} + +/** + * ice_determine_css_hdr_len - Discover CSS header length for the device + * @hw: pointer to the HW struct + * + * Determine the size of the CSS header at the start of the NVM module. This + * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is + * always located just after the CSS header. + * + * Return: zero on success, or a negative error code on failure. + */ +static int ice_determine_css_hdr_len(struct ice_hw *hw) +{ + struct ice_bank_info *banks = &hw->flash.banks; + int status; + + status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK, + &banks->active_css_hdr_len); + if (status) + return status; + + status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK, + &banks->inactive_css_hdr_len); + if (status) + return status; + + return 0; +} + +/** * ice_init_nvm - initializes NVM setting * @hw: pointer to the HW struct * @@ -1056,6 +1150,12 @@ int ice_init_nvm(struct ice_hw *hw) return status; } + status = ice_determine_css_hdr_len(hw); + if (status) { + ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n"); + return status; + } + status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index b4ea935e83..1472385eb6 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -1825,7 +1825,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || - lkup_type == ICE_SW_LKUP_DFLT) { + lkup_type == ICE_SW_LKUP_DFLT || + lkup_type == ICE_SW_LKUP_LAST) { sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP); } else if (lkup_type == ICE_SW_LKUP_VLAN) { if (opc == ice_aqc_opc_alloc_res) @@ -2759,7 +2760,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || - lkup_type == ICE_SW_LKUP_DFLT) + lkup_type == ICE_SW_LKUP_DFLT || + lkup_type == ICE_SW_LKUP_LAST) rule_type = remove ? 
ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : ICE_AQC_SW_RULES_T_VSI_LIST_SET; else if (lkup_type == ICE_SW_LKUP_VLAN) diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 9ff92dba58..0aacd0d050 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -481,6 +481,8 @@ struct ice_bank_info { u32 orom_size; /* Size of OROM bank */ u32 netlist_ptr; /* Pointer to 1st Netlist bank */ u32 netlist_size; /* Size of Netlist bank */ + u32 active_css_hdr_len; /* Active CSS header length */ + u32 inactive_css_hdr_len; /* Inactive CSS header length */ enum ice_flash_bank nvm_bank; /* Active NVM bank */ enum ice_flash_bank orom_bank; /* Active OROM bank */ enum ice_flash_bank netlist_bank; /* Active Netlist bank */ @@ -1084,17 +1086,13 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 /* CSS Header words */ +#define ICE_NVM_CSS_HDR_LEN_L 0x02 +#define ICE_NVM_CSS_HDR_LEN_H 0x03 #define ICE_NVM_CSS_SREV_L 0x14 #define ICE_NVM_CSS_SREV_H 0x15 -/* Length of CSS header section in words */ -#define ICE_CSS_HEADER_LENGTH 330 - -/* Offset of Shadow RAM copy in the NVM bank area. */ -#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32) - -/* Size in bytes of Option ROM trailer */ -#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH) +/* Length of Authentication header section in words */ +#define ICE_NVM_AUTH_HEADER_LEN 0x08 /* The Link Topology Netlist section is stored as a series of words. It is * stored in the NVM as a TLV, with the first two words containing the type diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c index 2e9ad27cb9..6e8f2aab60 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c @@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) return -EINVAL; err = ice_fltr_add_vlan(vsi, vlan); - if (err && err != -EEXIST) { + if (!err) + vsi->num_vlan++; + else if (err == -EEXIST) + err = 0; + else dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n", vlan->vid, vsi->vsi_num, err); - return err; - } - vsi->num_vlan++; - return 0; + return err; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 1857220d27..86a865788e 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid) if (!pool) return -EINVAL; - clear_bit(qid, vsi->af_xdp_zc_qps); xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR); return 0; @@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) if (err) return err; - set_bit(qid, vsi->af_xdp_zc_qps); - return 0; } @@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present) int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) { struct ice_rx_ring *rx_ring; - unsigned long q; + uint i; + + ice_for_each_rxq(vsi, i) { + rx_ring = vsi->rx_rings[i]; + if (!rx_ring->xsk_pool) + continue; - for_each_set_bit(q, vsi->af_xdp_zc_qps, - max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) { - rx_ring = vsi->rx_rings[q]; if (ice_realloc_rx_xdp_bufs(rx_ring, zc)) return -ENOMEM; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 986d429d11..1885ba6189 100644 --- 
a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -222,14 +222,19 @@ static int idpf_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct idpf_vport_config *vport_config; - u16 combined, num_txq, num_rxq; unsigned int num_req_tx_q; unsigned int num_req_rx_q; struct idpf_vport *vport; + u16 num_txq, num_rxq; struct device *dev; int err = 0; u16 idx; + if (ch->rx_count && ch->tx_count) { + netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n"); + return -EINVAL; + } + idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); @@ -239,20 +244,6 @@ static int idpf_set_channels(struct net_device *netdev, num_txq = vport_config->user_config.num_req_tx_qs; num_rxq = vport_config->user_config.num_req_rx_qs; - combined = min(num_txq, num_rxq); - - /* these checks are for cases where user didn't specify a particular - * value on cmd line but we get non-zero value anyway via - * get_channels(); look at ethtool.c in ethtool repository (the user - * space part), particularly, do_schannels() routine - */ - if (ch->combined_count == combined) - ch->combined_count = 0; - if (ch->combined_count && ch->rx_count == num_rxq - combined) - ch->rx_count = 0; - if (ch->combined_count && ch->tx_count == num_txq - combined) - ch->tx_count = 0; - num_req_tx_q = ch->combined_count + ch->tx_count; num_req_rx_q = ch->combined_count + ch->rx_count; @@ -376,7 +367,8 @@ static int idpf_set_ringparam(struct net_device *netdev, new_tx_count); if (new_tx_count == vport->txq_desc_count && - new_rx_count == vport->rxq_desc_count) + new_rx_count == vport->rxq_desc_count && + kring->tcp_data_split == idpf_vport_get_hsplit(vport)) goto unlock_mutex; if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) { diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 5d3532c27d..ae8a48c480 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1394,6 +1394,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res) } idpf_rx_init_buf_tail(vport); + idpf_vport_intr_ena(vport); err = idpf_send_config_queues_msg(vport); if (err) { diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index f5bc4a2780..7fc77ed9d1 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3747,9 +3747,9 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport) */ void idpf_vport_intr_deinit(struct idpf_vport *vport) { + idpf_vport_intr_dis_irq_all(vport); idpf_vport_intr_napi_dis_all(vport); idpf_vport_intr_napi_del_all(vport); - idpf_vport_intr_dis_irq_all(vport); idpf_vport_intr_rel_irq(vport); } @@ -4180,7 +4180,6 @@ int idpf_vport_intr_init(struct idpf_vport *vport) idpf_vport_intr_map_vector_to_qs(vport); idpf_vport_intr_napi_add_all(vport); - idpf_vport_intr_napi_ena_all(vport); err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport); if (err) @@ -4194,17 +4193,20 @@ int idpf_vport_intr_init(struct idpf_vport *vport) if (err) goto unroll_vectors_alloc; - idpf_vport_intr_ena_irq_all(vport); - return 0; unroll_vectors_alloc: - idpf_vport_intr_napi_dis_all(vport); idpf_vport_intr_napi_del_all(vport); return err; } +void idpf_vport_intr_ena(struct idpf_vport *vport) +{ + idpf_vport_intr_napi_ena_all(vport); + idpf_vport_intr_ena_irq_all(vport); +} + /** * idpf_config_rss - Send virtchnl messages to configure 
RSS * @vport: virtual port diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index df76493faa..85a1466890 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -988,6 +988,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport); void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector); void idpf_vport_intr_deinit(struct idpf_vport *vport); int idpf_vport_intr_init(struct idpf_vport *vport); +void idpf_vport_intr_ena(struct idpf_vport *vport); enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded); int idpf_config_rss(struct idpf_vport *vport); int idpf_init_rss(struct idpf_vport *vport); diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 1a64f1ca6c..e699412d22 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1629,12 +1629,17 @@ static int igc_ethtool_get_eee(struct net_device *netdev, struct igc_hw *hw = &adapter->hw; u32 eeer; + linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + edata->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + edata->supported); + if (hw->dev_spec._base.eee_enable) mii_eee_cap1_mod_linkmode_t(edata->advertised, adapter->eee_advert); - *edata = adapter->eee; - eeer = rd32(IGC_EEER); /* EEE status on negotiated link */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 4d975d620a..58bc96021b 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -12,6 +12,7 @@ #include <linux/bpf_trace.h> #include <net/xdp_sock_drv.h> #include <linux/pci.h> +#include <linux/mdio.h> #include <net/ipv6.h> @@ -4876,6 +4877,9 @@ void igc_up(struct igc_adapter *adapter) /* start the watchdog. */ hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); + + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T | + MDIO_EEE_2_5GT; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index ed440dd0c4..84fb6b8de2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3676,9 +3676,7 @@ struct ixgbe_info { #define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) #define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) -#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238) #define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) -#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918) #define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) #define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) @@ -3688,7 +3686,6 @@ struct ixgbe_info { #define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054) #define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00) -#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P) ((P) ? 
0x9180 : 0x5180) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) #define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 2decb0710b..a5f6449344 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1722,59 +1722,9 @@ static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) return -EINVAL; } - (void)mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - - /* change mode enforcement rules to hybrid */ - (void)mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - reg_val |= 0x0400; - - (void)mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - - /* manually control the config */ - (void)mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - reg_val |= 0x20002240; - - (void)mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - - /* move the AN base page values */ - (void)mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - reg_val |= 0x1; - - (void)mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - - /* set the AN37 over CB mode */ - (void)mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - reg_val |= 0x20000000; - - (void)mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); - - /* restart AN manually */ - (void)mac->ops.read_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - - (void)mac->ops.write_iosf_sb_reg(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + status = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* Toggle port SW reset by AN reset. */ status = ixgbe_restart_an_internal_phy_x550em(hw); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 23adf53c2a..cebc79a710 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4013,7 +4013,10 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, } } - skb = build_skb(data, frag_size); + if (frag_size) + skb = build_skb(data, frag_size); + else + skb = slab_build_skb(data); if (!skb) { netdev_warn(port->dev, "skb build failed\n"); goto err_drop_frame; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index e8b73b9d75..97722ce8c4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, * - when available free entries are less. * Lower priority ones out of avaialble free entries are always * chosen when 'high vs low' question arises. + * + * For a VF base MCAM match rule is set by its PF. 
And all the + * further MCAM rules installed by VF on its own are + * concatenated with the base rule set by its PF. Hence PF entries + * should be at lower priority compared to VF entries. Otherwise + * base rule is hit always and rules installed by VF will be of + * no use. Hence if the request is from PF then allocate low + * priority entries. */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + goto lprio_alloc; /* Get the search range for priority allocation request */ if (req->priority) { @@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, goto alloc; } - /* For a VF base MCAM match rule is set by its PF. And all the - * further MCAM rules installed by VF on its own are - * concatenated with the base rule set by its PF. Hence PF entries - * should be at lower priority compared to VF entries. Otherwise - * base rule is hit always and rules installed by VF will be of - * no use. Hence if the request is from PF and NOT a priority - * allocation request then allocate low priority entries. - */ - if (!(pcifunc & RVU_PFVF_FUNC_MASK)) - goto lprio_alloc; - /* Find out the search range for non-priority allocation request * * Get MCAM free entry count in middle zone. @@ -2568,6 +2567,18 @@ lprio_alloc: reverse = true; start = 0; end = mcam->bmap_entries; + /* Ensure PF requests are always at bottom and if PF requests + * for higher/lower priority entry wrt reference entry then + * honour that criteria and start search for entries from bottom + * and not in mid zone. + */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK) && + req->priority == NPC_MCAM_HIGHER_PRIO) + end = req->ref_entry; + + if (!(pcifunc & RVU_PFVF_FUNC_MASK) && + req->priority == NPC_MCAM_LOWER_PRIO) + start = req->ref_entry; } alloc: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 5664f768cb..64a97a0a10 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -9,10 +9,9 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ otx2_devlink.o qos_sq.o qos.o -rvu_nicvf-y := otx2_vf.o otx2_devlink.o +rvu_nicvf-y := otx2_vf.o rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o -rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c index 28fb643d29..aa01110f04 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf) return 0; } +EXPORT_SYMBOL(otx2_pfc_txschq_config); static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio) { @@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf) return 0; } +EXPORT_SYMBOL(otx2_pfc_txschq_alloc); static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio) { @@ -260,6 +262,7 @@ update_sq_smq_map: return 0; } +EXPORT_SYMBOL(otx2_pfc_txschq_update); int otx2_pfc_txschq_stop(struct otx2_nic *pfvf) { @@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf) return 0; } +EXPORT_SYMBOL(otx2_pfc_txschq_stop); int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf) { @@ -321,6 +325,7 @@ unlock: mutex_unlock(&pfvf->mbox.lock); return 
err; } +EXPORT_SYMBOL(otx2_config_priority_flow_ctrl); void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable) @@ -385,6 +390,7 @@ out: "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n", qidx, err); } +EXPORT_SYMBOL(otx2_update_bpid_in_rqctx); static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) { @@ -472,3 +478,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev) return 0; } +EXPORT_SYMBOL(otx2_dcbnl_set_ops); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 4e11304965..05956bf03c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -112,6 +112,7 @@ err_dl: devlink_free(dl); return err; } +EXPORT_SYMBOL(otx2_register_dl); void otx2_unregister_dl(struct otx2_nic *pfvf) { @@ -123,3 +124,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf) ARRAY_SIZE(otx2_dl_params)); devlink_free(dl); } +EXPORT_SYMBOL(otx2_unregister_dl); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index f828d32737..04a49b9b54 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -1171,8 +1171,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) { /* Insert vlan tag before giving pkt to tso */ - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tag_present(skb)) { skb = __vlan_hwaccel_push_inside(skb); + if (!skb) + return true; + } otx2_sq_append_tso(pfvf, sq, skb, qidx); return true; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index 1723e9912a..6cddb4da85 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -1407,7 +1407,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, otx2_qos_read_txschq_cfg(pfvf, node, old_cfg); /* delete the txschq nodes allocated for this node */ + otx2_qos_disable_sq(pfvf, qid); + otx2_qos_free_hw_node_schq(pfvf, node); otx2_qos_free_sw_node_schq(pfvf, node); + pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; /* mark this node as htb inner node */ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); @@ -1554,6 +1557,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force dwrr_del_node = true; /* destroy the leaf node */ + otx2_qos_disable_sq(pfvf, qid); otx2_qos_destroy_node(pfvf, node); pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index caa13b9ced..41d9b0684b 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -110,16 +110,16 @@ static const struct mtk_reg_map mt7986_reg_map = { .tx_irq_mask = 0x461c, .tx_irq_status = 0x4618, .pdma = { - .rx_ptr = 0x6100, - .rx_cnt_cfg = 0x6104, - .pcrx_ptr = 0x6108, - .glo_cfg = 0x6204, - .rst_idx = 0x6208, - .delay_irq = 0x620c, - .irq_status = 0x6220, - .irq_mask = 0x6228, - .adma_rx_dbg0 = 0x6238, - .int_grp = 0x6250, + .rx_ptr = 0x4100, + .rx_cnt_cfg = 0x4104, + .pcrx_ptr = 0x4108, + .glo_cfg = 0x4204, + .rst_idx = 0x4208, + .delay_irq = 0x420c, + .irq_status = 0x4220, + .irq_mask = 0x4228, + .adma_rx_dbg0 = 
0x4238, + .int_grp = 0x4250, }, .qdma = { .qtx_cfg = 0x4400, @@ -1107,7 +1107,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v3_or_greater(eth)) { rxd->rxd5 = READ_ONCE(dma_rxd->rxd5); rxd->rxd6 = READ_ONCE(dma_rxd->rxd6); } @@ -1131,51 +1131,57 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) { const struct mtk_soc_data *soc = eth->soc; dma_addr_t phy_ring_tail; - int cnt = MTK_QDMA_RING_SIZE; + int cnt = soc->tx.fq_dma_size; dma_addr_t dma_addr; - int i; + int i, j, len; if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) eth->scratch_ring = eth->sram_base; else eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, - cnt * soc->txrx.txd_size, + cnt * soc->tx.desc_size, ð->phy_scratch_ring, GFP_KERNEL); + if (unlikely(!eth->scratch_ring)) return -ENOMEM; - eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); - if (unlikely(!eth->scratch_head)) - return -ENOMEM; + phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); - dma_addr = dma_map_single(eth->dma_dev, - eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) - return -ENOMEM; + for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) { + len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH); + eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); - phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); + if (unlikely(!eth->scratch_head[j])) + return -ENOMEM; - for (i = 0; i < cnt; i++) { - dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE; - struct mtk_tx_dma_v2 *txd; + dma_addr = dma_map_single(eth->dma_dev, + eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE, + DMA_FROM_DEVICE); - txd = eth->scratch_ring + i * soc->txrx.txd_size; - txd->txd1 = addr; - if (i < cnt - 1) - txd->txd2 = eth->phy_scratch_ring + - (i + 1) * soc->txrx.txd_size; + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) + return -ENOMEM; - txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); - if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA)) - txd->txd3 |= TX_DMA_PREP_ADDR64(addr); - txd->txd4 = 0; - if (mtk_is_netsys_v2_or_greater(eth)) { - txd->txd5 = 0; - txd->txd6 = 0; - txd->txd7 = 0; - txd->txd8 = 0; + for (i = 0; i < cnt; i++) { + struct mtk_tx_dma_v2 *txd; + + txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size; + txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; + if (j * MTK_FQ_DMA_LENGTH + i < cnt) + txd->txd2 = eth->phy_scratch_ring + + (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size; + + txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); + if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA)) + txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE); + + txd->txd4 = 0; + if (mtk_is_netsys_v2_or_greater(eth)) { + txd->txd5 = 0; + txd->txd6 = 0; + txd->txd7 = 0; + txd->txd8 = 0; + } } } @@ -1416,7 +1422,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (itxd == ring->last_free) return -ENOMEM; - itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); + itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size); memset(itx_buf, 0, sizeof(*itx_buf)); txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, @@ -1457,7 +1463,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, memset(&txd_info, 0, sizeof(struct 
mtk_tx_dma_desc_info)); txd_info.size = min_t(unsigned int, frag_size, - soc->txrx.dma_max_len); + soc->tx.dma_max_len); txd_info.qid = queue; txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && !(frag_size - txd_info.size); @@ -1470,7 +1476,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, mtk_tx_set_dma_desc(dev, txd, &txd_info); tx_buf = mtk_desc_to_tx_buf(ring, txd, - soc->txrx.txd_size); + soc->tx.desc_size); if (new_desc) memset(tx_buf, 0, sizeof(*tx_buf)); tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; @@ -1513,7 +1519,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, } else { int next_idx; - next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size), + next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size), ring->dma_size); mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); } @@ -1522,7 +1528,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, err_dma: do { - tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); + tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size); /* unmap dma */ mtk_tx_unmap(eth, tx_buf, NULL, false); @@ -1547,7 +1553,7 @@ static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; nfrags += DIV_ROUND_UP(skb_frag_size(frag), - eth->soc->txrx.dma_max_len); + eth->soc->tx.dma_max_len); } } else { nfrags += skb_shinfo(skb)->nr_frags; @@ -1654,7 +1660,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) ring = ð->rx_ring[i]; idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); - rxd = ring->dma + idx * eth->soc->txrx.rxd_size; + rxd = ring->dma + idx * eth->soc->rx.desc_size; if (rxd->rxd2 & RX_DMA_DONE) { ring->calc_idx_update = true; return ring; @@ -1822,7 +1828,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, } htxd = txd; - tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size); + tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size); memset(tx_buf, 0, sizeof(*tx_buf)); htx_buf = tx_buf; @@ -1841,7 +1847,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, goto unmap; tx_buf = mtk_desc_to_tx_buf(ring, txd, - soc->txrx.txd_size); + soc->tx.desc_size); memset(tx_buf, 0, sizeof(*tx_buf)); n_desc++; } @@ -1879,7 +1885,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, } else { int idx; - idx = txd_to_idx(ring, txd, soc->txrx.txd_size); + idx = txd_to_idx(ring, txd, soc->tx.desc_size); mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), MT7628_TX_CTX_IDX0); } @@ -1890,7 +1896,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, unmap: while (htxd != txd) { - tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size); + tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size); mtk_tx_unmap(eth, tx_buf, NULL, false); htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; @@ -2021,14 +2027,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto rx_done; idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); - rxd = ring->dma + idx * eth->soc->txrx.rxd_size; + rxd = ring->dma + idx * eth->soc->rx.desc_size; data = ring->data[idx]; if (!mtk_rx_get_desc(eth, &trxd, rxd)) break; /* find out which mac the packet come from. 
values start at 1 */ - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v3_or_greater(eth)) { u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5); switch (val) { @@ -2140,7 +2146,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, skb->dev = netdev; bytes += skb->len; - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v3_or_greater(eth)) { reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5); hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY; if (hash != MTK_RXD5_FOE_ENTRY) @@ -2156,7 +2162,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, rxdcsum = &trxd.rxd4; } - if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid) + if (*rxdcsum & eth->soc->rx.dma_l4_valid) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); @@ -2280,7 +2286,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, break; tx_buf = mtk_desc_to_tx_buf(ring, desc, - eth->soc->txrx.txd_size); + eth->soc->tx.desc_size); if (!tx_buf->data) break; @@ -2331,7 +2337,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, } mtk_tx_unmap(eth, tx_buf, &bq, true); - desc = ring->dma + cpu * eth->soc->txrx.txd_size; + desc = ring->dma + cpu * eth->soc->tx.desc_size; ring->last_free = desc; atomic_inc(&ring->free_count); @@ -2421,7 +2427,7 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) do { int rx_done; - mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, + mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.irq_status); rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); rx_done_total += rx_done; @@ -2437,10 +2443,10 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) return budget; } while (mtk_r32(eth, reg_map->pdma.irq_status) & - eth->soc->txrx.rx_irq_done_mask); + eth->soc->rx.irq_done_mask); if (napi_complete_done(napi, rx_done_total)) - mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask); return rx_done_total; } @@ -2449,7 +2455,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) { const struct mtk_soc_data *soc = eth->soc; struct mtk_tx_ring *ring = ð->tx_ring; - int i, sz = soc->txrx.txd_size; + int i, sz = soc->tx.desc_size; struct mtk_tx_dma_v2 *txd; int ring_size; u32 ofs, val; @@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) ring_size = MTK_QDMA_RING_SIZE; else - ring_size = MTK_DMA_SIZE; + ring_size = soc->tx.dma_size; ring->buf = kcalloc(ring_size, sizeof(*ring->buf), GFP_KERNEL); @@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth) goto no_tx_mem; if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) { - ring->dma = eth->sram_base + ring_size * sz; - ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz; + ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz; + ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz; } else { ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, &ring->phys, GFP_KERNEL); @@ -2572,14 +2578,14 @@ static void mtk_tx_clean(struct mtk_eth *eth) } if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) { dma_free_coherent(eth->dma_dev, - ring->dma_size * soc->txrx.txd_size, + ring->dma_size * soc->tx.desc_size, ring->dma, ring->phys); ring->dma = NULL; } if (ring->dma_pdma) { dma_free_coherent(eth->dma_dev, - ring->dma_size * soc->txrx.txd_size, + ring->dma_size * soc->tx.desc_size, ring->dma_pdma, ring->phys_pdma); ring->dma_pdma = NULL; } @@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) static int mtk_rx_alloc(struct mtk_eth *eth, 
int ring_no, int rx_flag) { const struct mtk_reg_map *reg_map = eth->soc->reg_map; + const struct mtk_soc_data *soc = eth->soc; struct mtk_rx_ring *ring; int rx_data_len, rx_dma_size, tx_ring_size; int i; @@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) tx_ring_size = MTK_QDMA_RING_SIZE; else - tx_ring_size = MTK_DMA_SIZE; + tx_ring_size = soc->tx.dma_size; if (rx_flag == MTK_RX_FLAGS_QDMA) { if (ring_no) @@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) rx_dma_size = MTK_HW_LRO_DMA_SIZE; } else { rx_data_len = ETH_DATA_LEN; - rx_dma_size = MTK_DMA_SIZE; + rx_dma_size = soc->rx.dma_size; } ring->frag_size = mtk_max_frag_size(rx_data_len); @@ -2634,15 +2641,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) || rx_flag != MTK_RX_FLAGS_NORMAL) { ring->dma = dma_alloc_coherent(eth->dma_dev, - rx_dma_size * eth->soc->txrx.rxd_size, - &ring->phys, GFP_KERNEL); + rx_dma_size * eth->soc->rx.desc_size, + &ring->phys, GFP_KERNEL); } else { struct mtk_tx_ring *tx_ring = ð->tx_ring; ring->dma = tx_ring->dma + tx_ring_size * - eth->soc->txrx.txd_size * (ring_no + 1); + eth->soc->tx.desc_size * (ring_no + 1); ring->phys = tx_ring->phys + tx_ring_size * - eth->soc->txrx.txd_size * (ring_no + 1); + eth->soc->tx.desc_size * (ring_no + 1); } if (!ring->dma) @@ -2653,7 +2660,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) dma_addr_t dma_addr; void *data; - rxd = ring->dma + i * eth->soc->txrx.rxd_size; + rxd = ring->dma + i * eth->soc->rx.desc_size; if (ring->page_pool) { data = mtk_page_pool_get_buff(ring->page_pool, &dma_addr, GFP_KERNEL); @@ -2690,7 +2697,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) rxd->rxd3 = 0; rxd->rxd4 = 0; - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v3_or_greater(eth)) { rxd->rxd5 = 0; rxd->rxd6 = 0; rxd->rxd7 = 0; @@ -2744,7 +2751,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_ if (!ring->data[i]) continue; - rxd = ring->dma + i * eth->soc->txrx.rxd_size; + rxd = ring->dma + i * eth->soc->rx.desc_size; if (!rxd->rxd1) continue; @@ -2761,7 +2768,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_ if (!in_sram && ring->dma) { dma_free_coherent(eth->dma_dev, - ring->dma_size * eth->soc->txrx.rxd_size, + ring->dma_size * eth->soc->rx.desc_size, ring->dma, ring->phys); ring->dma = NULL; } @@ -3124,7 +3131,7 @@ static void mtk_dma_free(struct mtk_eth *eth) netdev_reset_queue(eth->netdev[i]); if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { dma_free_coherent(eth->dma_dev, - MTK_QDMA_RING_SIZE * soc->txrx.txd_size, + MTK_QDMA_RING_SIZE * soc->tx.desc_size, eth->scratch_ring, eth->phy_scratch_ring); eth->scratch_ring = NULL; eth->phy_scratch_ring = 0; @@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth *eth) mtk_rx_clean(eth, ð->rx_ring[i], false); } - kfree(eth->scratch_head); + for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) { + kfree(eth->scratch_head[i]); + eth->scratch_head[i] = NULL; + } } static bool mtk_hw_reset_check(struct mtk_eth *eth) @@ -3174,7 +3184,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) eth->rx_events++; if (likely(napi_schedule_prep(ð->rx_napi))) { - mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_disable(eth, 
eth->soc->rx.irq_done_mask); __napi_schedule(ð->rx_napi); } @@ -3200,9 +3210,9 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth) const struct mtk_reg_map *reg_map = eth->soc->reg_map; if (mtk_r32(eth, reg_map->pdma.irq_mask) & - eth->soc->txrx.rx_irq_done_mask) { + eth->soc->rx.irq_done_mask) { if (mtk_r32(eth, reg_map->pdma.irq_status) & - eth->soc->txrx.rx_irq_done_mask) + eth->soc->rx.irq_done_mask) mtk_handle_irq_rx(irq, _eth); } if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { @@ -3220,10 +3230,10 @@ static void mtk_poll_controller(struct net_device *dev) struct mtk_eth *eth = mac->hw; mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); mtk_handle_irq_rx(eth->irq[2], dev); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask); } #endif @@ -3387,7 +3397,7 @@ static int mtk_open(struct net_device *dev) napi_enable(ð->tx_napi); napi_enable(ð->rx_napi); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask); + mtk_rx_irq_enable(eth, soc->rx.irq_done_mask); refcount_set(ð->dma_refcnt, 1); } else @@ -3471,7 +3481,7 @@ static int mtk_stop(struct net_device *dev) mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); napi_disable(ð->tx_napi); napi_disable(ð->rx_napi); @@ -3893,7 +3903,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) else mtk_hw_reset(eth); - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v3_or_greater(eth)) { /* Set FE to PDMAv2 if necessary */ val = mtk_r32(eth, MTK_FE_GLO_MISC); mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); @@ -3947,9 +3957,9 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) /* FE int grouping */ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); - mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4); + mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4); mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); - mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); + mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4); mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); if (mtk_is_netsys_v3_or_greater(eth)) { @@ -5039,11 +5049,18 @@ static const struct mtk_soc_data mt2701_data = { .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, .version = 1, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5059,11 +5076,18 @@ static const struct mtk_soc_data mt7621_data = { .offload_version = 1, .hash_offset = 2, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID, + .tx = { + .desc_size = 
sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5081,11 +5105,18 @@ static const struct mtk_soc_data mt7622_data = { .hash_offset = 2, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5102,11 +5133,18 @@ static const struct mtk_soc_data mt7623_data = { .hash_offset = 2, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, .disable_pll_modes = true, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5121,11 +5159,18 @@ static const struct mtk_soc_data mt7629_data = { .required_pctl = false, .has_accounting = true, .version = 1, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5143,13 +5188,20 @@ static const struct mtk_soc_data mt7981_data = { .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma_v2), - .rxd_size = sizeof(struct mtk_rx_dma_v2), - .rx_irq_done_mask = MTK_RX_DONE_INT_V2, - .rx_dma_l4_valid = RX_DMA_L4_VALID_V2, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, }; @@ -5165,13 +5217,20 @@ static const struct mtk_soc_data mt7986_data = { .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma_v2), - .rxd_size = sizeof(struct mtk_rx_dma_v2), - 
.rx_irq_done_mask = MTK_RX_DONE_INT_V2, - .rx_dma_l4_valid = RX_DMA_L4_VALID_V2, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, }; @@ -5187,13 +5246,20 @@ static const struct mtk_soc_data mt7988_data = { .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma_v2), - .rxd_size = sizeof(struct mtk_rx_dma_v2), - .rx_irq_done_mask = MTK_RX_DONE_INT_V2, - .rx_dma_l4_valid = RX_DMA_L4_VALID_V2, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(4K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma_v2), + .irq_done_mask = MTK_RX_DONE_INT_V2, + .dma_l4_valid = RX_DMA_L4_VALID_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, + .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), }, }; @@ -5204,13 +5270,19 @@ static const struct mtk_soc_data rt5350_data = { .required_clks = MT7628_CLKS_BITMAP, .required_pctl = false, .version = 1, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID_PDMA, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 9ae3b8a71d..a25c33b9a4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -32,7 +32,9 @@ #define MTK_TX_DMA_BUF_LEN 0x3fff #define MTK_TX_DMA_BUF_LEN_V2 0xffff #define MTK_QDMA_RING_SIZE 2048 -#define MTK_DMA_SIZE 512 +#define MTK_DMA_SIZE(x) (SZ_##x) +#define MTK_FQ_DMA_HEAD 32 +#define MTK_FQ_DMA_LENGTH 2048 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) #define MTK_DMA_DUMMY_DESC 0xffffffff @@ -327,8 +329,8 @@ /* QDMA descriptor txd3 */ #define TX_DMA_OWNER_CPU BIT(31) #define TX_DMA_LS0 BIT(30) -#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) -#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len) +#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset) +#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len) #define TX_DMA_SWC BIT(14) #define TX_DMA_PQID GENMASK(3, 0) #define TX_DMA_ADDR64_MASK GENMASK(3, 0) @@ -348,8 +350,8 @@ /* QDMA descriptor rxd2 */ #define RX_DMA_DONE BIT(31) #define RX_DMA_LSO BIT(30) -#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) -#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len) +#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset) +#define RX_DMA_GET_PLEN0(x) (((x) >> 
eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len) #define RX_DMA_VTAG BIT(15) #define RX_DMA_ADDR64_MASK GENMASK(3, 0) #if IS_ENABLED(CONFIG_64BIT) @@ -1153,10 +1155,9 @@ struct mtk_reg_map { * @foe_entry_size Foe table entry size. * @has_accounting Bool indicating support for accounting of * offloaded flows. - * @txd_size Tx DMA descriptor size. - * @rxd_size Rx DMA descriptor size. - * @rx_irq_done_mask Rx irq done register mask. - * @rx_dma_l4_valid Rx DMA valid register mask. + * @desc_size Tx/Rx DMA descriptor size. + * @irq_done_mask Rx irq done register mask. + * @dma_l4_valid Rx DMA valid register mask. * @dma_max_len Max DMA tx/rx buffer length. * @dma_len_offset Tx/Rx DMA length field offset. */ @@ -1174,13 +1175,20 @@ struct mtk_soc_data { bool has_accounting; bool disable_pll_modes; struct { - u32 txd_size; - u32 rxd_size; - u32 rx_irq_done_mask; - u32 rx_dma_l4_valid; + u32 desc_size; u32 dma_max_len; u32 dma_len_offset; - } txrx; + u32 dma_size; + u32 fq_dma_size; + } tx; + struct { + u32 desc_size; + u32 irq_done_mask; + u32 dma_l4_valid; + u32 dma_max_len; + u32 dma_len_offset; + u32 dma_size; + } rx; }; #define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000) @@ -1261,7 +1269,7 @@ struct mtk_eth { struct napi_struct rx_napi; void *scratch_ring; dma_addr_t phy_scratch_ring; - void *scratch_head; + void *scratch_head[MTK_FQ_DMA_HEAD]; struct clk *clks[MTK_CLK_MAX]; struct mii_bus *mii_bus; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 4957412ff1..20768ef2e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -969,19 +969,32 @@ static void cmd_work_handler(struct work_struct *work) bool poll_cmd = ent->polling; struct mlx5_cmd_layout *lay; struct mlx5_core_dev *dev; - unsigned long cb_timeout; - struct semaphore *sem; + unsigned long timeout; unsigned long flags; int alloc_ret; int cmd_mode; + complete(&ent->handling); + dev = container_of(cmd, struct mlx5_core_dev, cmd); - cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); + timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); - complete(&ent->handling); - sem = ent->page_queue ? 
&cmd->vars.pages_sem : &cmd->vars.sem; - down(sem); if (!ent->page_queue) { + if (down_timeout(&cmd->vars.sem, timeout)) { + mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n", + mlx5_command_str(ent->op), ent->op); + if (ent->callback) { + ent->callback(-EBUSY, ent->context); + mlx5_free_cmd_msg(dev, ent->out); + free_msg(dev, ent->in); + cmd_ent_put(ent); + } else { + ent->ret = -EBUSY; + complete(&ent->done); + } + complete(&ent->slotted); + return; + } alloc_ret = cmd_alloc_index(cmd, ent); if (alloc_ret < 0) { mlx5_core_err_rl(dev, "failed to allocate command entry\n"); @@ -994,10 +1007,11 @@ static void cmd_work_handler(struct work_struct *work) ent->ret = -EAGAIN; complete(&ent->done); } - up(sem); + up(&cmd->vars.sem); return; } } else { + down(&cmd->vars.pages_sem); ent->idx = cmd->vars.max_reg_cmds; spin_lock_irqsave(&cmd->alloc_lock, flags); clear_bit(ent->idx, &cmd->vars.bitmask); @@ -1005,6 +1019,8 @@ static void cmd_work_handler(struct work_struct *work) spin_unlock_irqrestore(&cmd->alloc_lock, flags); } + complete(&ent->slotted); + lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); @@ -1023,7 +1039,7 @@ static void cmd_work_handler(struct work_struct *work) ent->ts1 = ktime_get_ns(); cmd_mode = cmd->mode; - if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout)) + if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout)) cmd_ent_get(ent); set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); @@ -1143,6 +1159,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) ent->ret = -ECANCELED; goto out_err; } + + wait_for_completion(&ent->slotted); + if (cmd->mode == CMD_MODE_POLLING || ent->polling) wait_for_completion(&ent->done); else if (!wait_for_completion_timeout(&ent->done, timeout)) @@ -1157,6 +1176,9 @@ out_err: } else if (err == -ECANCELED) { mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", mlx5_command_str(ent->op), ent->op); + } else if (err == -EBUSY) { + mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n", + mlx5_command_str(ent->op), ent->op); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); @@ -1208,6 +1230,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, ent->polling = force_polling; init_completion(&ent->handling); + init_completion(&ent->slotted); if (!callback) init_completion(&ent->done); @@ -1225,7 +1248,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, return 0; /* mlx5_cmd_comp_handler() will put(ent) */ err = wait_func(dev, ent); - if (err == -ETIMEDOUT || err == -ECANCELED) + if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY) goto out_free; ds = ent->ts2 - ent->ts1; @@ -1611,6 +1634,9 @@ static int cmd_comp_notifier(struct notifier_block *nb, dev = container_of(cmd, struct mlx5_core_dev, cmd); eqe = data; + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + return NOTIFY_DONE; + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false); return NOTIFY_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 06592b9f04..9240cfe25d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -28,8 +28,10 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct 
mlx5_core_dev *mdev) { - /* AF_XDP doesn't support frames larger than PAGE_SIZE. */ - if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) { + /* AF_XDP doesn't support frames larger than PAGE_SIZE, + * and xsk->chunk_size is limited to 65535 bytes. + */ + if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) { mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size, MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE); return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index caa34b9c16..33e32584b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -102,8 +102,14 @@ static inline void mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb) { int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); + struct udphdr *udphdr; - udp_hdr(skb)->len = htons(payload_len); + if (skb->encapsulation) + udphdr = (struct udphdr *)skb_inner_transport_header(skb); + else + udphdr = udp_hdr(skb); + + udphdr->len = htons(payload_len); } struct mlx5e_accel_tx_state { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 41a2543a52..e51b03d4c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -750,8 +750,7 @@ err_fs: err_fs_ft: if (rx->allow_tunnel_mode) mlx5_eswitch_unblock_encap(mdev); - mlx5_del_flow_rules(rx->status.rule); - mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr); + mlx5_ipsec_rx_status_destroy(ipsec, rx); err_add: mlx5_destroy_flow_table(rx->ft.status); err_fs_ft_status: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 8206461484..359050f0b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features) if (!x || !x->xso.offload_handle) goto out_disable; - if (xo->inner_ipproto) { - /* Cannot support tunnel packet over IPsec tunnel mode - * because we cannot offload three IP header csum - */ - if (x->props.mode == XFRM_MODE_TUNNEL) - goto out_disable; - - /* Only support UDP or TCP L4 checksum */ - if (xo->inner_ipproto != IPPROTO_UDP && - xo->inner_ipproto != IPPROTO_TCP) - goto out_disable; - } + /* Only support UDP or TCP L4 checksum */ + if (xo->inner_ipproto && + xo->inner_ipproto != IPPROTO_UDP && + xo->inner_ipproto != IPPROTO_TCP) + goto out_disable; return features; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 319930c040..981a3e0588 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3790,7 +3790,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) mlx5e_fold_sw_stats64(priv, stats); } - stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer; + stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer; stats->rx_length_errors = PPORT_802_3_GET(pstats, a_in_range_length_errors) + @@ -4738,7 +4738,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, /* Verify if UDP port is being 
offloaded by HW */ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port)) - return features; + return vxlan_features_check(skb, features); #if IS_ENABLED(CONFIG_GENEVE) /* Support Geneve offload for default UDP port */ @@ -4764,7 +4764,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb, struct mlx5e_priv *priv = netdev_priv(netdev); features = vlan_features_check(skb, features); - features = vxlan_features_check(skb, features); /* Validate if the tunneled packet is being offloaded by HW */ if (skb->encapsulation && @@ -6058,7 +6057,7 @@ static int mlx5e_resume(struct auxiliary_device *adev) return 0; } -static int _mlx5e_suspend(struct auxiliary_device *adev) +static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg) { struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); struct mlx5e_priv *priv = mlx5e_dev->priv; @@ -6067,7 +6066,7 @@ static int _mlx5e_suspend(struct auxiliary_device *adev) struct mlx5_core_dev *pos; int i; - if (!netif_device_present(netdev)) { + if (!pre_netdev_reg && !netif_device_present(netdev)) { if (test_bit(MLX5E_STATE_DESTROYING, &priv->state)) mlx5_sd_for_each_dev(i, mdev, pos) mlx5e_destroy_mdev_resources(pos); @@ -6090,7 +6089,7 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx); if (actual_adev) - err = _mlx5e_suspend(actual_adev); + err = _mlx5e_suspend(actual_adev, false); mlx5_sd_cleanup(mdev); return err; @@ -6157,7 +6156,7 @@ static int _mlx5e_probe(struct auxiliary_device *adev) return 0; err_resume: - _mlx5e_suspend(adev); + _mlx5e_suspend(adev, true); err_profile_cleanup: profile->cleanup(priv); err_destroy_netdev: @@ -6197,7 +6196,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev) mlx5_core_uplink_netdev_set(mdev, NULL); mlx5e_dcbnl_delete_app(priv); unregister_netdev(priv->netdev); - _mlx5e_suspend(adev); + _mlx5e_suspend(adev, false); priv->profile->cleanup(priv); mlx5e_destroy_netdev(priv); mlx5e_devlink_port_unregister(mlx5e_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index e21a3b4128..0964b16ca5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop) *hopbyhop = 0; if (skb->encapsulation) { - ihs = skb_inner_tcp_all_headers(skb); + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + ihs = skb_inner_transport_offset(skb) + + sizeof(struct udphdr); + else + ihs = skb_inner_tcp_all_headers(skb); stats->tso_inner_packets++; stats->tso_inner_bytes += skb->len - ihs; } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index 1b9bc32efd..c5ea1d1d2b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -1874,7 +1874,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_ "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n", addr, vid, vport_num); NL_SET_ERR_MSG_FMT_MOD(extack, - "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n", + "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n", addr, vid, vport_num); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 
index 349e28a6dd..ef55674876 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -833,7 +833,7 @@ int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw, struct mlx5_eswitch *slave_esw, int max_slaves); void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw, struct mlx5_eswitch *slave_esw); -int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw); +int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw); bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev); void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev); @@ -925,7 +925,7 @@ mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw, static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; } static inline int -mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) +mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 844d3e3a65..e8caf12f4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2502,6 +2502,16 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw) esw_offloads_cleanup_reps(esw); } +static int __esw_offloads_load_rep(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, u8 rep_type) +{ + if (atomic_cmpxchg(&rep->rep_data[rep_type].state, + REP_REGISTERED, REP_LOADED) == REP_REGISTERED) + return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep); + + return 0; +} + static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, u8 rep_type) { @@ -2526,13 +2536,11 @@ static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) int err; rep = mlx5_eswitch_get_rep(esw, vport_num); - for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) - if (atomic_cmpxchg(&rep->rep_data[rep_type].state, - REP_REGISTERED, REP_LOADED) == REP_REGISTERED) { - err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep); - if (err) - goto err_reps; - } + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { + err = __esw_offloads_load_rep(esw, rep, rep_type); + if (err) + goto err_reps; + } return 0; @@ -3277,7 +3285,7 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw) esw_vport_destroy_offloads_acl_tables(esw, vport); } -int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) +int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw) { struct mlx5_eswitch_rep *rep; unsigned long i; @@ -3290,13 +3298,13 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) return 0; - ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK); + ret = __esw_offloads_load_rep(esw, rep, REP_IB); if (ret) return ret; mlx5_esw_for_each_rep(esw, i, rep) { if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED) - mlx5_esw_offloads_rep_load(esw, rep->vport); + __esw_offloads_load_rep(esw, rep, REP_IB); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index e7faf7e73c..6c7f2471fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev) do { if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) break; + if 
(pci_channel_offline(dev->pdev)) { + mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n"); + return -EACCES; + } cond_resched(); } while (!time_after(jiffies, end)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index ad38e31822..a6329ca2d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -248,6 +248,10 @@ recover_from_sw_reset: do { if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) break; + if (pci_channel_offline(dev->pdev)) { + mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n"); + goto unlock; + } msleep(20); } while (!time_after(jiffies, end)); @@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev) mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n"); return -ENODEV; } + if (pci_channel_offline(dev->pdev)) { + mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n"); + return -EACCES; + } msleep(100); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 69d482f7c5..58a452d20d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -720,6 +720,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) struct mlx5_core_dev *dev; u8 mode; #endif + bool roce_support; int i; for (i = 0; i < ldev->ports; i++) @@ -746,6 +747,11 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) if (mlx5_sriov_is_enabled(ldev->pf[i].dev)) return false; #endif + roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev); + for (i = 1; i < ldev->ports; i++) + if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support) + return false; + return true; } @@ -814,7 +820,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) if (shared_fdb) for (i = 0; i < ldev->ports; i++) if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) - mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); } static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) @@ -913,8 +919,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) } else if (roce_lag) { dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - for (i = 1; i < ldev->ports; i++) - mlx5_nic_vport_enable_roce(ldev->pf[i].dev); + for (i = 1; i < ldev->ports; i++) { + if (mlx5_get_roce_state(ldev->pf[i].dev)) + mlx5_nic_vport_enable_roce(ldev->pf[i].dev); + } } else if (shared_fdb) { int i; @@ -922,7 +930,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) mlx5_rescan_drivers_locked(dev0); for (i = 0; i < ldev->ports; i++) { - err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); if (err) break; } @@ -933,7 +941,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) mlx5_deactivate_lag(ldev); mlx5_lag_add_devices(ldev); for (i = 0; i < ldev->ports; i++) - mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); mlx5_core_err(dev0, "Failed to enable lag\n"); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index 82889f3050..571ea26edd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -99,7 +99,7 @@ 
static int enable_mpesw(struct mlx5_lag *ldev) dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); for (i = 0; i < ldev->ports; i++) { - err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); if (err) goto err_rescan_drivers; } @@ -113,7 +113,7 @@ err_rescan_drivers: err_add_devices: mlx5_lag_add_devices(ldev); for (i = 0; i < ldev->ports; i++) - mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); mlx5_mpesw_metadata_cleanup(ldev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index 101b3bb908..e12bc4cd80 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, &dest, 1); if (IS_ERR(lag_definer->rules[idx])) { err = PTR_ERR(lag_definer->rules[idx]); - while (i--) - while (j--) + do { + while (j--) { + idx = i * ldev->buckets + j; mlx5_del_flow_rules(lag_definer->rules[idx]); + } + j = ldev->buckets; + } while (i--); goto destroy_fg; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c index 6b774e0c27..d0b595ba61 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c @@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev) ret = -EBUSY; goto pci_unlock; } + if (pci_channel_offline(dev->pdev)) { + ret = -EACCES; + goto pci_unlock; + } /* Check if semaphore is already locked */ ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c index dd5d186dc6..f6deb5a3f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c @@ -100,10 +100,6 @@ static bool ft_create_alias_supported(struct mlx5_core_dev *dev) static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses) { - /* Feature is currently implemented for PFs only */ - if (!mlx5_core_is_pf(dev)) - return false; - /* Honor the SW implementation limit */ if (host_buses > MLX5_SD_MAX_GROUP_SZ) return false; @@ -162,6 +158,14 @@ static int sd_init(struct mlx5_core_dev *dev) bool sdm; int err; + /* Feature is currently implemented for PFs only */ + if (!mlx5_core_is_pf(dev)) + return 0; + + /* Block on embedded CPU PFs */ + if (mlx5_core_is_ecpf(dev)) + return 0; + if (!MLX5_CAP_MCAM_REG(dev, mpir)) return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 331ce47f51..459a836a5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) if (!err) mlx5_function_disable(dev, boot); + else + mlx5_stop_health_poll(dev, boot); + return err; } @@ -1680,6 +1683,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev) struct devlink *devlink = priv_to_devlink(dev); int err; + devl_lock(devlink); + devl_register(devlink); dev->state = MLX5_DEVICE_STATE_UP; err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT)); if (err) { @@ -1693,27 +1698,21 @@ int 
mlx5_init_one_light(struct mlx5_core_dev *dev) goto query_hca_caps_err; } - devl_lock(devlink); - devl_register(devlink); - err = mlx5_devlink_params_register(priv_to_devlink(dev)); if (err) { mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err); - goto params_reg_err; + goto query_hca_caps_err; } devl_unlock(devlink); return 0; -params_reg_err: - devl_unregister(devlink); - devl_unlock(devlink); query_hca_caps_err: - devl_unregister(devlink); - devl_unlock(devlink); mlx5_function_disable(dev, true); out: dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + devl_unregister(devlink); + devl_unlock(devlink); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 7ebe712808..b2986175d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -60,6 +60,13 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia goto remap_err; } + /* Peer devlink logic expects to work on unregistered devlink instance. */ + err = mlx5_core_peer_devlink_set(sf_dev, devlink); + if (err) { + mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err); + goto peer_devlink_set_err; + } + if (MLX5_ESWITCH_MANAGER(sf_dev->parent_mdev)) err = mlx5_init_one_light(mdev); else @@ -69,20 +76,10 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia goto init_one_err; } - err = mlx5_core_peer_devlink_set(sf_dev, devlink); - if (err) { - mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err); - goto peer_devlink_set_err; - } - return 0; -peer_devlink_set_err: - if (mlx5_dev_is_lightweight(sf_dev->mdev)) - mlx5_uninit_one_light(sf_dev->mdev); - else - mlx5_uninit_one(sf_dev->mdev); init_one_err: +peer_devlink_set_err: iounmap(mdev->iseg); remap_err: mlx5_mdev_uninit(mdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index af50ff9e5f..ce49c9514f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -539,7 +539,7 @@ mlxsw_sp_span_gretap6_route(const struct net_device *to_dev, if (!dst || dst->error) goto out; - rt6 = container_of(dst, struct rt6_info, dst); + rt6 = dst_rt6_info(dst); dev = dst->dev; *saddrp = fl6.saddr; diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 8a6ae171e3..def932035c 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -1148,8 +1148,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev, if (netdev->phydev) phy_ethtool_get_wol(netdev->phydev, wol); - wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | - WAKE_MAGIC | WAKE_PHY | WAKE_ARP; + if (wol->supported != adapter->phy_wol_supported) + netif_warn(adapter, drv, adapter->netdev, + "PHY changed its supported WOL! 
old=%x, new=%x\n", + adapter->phy_wol_supported, wol->supported); + + wol->supported |= MAC_SUPPORTED_WAKES; if (adapter->is_pci11x1x) wol->supported |= WAKE_MAGICSECURE; @@ -1164,7 +1168,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev, { struct lan743x_adapter *adapter = netdev_priv(netdev); + /* WAKE_MAGICSEGURE is a modifier of and only valid together with + * WAKE_MAGIC + */ + if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC)) + return -EINVAL; + + if (netdev->phydev) { + struct ethtool_wolinfo phy_wol; + int ret; + + phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported; + + /* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC + * for PHYs that do not support WAKE_MAGICSECURE + */ + if (wol->wolopts & WAKE_MAGICSECURE && + !(adapter->phy_wol_supported & WAKE_MAGICSECURE)) + phy_wol.wolopts &= ~WAKE_MAGIC; + + ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol); + if (ret && (ret != -EOPNOTSUPP)) + return ret; + + if (ret == -EOPNOTSUPP) + adapter->phy_wolopts = 0; + else + adapter->phy_wolopts = phy_wol.wolopts; + } else { + adapter->phy_wolopts = 0; + } + adapter->wolopts = 0; + wol->wolopts &= ~adapter->phy_wolopts; if (wol->wolopts & WAKE_UCAST) adapter->wolopts |= WAKE_UCAST; if (wol->wolopts & WAKE_MCAST) @@ -1185,10 +1221,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev, memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX); } + wol->wolopts = adapter->wolopts | adapter->phy_wolopts; device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts); - return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol) - : -ENETDOWN; + return 0; } #endif /* CONFIG_PM */ diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 75a988c0bd..ecde3582e3 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -3111,6 +3111,17 @@ static int lan743x_netdev_open(struct net_device *netdev) if (ret) goto close_tx; } + +#ifdef CONFIG_PM + if (adapter->netdev->phydev) { + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + + phy_ethtool_get_wol(netdev->phydev, &wol); + adapter->phy_wol_supported = wol.supported; + adapter->phy_wolopts = wol.wolopts; + } +#endif + return 0; close_tx: @@ -3568,7 +3579,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter) /* clear wake settings */ pmtctl = lan743x_csr_read(adapter, PMT_CTL); - pmtctl |= PMT_CTL_WUPS_MASK_; + pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_; pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ | PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ | PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_); @@ -3580,10 +3591,9 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter) pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_; - if (adapter->wolopts & WAKE_PHY) { - pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_; + if (adapter->phy_wolopts) pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_; - } + if (adapter->wolopts & WAKE_MAGIC) { wucsr |= MAC_WUCSR_MPEN_; macrx |= MAC_RX_RXEN_; @@ -3679,7 +3689,7 @@ static int lan743x_pm_suspend(struct device *dev) lan743x_csr_write(adapter, MAC_WUCSR2, 0); lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF); - if (adapter->wolopts) + if (adapter->wolopts || adapter->phy_wolopts) lan743x_pm_set_wol(adapter); if (adapter->is_pci11x1x) { @@ -3703,6 +3713,7 @@ static int lan743x_pm_resume(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = 
pci_get_drvdata(pdev); struct lan743x_adapter *adapter = netdev_priv(netdev); + u32 data; int ret; pci_set_power_state(pdev, PCI_D0); @@ -3721,6 +3732,30 @@ static int lan743x_pm_resume(struct device *dev) return ret; } + ret = lan743x_csr_read(adapter, MAC_WK_SRC); + netif_dbg(adapter, drv, adapter->netdev, + "Wakeup source : 0x%08X\n", ret); + + /* Clear the wol configuration and status bits. Note that + * the status bits are "Write One to Clear (W1C)" + */ + data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ | + MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ | + MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_; + lan743x_csr_write(adapter, MAC_WUCSR, data); + + data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ | + MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_; + lan743x_csr_write(adapter, MAC_WUCSR2, data); + + data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ | + MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ | + MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ | + MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ | + MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ | + MAC_WK_SRC_WK_FR_SAVED_; + lan743x_csr_write(adapter, MAC_WK_SRC, data); + /* open netdev when netdev is at running state while resume. * For instance, it is true when system wakesup after pm-suspend * However, it is false when system wakes up after suspend GUI menu @@ -3729,9 +3764,6 @@ static int lan743x_pm_resume(struct device *dev) lan743x_netdev_open(netdev); netif_device_attach(netdev); - ret = lan743x_csr_read(adapter, MAC_WK_SRC); - netif_info(adapter, drv, adapter->netdev, - "Wakeup source : 0x%08X\n", ret); return 0; } diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 645bc048e5..3b2585a384 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -61,6 +61,7 @@ #define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18) #define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15) #define PMT_CTL_EEE_WAKEUP_EN_ BIT(13) +#define PMT_CTL_RES_CLR_WKP_MASK_ GENMASK(9, 8) #define PMT_CTL_READY_ BIT(7) #define PMT_CTL_ETH_PHY_RST_ BIT(4) #define PMT_CTL_WOL_EN_ BIT(3) @@ -227,12 +228,31 @@ #define MAC_WUCSR (0x140) #define MAC_MP_SO_EN_ BIT(21) #define MAC_WUCSR_RFE_WAKE_EN_ BIT(14) +#define MAC_WUCSR_EEE_TX_WAKE_ BIT(13) +#define MAC_WUCSR_EEE_RX_WAKE_ BIT(11) +#define MAC_WUCSR_RFE_WAKE_FR_ BIT(9) +#define MAC_WUCSR_PFDA_FR_ BIT(7) +#define MAC_WUCSR_WUFR_ BIT(6) +#define MAC_WUCSR_MPR_ BIT(5) +#define MAC_WUCSR_BCAST_FR_ BIT(4) #define MAC_WUCSR_PFDA_EN_ BIT(3) #define MAC_WUCSR_WAKE_EN_ BIT(2) #define MAC_WUCSR_MPEN_ BIT(1) #define MAC_WUCSR_BCST_EN_ BIT(0) #define MAC_WK_SRC (0x144) +#define MAC_WK_SRC_ETH_PHY_WK_ BIT(17) +#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ BIT(16) +#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ BIT(15) +#define MAC_WK_SRC_EEE_TX_WK_ BIT(14) +#define MAC_WK_SRC_EEE_RX_WK_ BIT(13) +#define MAC_WK_SRC_RFE_FR_WK_ BIT(12) +#define MAC_WK_SRC_PFDA_FR_WK_ BIT(11) +#define MAC_WK_SRC_MP_FR_WK_ BIT(10) +#define MAC_WK_SRC_BCAST_FR_WK_ BIT(9) +#define MAC_WK_SRC_WU_FR_WK_ BIT(8) +#define MAC_WK_SRC_WK_FR_SAVED_ BIT(7) + #define MAC_MP_SO_HI (0x148) #define MAC_MP_SO_LO (0x14C) @@ -295,6 +315,10 @@ #define RFE_INDX(index) (0x580 + (index << 2)) #define MAC_WUCSR2 (0x600) +#define MAC_WUCSR2_NS_RCD_ BIT(7) +#define MAC_WUCSR2_ARP_RCD_ BIT(6) +#define MAC_WUCSR2_IPV6_TCPSYN_RCD_ BIT(5) +#define MAC_WUCSR2_IPV4_TCPSYN_RCD_ BIT(4) #define SGMII_ACC (0x720) #define SGMII_ACC_SGMII_BZY_ BIT(31) @@ 
-1018,6 +1042,8 @@ enum lan743x_sgmii_lsd { LINK_2500_SLAVE }; +#define MAC_SUPPORTED_WAKES (WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | \ + WAKE_MAGIC | WAKE_ARP) struct lan743x_adapter { struct net_device *netdev; struct mii_bus *mdiobus; @@ -1025,6 +1051,8 @@ struct lan743x_adapter { #ifdef CONFIG_PM u32 wolopts; u8 sopass[SOPASS_MAX]; + u32 phy_wolopts; + u32 phy_wol_supported; #endif struct pci_dev *pdev; struct lan743x_csr csr; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 2635ef8958..6695ed661e 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -474,14 +474,14 @@ static int lan966x_port_hwtstamp_set(struct net_device *dev, cfg->source != HWTSTAMP_SOURCE_PHYLIB) return -EOPNOTSUPP; + if (cfg->source == HWTSTAMP_SOURCE_NETDEV && !port->lan966x->ptp) + return -EOPNOTSUPP; + err = lan966x_ptp_setup_traps(port, cfg); if (err) return err; if (cfg->source == HWTSTAMP_SOURCE_NETDEV) { - if (!port->lan966x->ptp) - return -EOPNOTSUPP; - err = lan966x_ptp_hwtstamp_set(port, cfg, extack); if (err) { lan966x_ptp_del_traps(port); @@ -1087,8 +1087,6 @@ static int lan966x_probe(struct platform_device *pdev) platform_set_drvdata(pdev, lan966x); lan966x->dev = &pdev->dev; - lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL); - if (!device_get_mac_address(&pdev->dev, mac_addr)) { ether_addr_copy(lan966x->base_mac, mac_addr); } else { @@ -1179,6 +1177,8 @@ static int lan966x_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, -ENODEV, "no ethernet-ports child found\n"); + lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL); + /* init switch */ lan966x_init(lan966x); lan966x_stats_init(lan966x); @@ -1257,6 +1257,8 @@ cleanup_ports: destroy_workqueue(lan966x->stats_queue); mutex_destroy(&lan966x->stats_lock); + debugfs_remove_recursive(lan966x->debugfs_root); + return err; } diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 2729a2c5ac..ca814fe8a7 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -848,7 +848,7 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, } if (!wait_for_completion_timeout(&ctx->comp_event, - (msecs_to_jiffies(hwc->hwc_timeout) * HZ))) { + (msecs_to_jiffies(hwc->hwc_timeout)))) { dev_err(hwc->dev, "HWC: Request timed out!\n"); err = -ETIMEDOUT; goto out; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 7f0c6cdc37..0cd819bc4a 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -304,10 +304,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) if (ret) return ret; - if (qcq->napi.poll) - napi_enable(&qcq->napi); - if (qcq->flags & IONIC_QCQ_F_INTR) { + napi_enable(&qcq->napi); irq_set_affinity_hint(qcq->intr.vector, &qcq->intr.affinity_mask); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 5dba6d2d63..2427610f43 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -586,6 +586,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err); goto out_xdp_abort; } + 
buf_info->page = NULL; stats->xdp_tx++; /* the Tx completion will free the buffers */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c278f88930..8159b4c315 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1206,7 +1206,6 @@ out: static int qed_slowpath_wq_start(struct qed_dev *cdev) { struct qed_hwfn *hwfn; - char name[NAME_SIZE]; int i; if (IS_VF(cdev)) @@ -1215,11 +1214,11 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev) for_each_hwfn(cdev, i) { hwfn = &cdev->hwfns[i]; - snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", - cdev->pdev->bus->number, - PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); + hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x", + 0, 0, cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), + hwfn->abs_pf_id); - hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); if (!hwfn->slowpath_wq) { DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index ff3b89e902..ad06da0fda 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -98,10 +98,8 @@ qcaspi_info_show(struct seq_file *s, void *what) seq_printf(s, "IRQ : %d\n", qca->spi_dev->irq); - seq_printf(s, "INTR REQ : %u\n", - qca->intr_req); - seq_printf(s, "INTR SVC : %u\n", - qca->intr_svc); + seq_printf(s, "INTR : %lx\n", + qca->intr); seq_printf(s, "SPI max speed : %lu\n", (unsigned long)qca->spi_dev->max_speed_hz); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 5799ecc88a..8f7ce6b51a 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -35,6 +35,8 @@ #define MAX_DMA_BURST_LEN 5000 +#define SPI_INTR 0 + /* Modules parameters */ #define QCASPI_CLK_SPEED_MIN 1000000 #define QCASPI_CLK_SPEED_MAX 16000000 @@ -579,14 +581,14 @@ qcaspi_spi_thread(void *data) continue; } - if ((qca->intr_req == qca->intr_svc) && + if (!test_bit(SPI_INTR, &qca->intr) && !qca->txr.skb[qca->txr.head]) schedule(); set_current_state(TASK_RUNNING); - netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n", - qca->intr_req - qca->intr_svc, + netdev_dbg(qca->net_dev, "have work to do. 
int: %lu, tx_skb: %p\n", + qca->intr, qca->txr.skb[qca->txr.head]); qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE); @@ -600,8 +602,7 @@ qcaspi_spi_thread(void *data) msleep(QCASPI_QCA7K_REBOOT_TIME_MS); } - if (qca->intr_svc != qca->intr_req) { - qca->intr_svc = qca->intr_req; + if (test_and_clear_bit(SPI_INTR, &qca->intr)) { start_spi_intr_handling(qca, &intr_cause); if (intr_cause & SPI_INT_CPU_ON) { @@ -663,7 +664,7 @@ qcaspi_intr_handler(int irq, void *data) { struct qcaspi *qca = data; - qca->intr_req++; + set_bit(SPI_INTR, &qca->intr); if (qca->spi_thread) wake_up_process(qca->spi_thread); @@ -679,8 +680,7 @@ qcaspi_netdev_open(struct net_device *dev) if (!qca) return -EINVAL; - qca->intr_req = 1; - qca->intr_svc = 0; + set_bit(SPI_INTR, &qca->intr); qca->sync = QCASPI_SYNC_UNKNOWN; qcafrm_fsm_init_spi(&qca->frm_handle); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index d59cb2352c..8f4808695e 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -81,8 +81,7 @@ struct qcaspi { struct qcafrm_handle frm_handle; struct sk_buff *rx_skb; - unsigned int intr_req; - unsigned int intr_svc; + unsigned long intr; u16 reset_count; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 0fc5fe564a..7df2017c9a 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4335,11 +4335,11 @@ static void rtl8169_doorbell(struct rtl8169_private *tp) static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev) { - unsigned int frags = skb_shinfo(skb)->nr_frags; struct rtl8169_private *tp = netdev_priv(dev); unsigned int entry = tp->cur_tx % NUM_TX_DESC; struct TxDesc *txd_first, *txd_last; bool stop_queue, door_bell; + unsigned int frags; u32 opts[2]; if (unlikely(!rtl_tx_slots_avail(tp))) { @@ -4362,6 +4362,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd_first = tp->TxDescArray + entry; + frags = skb_shinfo(skb)->nr_frags; if (frags) { if (rtl8169_xmit_frags(tp, skb, opts, entry)) goto err_dma_1; @@ -4655,10 +4656,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } - if (napi_schedule_prep(&tp->napi)) { - rtl_irq_disable(tp); - __napi_schedule(&tp->napi); - } + rtl_irq_disable(tp); + napi_schedule(&tp->napi); out: rtl_ack_events(tp, status); diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 46eee747c6..45ef5ac078 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -156,8 +156,8 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l) writew(*wp++, a); } -#define SMC_inw(a, r) _swapw(readw((a) + (r))) -#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r)) +#define SMC_inw(a, r) ioread16be((a) + (r)) +#define SMC_outw(lp, v, a, r) iowrite16be(v, (a) + (r)) #define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l) #define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index e254b21fdb..65d7370b47 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -93,6 +93,7 @@ struct ethqos_emac_driver_data { bool has_emac_ge_3; const char *link_clk_name; bool has_integrated_pcs; + u32 dma_addr_width; 
struct dwmac4_addrs dwmac4_addrs; }; @@ -276,6 +277,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = { .has_emac_ge_3 = true, .link_clk_name = "phyaux", .has_integrated_pcs = true, + .dma_addr_width = 36, .dwmac4_addrs = { .dma_chan = 0x00008100, .dma_chan_offset = 0x1000, @@ -845,6 +847,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI; if (data->has_integrated_pcs) plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS; + if (data->dma_addr_width) + plat_dat->host_dma_width = data->dma_addr_width; if (ethqos->serdes_phy) { plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index dddcaa9220..64b21c83e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -261,6 +261,8 @@ struct stmmac_priv { struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp; struct stmmac_safety_stats sstats; struct plat_stmmacenet_data *plat; + /* Protect est parameters */ + struct mutex est_lock; struct dma_features dma_cap; struct stmmac_counters mmc; int hw_cap_support; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index f05bd757df..5ef52ef269 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -218,6 +218,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv) { u32 num_snapshot, ts_status, tsync_int; struct ptp_clock_event event; + u32 acr_value, channel; unsigned long flags; u64 ptp_time; int i; @@ -243,12 +244,15 @@ static void timestamp_interrupt(struct stmmac_priv *priv) num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >> GMAC_TIMESTAMP_ATSNS_SHIFT; + acr_value = readl(priv->ptpaddr + PTP_ACR); + channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value)); + for (i = 0; i < num_snapshot; i++) { read_lock_irqsave(&priv->ptp_lock, flags); get_ptptime(priv->ptpaddr, &ptp_time); read_unlock_irqrestore(&priv->ptp_lock, flags); event.type = PTP_CLOCK_EXTTS; - event.index = 0; + event.index = channel; event.timestamp = ptp_time; ptp_clock_event(priv->ptp_clock, &event); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index e04830a3a1..0c5aab6dd7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -70,11 +70,11 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) /* If EST is enabled, disabled it before adjust ptp time. 
*/ if (priv->plat->est && priv->plat->est->enable) { est_rst = true; - mutex_lock(&priv->plat->est->lock); + mutex_lock(&priv->est_lock); priv->plat->est->enable = false; stmmac_est_configure(priv, priv, priv->plat->est, priv->plat->clk_ptp_rate); - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); } write_lock_irqsave(&priv->ptp_lock, flags); @@ -87,7 +87,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) ktime_t current_time_ns, basetime; u64 cycle_time; - mutex_lock(&priv->plat->est->lock); + mutex_lock(&priv->est_lock); priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, ¤t_time); current_time_ns = timespec64_to_ktime(current_time); time.tv_nsec = priv->plat->est->btr_reserve[0]; @@ -104,7 +104,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) priv->plat->est->enable = true; ret = stmmac_est_configure(priv, priv, priv->plat->est, priv->plat->clk_ptp_rate); - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); if (ret) netdev_err(priv->dev, "failed to configure EST\n"); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index cce0071993..7d240a2b54 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv, struct tc_cbs_qopt_offload *qopt) { u32 tx_queues_count = priv->plat->tx_queues_to_use; + s64 port_transmit_rate_kbps; u32 queue = qopt->queue; - u32 ptr, speed_div; u32 mode_to_use; u64 value; + u32 ptr; int ret; /* Queue 0 is not AVB capable */ @@ -355,30 +356,30 @@ static int tc_setup_cbs(struct stmmac_priv *priv, if (!priv->dma_cap.av) return -EOPNOTSUPP; - /* Port Transmit Rate and Speed Divider */ - switch (priv->speed) { - case SPEED_10000: - ptr = 32; - speed_div = 10000000; - break; - case SPEED_5000: - ptr = 32; - speed_div = 5000000; - break; - case SPEED_2500: - ptr = 8; - speed_div = 2500000; - break; - case SPEED_1000: - ptr = 8; - speed_div = 1000000; - break; - case SPEED_100: - ptr = 4; - speed_div = 100000; - break; - default: - return -EOPNOTSUPP; + port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope; + + if (qopt->enable) { + /* Port Transmit Rate and Speed Divider */ + switch (div_s64(port_transmit_rate_kbps, 1000)) { + case SPEED_10000: + case SPEED_5000: + ptr = 32; + break; + case SPEED_2500: + case SPEED_1000: + ptr = 8; + break; + case SPEED_100: + ptr = 4; + break; + default: + netdev_err(priv->dev, + "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n", + port_transmit_rate_kbps); + return -EINVAL; + } + } else { + ptr = 0; } mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; @@ -398,10 +399,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv, } /* Final adjustments for HW */ - value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div); + value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps); priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); - value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div); + value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps); priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); value = qopt->hicredit * 1024ll * 8; @@ -1004,17 +1005,19 @@ static int tc_taprio_configure(struct stmmac_priv *priv, if (!plat->est) return -ENOMEM; - mutex_init(&priv->plat->est->lock); + mutex_init(&priv->est_lock); } else { + mutex_lock(&priv->est_lock); memset(plat->est, 0, 
sizeof(*plat->est)); + mutex_unlock(&priv->est_lock); } size = qopt->num_entries; - mutex_lock(&priv->plat->est->lock); + mutex_lock(&priv->est_lock); priv->plat->est->gcl_size = size; priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE; - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); for (i = 0; i < size; i++) { s64 delta_ns = qopt->entries[i].interval; @@ -1045,7 +1048,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv, priv->plat->est->gcl[i] = delta_ns | (gates << wid); } - mutex_lock(&priv->plat->est->lock); + mutex_lock(&priv->est_lock); /* Adjust for real system time */ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, ¤t_time); current_time_ns = timespec64_to_ktime(current_time); @@ -1068,7 +1071,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv, tc_taprio_map_maxsdu_txq(priv, qopt); if (fpe && !priv->dma_cap.fpesel) { - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); return -EOPNOTSUPP; } @@ -1079,7 +1082,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv, ret = stmmac_est_configure(priv, priv, priv->plat->est, priv->plat->clk_ptp_rate); - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); if (ret) { netdev_err(priv->dev, "failed to configure EST\n"); goto disable; @@ -1096,7 +1099,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv, disable: if (priv->plat->est) { - mutex_lock(&priv->plat->est->lock); + mutex_lock(&priv->est_lock); priv->plat->est->enable = false; stmmac_est_configure(priv, priv, priv->plat->est, priv->plat->clk_ptp_rate); @@ -1105,7 +1108,7 @@ disable: priv->xstats.max_sdu_txq_drop[i] = 0; priv->xstats.mtl_est_txq_hlbf[i] = 0; } - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); } priv->plat->fpe_cfg->enable = false; diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 9bd1df8308..d3a2fbb141 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -949,17 +949,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void gem_poll_controller(struct net_device *dev) -{ - struct gem *gp = netdev_priv(dev); - - disable_irq(gp->pdev->irq); - gem_interrupt(gp->pdev->irq, dev); - enable_irq(gp->pdev->irq); -} -#endif - static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct gem *gp = netdev_priv(dev); @@ -2839,9 +2828,6 @@ static const struct net_device_ops gem_netdev_ops = { .ndo_change_mtu = gem_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = gem_set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = gem_poll_controller, -#endif }; static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c index 6df53ab17f..902a271778 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c +++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c @@ -360,7 +360,7 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr) { const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, }; - rx_class_ft1_set_start_len(miig_rt, slice, 0, 6); + rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN); rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr); rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr); rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ); diff --git 
a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index b69af69a1c..61ebac7ba6 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -2152,7 +2152,12 @@ static int prueth_probe(struct platform_device *pdev) prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev; - emac_phy_connect(prueth->emac[PRUETH_MAC0]); + ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]); + if (ret) { + dev_err(dev, + "can't connect to MII0 PHY, error -%d", ret); + goto netdev_unregister; + } phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev); } @@ -2164,7 +2169,12 @@ static int prueth_probe(struct platform_device *pdev) } prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev; - emac_phy_connect(prueth->emac[PRUETH_MAC1]); + ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]); + if (ret) { + dev_err(dev, + "can't connect to MII1 PHY, error %d", ret); + goto netdev_unregister; + } phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev); } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 945c13d1a9..c09a6f7445 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1958,6 +1958,8 @@ int wx_sw_init(struct wx *wx) return -ENOMEM; } + bitmap_zero(wx->state, WX_STATE_NBITS); + return 0; } EXPORT_SYMBOL(wx_sw_init); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 6fae161cbc..07ba3a270a 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -2690,15 +2690,63 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) wx->rss_enabled = false; } - if (changed & - (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX)) + netdev->features = features; + + if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX) + wx->do_reset(netdev); + else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) wx_set_rx_mode(netdev); - return 1; + return 0; } EXPORT_SYMBOL(wx_set_features); +#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_STAG_RX) + +#define NETIF_VLAN_INSERTION_FEATURES (NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_TX) + +#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_HW_VLAN_STAG_FILTER) + +netdev_features_t wx_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct wx *wx = netdev_priv(netdev); + + if (changed & NETIF_VLAN_STRIPPING_FEATURES) { + if ((features & NETIF_VLAN_STRIPPING_FEATURES) != NETIF_VLAN_STRIPPING_FEATURES && + (features & NETIF_VLAN_STRIPPING_FEATURES) != 0) { + features &= ~NETIF_VLAN_STRIPPING_FEATURES; + features |= netdev->features & NETIF_VLAN_STRIPPING_FEATURES; + wx_err(wx, "802.1Q and 802.1ad VLAN stripping must be either both on or both off."); + } + } + + if (changed & NETIF_VLAN_INSERTION_FEATURES) { + if ((features & NETIF_VLAN_INSERTION_FEATURES) != NETIF_VLAN_INSERTION_FEATURES && + (features & NETIF_VLAN_INSERTION_FEATURES) != 0) { + features &= ~NETIF_VLAN_INSERTION_FEATURES; + features |= netdev->features & NETIF_VLAN_INSERTION_FEATURES; + wx_err(wx, "802.1Q and 802.1ad VLAN insertion must be either both on or both off."); + } + } + + if (changed & NETIF_VLAN_FILTERING_FEATURES) { + if ((features & 
NETIF_VLAN_FILTERING_FEATURES) != NETIF_VLAN_FILTERING_FEATURES && + (features & NETIF_VLAN_FILTERING_FEATURES) != 0) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + features |= netdev->features & NETIF_VLAN_FILTERING_FEATURES; + wx_err(wx, "802.1Q and 802.1ad VLAN filtering must be either both on or both off."); + } + } + + return features; +} +EXPORT_SYMBOL(wx_fix_features); + void wx_set_ring(struct wx *wx, u32 new_tx_count, u32 new_rx_count, struct wx_ring *temp_ring) { diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index ec909e8767..c41b29ea81 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -30,6 +30,8 @@ int wx_setup_resources(struct wx *wx); void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats); int wx_set_features(struct net_device *netdev, netdev_features_t features); +netdev_features_t wx_fix_features(struct net_device *netdev, + netdev_features_t features); void wx_set_ring(struct wx *wx, u32 new_tx_count, u32 new_rx_count, struct wx_ring *temp_ring); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 1fdeb464d5..5aaf7b1fa2 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -982,8 +982,13 @@ struct wx_hw_stats { u64 qmprc; }; +enum wx_state { + WX_STATE_RESETTING, + WX_STATE_NBITS, /* must be last */ +}; struct wx { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + DECLARE_BITMAP(state, WX_STATE_NBITS); void *priv; u8 __iomem *hw_addr; @@ -1071,6 +1076,8 @@ struct wx { u64 hw_csum_rx_good; u64 hw_csum_rx_error; u64 alloc_rx_buff_failed; + + void (*do_reset)(struct net_device *netdev); }; #define WX_INTR_ALL (~0ULL) @@ -1131,4 +1138,19 @@ static inline struct wx *phylink_to_wx(struct phylink_config *config) return container_of(config, struct wx, phylink_config); } +static inline int wx_set_state_reset(struct wx *wx) +{ + u8 timeout = 50; + + while (test_and_set_bit(WX_STATE_RESETTING, wx->state)) { + timeout--; + if (!timeout) + return -EBUSY; + + usleep_range(1000, 2000); + } + + return 0; +} + #endif /* _WX_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 786a652ae6..46a5a3e952 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -52,7 +52,7 @@ static int ngbe_set_ringparam(struct net_device *netdev, struct wx *wx = netdev_priv(netdev); u32 new_rx_count, new_tx_count; struct wx_ring *temp_ring; - int i; + int i, err = 0; new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); @@ -64,6 +64,10 @@ static int ngbe_set_ringparam(struct net_device *netdev, new_rx_count == wx->rx_ring_count) return 0; + err = wx_set_state_reset(wx); + if (err) + return err; + if (!netif_running(wx->netdev)) { for (i = 0; i < wx->num_tx_queues; i++) wx->tx_ring[i]->count = new_tx_count; @@ -72,14 +76,16 @@ static int ngbe_set_ringparam(struct net_device *netdev, wx->tx_ring_count = new_tx_count; wx->rx_ring_count = new_rx_count; - return 0; + goto clear_reset; } /* allocate temporary buffer to store rings in */ i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); - if (!temp_ring) - return -ENOMEM; + if (!temp_ring) { + err = -ENOMEM; + goto 
clear_reset; + } ngbe_down(wx); @@ -89,7 +95,9 @@ static int ngbe_set_ringparam(struct net_device *netdev, wx_configure(wx); ngbe_up(wx); - return 0; +clear_reset: + clear_bit(WX_STATE_RESETTING, wx->state); + return err; } static int ngbe_set_channels(struct net_device *dev, diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index fdd6b4f70b..e894e01d03 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -499,6 +499,7 @@ static const struct net_device_ops ngbe_netdev_ops = { .ndo_start_xmit = wx_xmit_frame, .ndo_set_rx_mode = wx_set_rx_mode, .ndo_set_features = wx_set_features, + .ndo_fix_features = wx_fix_features, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = wx_set_mac, .ndo_get_stats64 = wx_get_stats64, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index db675512ce..31fde3fa7c 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -19,7 +19,7 @@ static int txgbe_set_ringparam(struct net_device *netdev, struct wx *wx = netdev_priv(netdev); u32 new_rx_count, new_tx_count; struct wx_ring *temp_ring; - int i; + int i, err = 0; new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); @@ -31,6 +31,10 @@ static int txgbe_set_ringparam(struct net_device *netdev, new_rx_count == wx->rx_ring_count) return 0; + err = wx_set_state_reset(wx); + if (err) + return err; + if (!netif_running(wx->netdev)) { for (i = 0; i < wx->num_tx_queues; i++) wx->tx_ring[i]->count = new_tx_count; @@ -39,14 +43,16 @@ static int txgbe_set_ringparam(struct net_device *netdev, wx->tx_ring_count = new_tx_count; wx->rx_ring_count = new_rx_count; - return 0; + goto clear_reset; } /* allocate temporary buffer to store rings in */ i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); - if (!temp_ring) - return -ENOMEM; + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } txgbe_down(wx); @@ -55,7 +61,9 @@ static int txgbe_set_ringparam(struct net_device *netdev, txgbe_up(wx); - return 0; +clear_reset: + clear_bit(WX_STATE_RESETTING, wx->state); + return err; } static int txgbe_set_channels(struct net_device *dev, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index bd4624d14c..8c7a74981b 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -269,6 +269,8 @@ static int txgbe_sw_init(struct wx *wx) wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK; wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + wx->do_reset = txgbe_do_reset; + return 0; } @@ -421,6 +423,34 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc) return 0; } +static void txgbe_reinit_locked(struct wx *wx) +{ + int err = 0; + + netif_trans_update(wx->netdev); + + err = wx_set_state_reset(wx); + if (err) { + wx_err(wx, "wait device reset timeout\n"); + return; + } + + txgbe_down(wx); + txgbe_up(wx); + + clear_bit(WX_STATE_RESETTING, wx->state); +} + +void txgbe_do_reset(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + if (netif_running(netdev)) + txgbe_reinit_locked(wx); + else + txgbe_reset(wx); +} + static const struct net_device_ops txgbe_netdev_ops = { .ndo_open = txgbe_open, .ndo_stop = txgbe_close, 
@@ -428,6 +458,7 @@ static const struct net_device_ops txgbe_netdev_ops = { .ndo_start_xmit = wx_xmit_frame, .ndo_set_rx_mode = wx_set_rx_mode, .ndo_set_features = wx_set_features, + .ndo_fix_features = wx_fix_features, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = wx_set_mac, .ndo_get_stats64 = wx_get_stats64, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 1b4ff50d58..f434a7865c 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -134,6 +134,7 @@ extern char txgbe_driver_name[]; void txgbe_down(struct wx *wx); void txgbe_up(struct wx *wx); int txgbe_setup_tc(struct net_device *dev, u8 tc); +void txgbe_do_reset(struct net_device *netdev); #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 6c2835086b..76053d2c15 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -811,6 +811,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, const struct ip_tunnel_info *info) { + bool inner_proto_inherit = geneve->cfg.inner_proto_inherit; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); const struct ip_tunnel_key *key = &info->key; @@ -822,7 +823,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if (!skb_vlan_inet_prepare(skb)) + if (!skb_vlan_inet_prepare(skb, inner_proto_inherit)) return -EINVAL; if (!gs4) @@ -903,7 +904,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, } err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr), - geneve->cfg.inner_proto_inherit); + inner_proto_inherit); if (unlikely(err)) return err; @@ -919,6 +920,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, const struct ip_tunnel_info *info) { + bool inner_proto_inherit = geneve->cfg.inner_proto_inherit; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); const struct ip_tunnel_key *key = &info->key; @@ -929,7 +931,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if (!skb_vlan_inet_prepare(skb)) + if (!skb_vlan_inet_prepare(skb, inner_proto_inherit)) return -EINVAL; if (!gs6) @@ -991,7 +993,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, ttl = ttl ? 
: ip6_dst_hoplimit(dst); } err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr), - geneve->cfg.inner_proto_inherit); + inner_proto_inherit); if (unlikely(err)) return err; diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 2d5b021b4e..fef4eff775 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -439,7 +439,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb) memset(IPCB(skb), 0, sizeof(*IPCB(skb))); - err = ip_local_out(net, skb->sk, skb); + err = ip_local_out(net, NULL, skb); if (unlikely(net_xmit_eval(err))) DEV_STATS_INC(dev, tx_errors); else @@ -494,7 +494,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); - err = ip6_local_out(dev_net(dev), skb->sk, skb); + err = ip6_local_out(dev_net(dev), NULL, skb); if (unlikely(net_xmit_eval(err))) DEV_STATS_INC(dev, tx_errors); else diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 8330bc0bcb..d405809030 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -292,7 +292,8 @@ static int nsim_get_iflink(const struct net_device *dev) rcu_read_lock(); peer = rcu_dereference(nsim->peer); - iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0; + iflink = peer ? READ_ONCE(peer->netdev->ifindex) : + READ_ONCE(dev->ifindex); rcu_read_unlock(); return iflink; diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c index a4d2e76a8d..16789cd446 100644 --- a/drivers/net/netkit.c +++ b/drivers/net/netkit.c @@ -55,6 +55,7 @@ static void netkit_prep_forward(struct sk_buff *skb, bool xnet) skb_scrub_packet(skb, xnet); skb->priority = 0; nf_skip_egress(skb, true); + skb_reset_mac_header(skb); } static struct netkit *netkit_priv(const struct net_device *dev) @@ -78,6 +79,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) skb_orphan_frags(skb, GFP_ATOMIC))) goto drop; netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer))); + eth_skb_pkt_type(skb, peer); skb->dev = peer; entry = rcu_dereference(nk->active); if (entry) @@ -85,7 +87,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) switch (ret) { case NETKIT_NEXT: case NETKIT_PASS: - skb->protocol = eth_type_trans(skb, skb->dev); + eth_skb_pull_mac(skb); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) { dev_sw_netstats_tx_add(dev, 1, len); @@ -155,6 +157,16 @@ static void netkit_set_multicast(struct net_device *dev) /* Nothing to do, we receive whatever gets pushed to us! 
*/ } +static int netkit_set_macaddr(struct net_device *dev, void *sa) +{ + struct netkit *nk = netkit_priv(dev); + + if (nk->mode != NETKIT_L2) + return -EOPNOTSUPP; + + return eth_mac_addr(dev, sa); +} + static void netkit_set_headroom(struct net_device *dev, int headroom) { struct netkit *nk = netkit_priv(dev), *nk2; @@ -198,6 +210,7 @@ static const struct net_device_ops netkit_netdev_ops = { .ndo_start_xmit = netkit_xmit, .ndo_set_rx_mode = netkit_set_multicast, .ndo_set_rx_headroom = netkit_set_headroom, + .ndo_set_mac_address = netkit_set_macaddr, .ndo_get_iflink = netkit_get_iflink, .ndo_get_peer_dev = netkit_peer_dev, .ndo_get_stats64 = netkit_get_stats, @@ -300,9 +313,11 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[], if (!attr) return 0; - NL_SET_ERR_MSG_ATTR(extack, attr, - "Setting Ethernet address is not supported"); - return -EOPNOTSUPP; + if (nla_len(attr) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(attr))) + return -EADDRNOTAVAIL; + return 0; } static struct rtnl_link_ops netkit_link_ops; @@ -365,6 +380,9 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, strscpy(ifname, "nk%d", IFNAMSIZ); ifname_assign_type = NET_NAME_ENUM; } + if (mode != NETKIT_L2 && + (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS])) + return -EOPNOTSUPP; net = rtnl_link_get_net(src_net, tbp); if (IS_ERR(net)) @@ -379,7 +397,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, netif_inherit_tso_max(peer, dev); - if (mode == NETKIT_L2) + if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS])) eth_hw_addr_random(peer); if (ifmp && dev->ifindex) peer->ifindex = ifmp->ifi_index; @@ -402,7 +420,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, if (err < 0) goto err_configure_peer; - if (mode == NETKIT_L2) + if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS]) eth_hw_addr_random(dev); if (tb[IFLA_IFNAME]) nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c index 326c9770a6..c706429b22 100644 --- a/drivers/net/phy/dp83tg720.c +++ b/drivers/net/phy/dp83tg720.c @@ -17,6 +17,11 @@ #define DP83TG720S_PHY_RESET 0x1f #define DP83TG720S_HW_RESET BIT(15) +#define DP83TG720S_LPS_CFG3 0x18c +/* Power modes are documented as bit fields but used as values */ +/* Power Mode 0 is Normal mode */ +#define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0) + #define DP83TG720S_RGMII_DELAY_CTRL 0x602 /* In RGMII mode, Enable or disable the internal delay for RXD */ #define DP83TG720S_RGMII_RX_CLK_SEL BIT(1) @@ -31,11 +36,20 @@ static int dp83tg720_config_aneg(struct phy_device *phydev) { + int ret; + /* Autoneg is not supported and this PHY supports only one speed. * We need to care only about master/slave configuration if it was * changed by user. */ - return genphy_c45_pma_baset1_setup_master_slave(phydev); + ret = genphy_c45_pma_baset1_setup_master_slave(phydev); + if (ret) + return ret; + + /* Re-read role configuration to make changes visible even if + * the link is in administrative down state. + */ + return genphy_c45_pma_baset1_read_master_slave(phydev); } static int dp83tg720_read_status(struct phy_device *phydev) @@ -64,6 +78,8 @@ static int dp83tg720_read_status(struct phy_device *phydev) return ret; /* After HW reset we need to restore master/slave configuration. + * genphy_c45_pma_baset1_read_master_slave() call will be done + * by the dp83tg720_config_aneg() function. 
*/ ret = dp83tg720_config_aneg(phydev); if (ret) @@ -154,10 +170,24 @@ static int dp83tg720_config_init(struct phy_device *phydev) */ usleep_range(1000, 2000); - if (phy_interface_is_rgmii(phydev)) - return dp83tg720_config_rgmii_delay(phydev); + if (phy_interface_is_rgmii(phydev)) { + ret = dp83tg720_config_rgmii_delay(phydev); + if (ret) + return ret; + } + + /* In case the PHY is bootstrapped in managed mode, we need to + * wake it. + */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LPS_CFG3, + DP83TG720S_LPS_CFG3_PWR_MODE_0); + if (ret) + return ret; - return 0; + /* Make role configuration visible for ethtool on init and after + * rest. + */ + return genphy_c45_pma_baset1_read_master_slave(phydev); } static struct phy_driver dp83tg720_driver[] = { diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index ddb50a0e2b..4b22bb6393 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -785,6 +785,17 @@ static int ksz8061_config_init(struct phy_device *phydev) { int ret; + /* Chip can be powered down by the bootstrap code. */ + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + if (ret & BMCR_PDOWN) { + ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN); + if (ret < 0) + return ret; + usleep_range(1000, 2000); + } + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A); if (ret) return ret; @@ -1858,7 +1869,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = { {0x1c, 0x20, 0xeeee}, }; -static int ksz9477_config_init(struct phy_device *phydev) +static int ksz9477_phy_errata(struct phy_device *phydev) { int err; int i; @@ -1886,16 +1897,30 @@ static int ksz9477_config_init(struct phy_device *phydev) return err; } + err = genphy_restart_aneg(phydev); + if (err) + return err; + + return err; +} + +static int ksz9477_config_init(struct phy_device *phydev) +{ + int err; + + /* Only KSZ9897 family of switches needs this fix. */ + if ((phydev->phy_id & 0xf) == 1) { + err = ksz9477_phy_errata(phydev); + if (err) + return err; + } + /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes * in this switch shall be regarded as broken. */ if (phydev->dev_flags & MICREL_NO_EEE) phydev->eee_broken_modes = -1; - err = genphy_restart_aneg(phydev); - if (err) - return err; - return kszphy_config_init(phydev); } @@ -2004,6 +2029,71 @@ static int kszphy_resume(struct phy_device *phydev) return 0; } +static int ksz9477_resume(struct phy_device *phydev) +{ + int ret; + + /* No need to initialize registers if not powered down. */ + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + if (!(ret & BMCR_PDOWN)) + return 0; + + genphy_resume(phydev); + + /* After switching from power-down to normal mode, an internal global + * reset is automatically generated. Wait a minimum of 1 ms before + * read/write access to the PHY registers. + */ + usleep_range(1000, 2000); + + /* Only KSZ9897 family of switches needs this fix. */ + if ((phydev->phy_id & 0xf) == 1) { + ret = ksz9477_phy_errata(phydev); + if (ret) + return ret; + } + + /* Enable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_ENABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } + + return 0; +} + +static int ksz8061_resume(struct phy_device *phydev) +{ + int ret; + + /* This function can be called twice when the Ethernet device is on. 
*/ + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + if (!(ret & BMCR_PDOWN)) + return 0; + + genphy_resume(phydev); + usleep_range(1000, 2000); + + /* Re-program the value after chip is reset. */ + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A); + if (ret) + return ret; + + /* Enable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_ENABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } + + return 0; +} + static int kszphy_probe(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; @@ -3516,7 +3606,7 @@ static int lan8841_config_intr(struct phy_device *phydev) if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { err = phy_read(phydev, LAN8814_INTS); - if (err) + if (err < 0) return err; /* Enable / disable interrupts. It is OK to enable PTP interrupt @@ -3532,6 +3622,14 @@ static int lan8841_config_intr(struct phy_device *phydev) return err; err = phy_read(phydev, LAN8814_INTS); + if (err < 0) + return err; + + /* Getting a positive value doesn't mean that is an error, it + * just indicates what was the status. Therefore make sure to + * clear the value and say that there is no error. + */ + err = 0; } return err; @@ -4676,7 +4774,8 @@ static int lan8841_suspend(struct phy_device *phydev) struct kszphy_priv *priv = phydev->priv; struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv; - ptp_cancel_worker_sync(ptp_priv->ptp_clock); + if (ptp_priv->ptp_clock) + ptp_cancel_worker_sync(ptp_priv->ptp_clock); return genphy_suspend(phydev); } @@ -4813,10 +4912,11 @@ static struct phy_driver ksphy_driver[] = { /* PHY_BASIC_FEATURES */ .probe = kszphy_probe, .config_init = ksz8061_config_init, + .soft_reset = genphy_soft_reset, .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, .suspend = kszphy_suspend, - .resume = kszphy_resume, + .resume = ksz8061_resume, }, { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000ffffe, @@ -4970,7 +5070,7 @@ static struct phy_driver ksphy_driver[] = { .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, .suspend = genphy_suspend, - .resume = genphy_resume, + .resume = ksz9477_resume, .get_features = ksz9477_get_features, } }; diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c index b2d36a3a96..e5f8ac4b46 100644 --- a/drivers/net/phy/mxl-gpy.c +++ b/drivers/net/phy/mxl-gpy.c @@ -107,6 +107,7 @@ struct gpy_priv { u8 fw_major; u8 fw_minor; + u32 wolopts; /* It takes 3 seconds to fully switch out of loopback mode before * it can safely re-enter loopback mode. Record the time when @@ -221,6 +222,15 @@ static int gpy_hwmon_register(struct phy_device *phydev) } #endif +static int gpy_ack_interrupt(struct phy_device *phydev) +{ + int ret; + + /* Clear all pending interrupts */ + ret = phy_read(phydev, PHY_ISTAT); + return ret < 0 ? ret : 0; +} + static int gpy_mbox_read(struct phy_device *phydev, u32 addr) { struct gpy_priv *priv = phydev->priv; @@ -262,16 +272,8 @@ out: static int gpy_config_init(struct phy_device *phydev) { - int ret; - - /* Mask all interrupts */ - ret = phy_write(phydev, PHY_IMASK, 0); - if (ret) - return ret; - - /* Clear all pending interrupts */ - ret = phy_read(phydev, PHY_ISTAT); - return ret < 0 ? ret : 0; + /* Nothing to configure. 
Configuration Requirement Placeholder */ + return 0; } static int gpy21x_config_init(struct phy_device *phydev) @@ -627,11 +629,23 @@ static int gpy_read_status(struct phy_device *phydev) static int gpy_config_intr(struct phy_device *phydev) { + struct gpy_priv *priv = phydev->priv; u16 mask = 0; + int ret; + + ret = gpy_ack_interrupt(phydev); + if (ret) + return ret; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) mask = PHY_IMASK_MASK; + if (priv->wolopts & WAKE_MAGIC) + mask |= PHY_IMASK_WOL; + + if (priv->wolopts & WAKE_PHY) + mask |= PHY_IMASK_LSTC; + return phy_write(phydev, PHY_IMASK, mask); } @@ -678,6 +692,7 @@ static int gpy_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { struct net_device *attach_dev = phydev->attached_dev; + struct gpy_priv *priv = phydev->priv; int ret; if (wol->wolopts & WAKE_MAGIC) { @@ -725,6 +740,8 @@ static int gpy_set_wol(struct phy_device *phydev, ret = phy_read(phydev, PHY_ISTAT); if (ret < 0) return ret; + + priv->wolopts |= WAKE_MAGIC; } else { /* Disable magic packet matching */ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, @@ -732,6 +749,13 @@ static int gpy_set_wol(struct phy_device *phydev, WOL_EN); if (ret < 0) return ret; + + /* Disable the WOL interrupt */ + ret = phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_WOL); + if (ret < 0) + return ret; + + priv->wolopts &= ~WAKE_MAGIC; } if (wol->wolopts & WAKE_PHY) { @@ -748,9 +772,11 @@ static int gpy_set_wol(struct phy_device *phydev, if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC)) phy_trigger_machine(phydev); + priv->wolopts |= WAKE_PHY; return 0; } + priv->wolopts &= ~WAKE_PHY; /* Disable the link state change interrupt */ return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC); } @@ -758,18 +784,10 @@ static int gpy_set_wol(struct phy_device *phydev, static void gpy_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { - int ret; + struct gpy_priv *priv = phydev->priv; wol->supported = WAKE_MAGIC | WAKE_PHY; - wol->wolopts = 0; - - ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL); - if (ret & WOL_EN) - wol->wolopts |= WAKE_MAGIC; - - ret = phy_read(phydev, PHY_IMASK); - if (ret & PHY_IMASK_LSTC) - wol->wolopts |= WAKE_PHY; + wol->wolopts = priv->wolopts; } static int gpy_loopback(struct phy_device *phydev, bool enable) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index f75c9eb395..52b71c7e78 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -385,18 +385,23 @@ static void sfp_fixup_rollball(struct sfp *sfp) sfp->phy_t_retry = msecs_to_jiffies(1000); } -static void sfp_fixup_fs_10gt(struct sfp *sfp) +static void sfp_fixup_fs_2_5gt(struct sfp *sfp) { - sfp_fixup_10gbaset_30m(sfp); sfp_fixup_rollball(sfp); - /* The RollBall fixup is not enough for FS modules, the AQR chip inside + /* The RollBall fixup is not enough for FS modules, the PHY chip inside * them does not return 0xffff for PHY ID registers in all MMDs for the * while initializing. They need a 4 second wait before accessing PHY. */ sfp->module_t_wait = msecs_to_jiffies(4000); } +static void sfp_fixup_fs_10gt(struct sfp *sfp) +{ + sfp_fixup_10gbaset_30m(sfp); + sfp_fixup_fs_2_5gt(sfp); +} + static void sfp_fixup_halny_gsfp(struct sfp *sfp) { /* Ignore the TX_FAULT and LOS signals on this module. @@ -472,6 +477,10 @@ static const struct sfp_quirk sfp_quirks[] = { // Rollball protocol to talk to the PHY. SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt), + // Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and + // needs 4 sec wait before probing the PHY. 
+ SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt), + // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd // NRZ in their EEPROM SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex, @@ -488,9 +497,6 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex, sfp_fixup_ignore_tx_fault), - // FS 2.5G Base-T - SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g), - // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report // 2500MBd NRZ in their EEPROM SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex), @@ -502,6 +508,9 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt), SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt), + // OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator + SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault), + SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc), SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g), SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc), @@ -2418,8 +2427,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event) /* Handle remove event globally, it resets this state machine */ if (event == SFP_E_REMOVE) { - if (sfp->sm_mod_state > SFP_MOD_PROBE) - sfp_sm_mod_remove(sfp); + sfp_sm_mod_remove(sfp); sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0); return; } diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 7b8afa589a..284375f662 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1141,17 +1141,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) continue; } - /* Clone SKB */ - new_skb = skb_clone(skb, GFP_ATOMIC); + new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len); if (!new_skb) goto err; - new_skb->len = pkt_len; + skb_put(new_skb, pkt_len); + memcpy(new_skb->data, skb->data, pkt_len); skb_pull(new_skb, AQ_RX_HW_PAD); - skb_set_tail_pointer(new_skb, new_skb->len); - new_skb->truesize = SKB_TRUESIZE(new_skb->len); if (aqc111_data->rx_checksum) aqc111_rx_checksum(new_skb, pkt_desc); diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 56ede5fa02..94d8b647f0 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -174,7 +174,6 @@ struct ax88179_data { u32 wol_supported; u32 wolopts; u8 disconnecting; - u8 initialized; }; struct ax88179_int_data { @@ -1676,12 +1675,21 @@ static int ax88179_reset(struct usbnet *dev) static int ax88179_net_reset(struct usbnet *dev) { - struct ax88179_data *ax179_data = dev->driver_priv; + u16 tmp16; - if (ax179_data->initialized) + ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID, GMII_PHY_PHYSR, + 2, &tmp16); + if (tmp16) { + ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE, + 2, 2, &tmp16); + if (!(tmp16 & AX_MEDIUM_RECEIVE_EN)) { + tmp16 |= AX_MEDIUM_RECEIVE_EN; + ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE, + 2, 2, &tmp16); + } + } else { ax88179_reset(dev); - else - ax179_data->initialized = 1; + } return 0; } diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 97afd7335d..01a3b2417a 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -778,7 +778,8 @@ static int rtl8150_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *ecmd) { rtl8150_t *dev = netdev_priv(netdev); - short lpa, bmcr; + short lpa = 0; + short bmcr = 0; u32 supported; supported = (SUPPORTED_10baseT_Half | diff --git a/drivers/net/usb/smsc95xx.c 
b/drivers/net/usb/smsc95xx.c index 2fa46baa58..8e82184be5 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -879,7 +879,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev) static int smsc95xx_reset(struct usbnet *dev) { struct smsc95xx_priv *pdata = dev->driver_priv; - u32 read_buf, write_buf, burst_cap; + u32 read_buf, burst_cap; int ret = 0, timeout; netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n"); @@ -1003,10 +1003,13 @@ static int smsc95xx_reset(struct usbnet *dev) return ret; netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf); + ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf); + if (ret < 0) + return ret; /* Configure GPIO pins as LED outputs */ - write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED | - LED_GPIO_CFG_FDX_LED; - ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf); + read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED | + LED_GPIO_CFG_FDX_LED; + ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf); if (ret < 0) return ret; @@ -1810,9 +1813,11 @@ static int smsc95xx_reset_resume(struct usb_interface *intf) static void smsc95xx_rx_csum_offload(struct sk_buff *skb) { - skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2); + u16 *csum_ptr = (u16 *)(skb_tail_pointer(skb) - 2); + + skb->csum = (__force __wsum)get_unaligned(csum_ptr); skb->ip_summed = CHECKSUM_COMPLETE; - skb_trim(skb, skb->len - 2); + skb_trim(skb, skb->len - 2); /* remove csum */ } static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) @@ -1870,25 +1875,22 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (dev->net->features & NETIF_F_RXCSUM) smsc95xx_rx_csum_offload(skb); skb_trim(skb, skb->len - 4); /* remove fcs */ - skb->truesize = size + sizeof(struct sk_buff); return 1; } - ax_skb = skb_clone(skb, GFP_ATOMIC); + ax_skb = netdev_alloc_skb_ip_align(dev->net, size); if (unlikely(!ax_skb)) { netdev_warn(dev->net, "Error allocating skb\n"); return 0; } - ax_skb->len = size; - ax_skb->data = packet; - skb_set_tail_pointer(ax_skb, size); + skb_put(ax_skb, size); + memcpy(ax_skb->data, packet, size); if (dev->net->features & NETIF_F_RXCSUM) smsc95xx_rx_csum_offload(ax_skb); skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ - ax_skb->truesize = size + sizeof(struct sk_buff); usbnet_skb_return(dev, ax_skb); } diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 3164451e10..0a662e42ed 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -421,19 +421,15 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) skb_pull(skb, 3); skb->len = len; skb_set_tail_pointer(skb, len); - skb->truesize = len + sizeof(struct sk_buff); return 2; } - /* skb_clone is used for address align */ - sr_skb = skb_clone(skb, GFP_ATOMIC); + sr_skb = netdev_alloc_skb_ip_align(dev->net, len); if (!sr_skb) return 0; - sr_skb->len = len; - sr_skb->data = skb->data + 3; - skb_set_tail_pointer(sr_skb, len); - sr_skb->truesize = len + sizeof(struct sk_buff); + skb_put(sr_skb, len); + memcpy(sr_skb->data, skb->data + 3, len); usbnet_skb_return(dev, sr_skb); skb_pull(skb, len + SR_RX_OVERHEAD); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 115c3c5414..290bec2926 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1225,6 +1225,10 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev, if (unlikely(hdr->hdr.gso_type)) goto err_xdp; + /* Partially checksummed packets must be dropped. 
*/ + if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) + goto err_xdp; + buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -1542,6 +1546,10 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi, if (unlikely(hdr->hdr.gso_type)) return NULL; + /* Partially checksummed packets must be dropped. */ + if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) + return NULL; + /* Now XDP core assumes frag size is PAGE_SIZE, but buffers * with headroom may add hole in truesize, which * make their length exceed PAGE_SIZE. So we disabled the @@ -1808,6 +1816,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, struct net_device *dev = vi->dev; struct sk_buff *skb; struct virtio_net_common_hdr *hdr; + u8 flags; if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); @@ -1816,6 +1825,15 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, return; } + /* 1. Save the flags early, as the XDP program might overwrite them. + * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID + * stay valid after XDP processing. + * 2. XDP doesn't work with partially checksummed packets (refer to + * virtnet_xdp_set()), so packets marked as + * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing. + */ + flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags; + if (vi->mergeable_rx_bufs) skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); @@ -1831,7 +1849,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); - if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) + if (flags & VIRTIO_NET_HDR_F_DATA_VALID) skb->ip_summed = CHECKSUM_UNNECESSARY; if (virtio_net_hdr_to_skb(skb, &hdr->hdr, @@ -3589,10 +3607,10 @@ static void virtnet_rx_dim_work(struct work_struct *work) if (err) pr_debug("%s: Failed to send dim parameters on rxq%d\n", dev->name, qnum); - dim->state = DIM_START_MEASURE; } } + dim->state = DIM_START_MEASURE; rtnl_unlock(); } @@ -4698,8 +4716,16 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= dev->hw_features & NETIF_F_ALL_TSO; /* (!csum && gso) case will be fixed by register_netdev() */ } - if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) - dev->features |= NETIF_F_RXCSUM; + + /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't + * need to calculate checksums for partially checksummed packets, + * as they're considered valid by the upper layer. + * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only + * receives fully checksummed packets. The device may assist in + * validating these packets' checksums, so the driver won't have to. 
+ */ + dev->features |= NETIF_F_RXCSUM; + if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) dev->features |= NETIF_F_GRO_HW; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 0578864792..beebe09eb8 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter) rq->data_ring.base, rq->data_ring.basePA); rq->data_ring.base = NULL; - rq->data_ring.desc_size = 0; } + rq->data_ring.desc_size = 0; } } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index bb95ce43cd..c3af9ad5e1 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -653,7 +653,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk, skb->dev = dev; rcu_read_lock(); - nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); + nexthop = rt6_nexthop(dst_rt6_info(dst), &ipv6_hdr(skb)->daddr); neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); @@ -860,7 +860,7 @@ static int vrf_rt6_create(struct net_device *dev) static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - struct rtable *rt = (struct rtable *)dst; + struct rtable *rt = dst_rtable(dst); struct net_device *dev = dst->dev; unsigned int hh_len = LL_RESERVED_SPACE(dev); struct neighbour *neigh; diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 3a9148fb14..b2d054f59f 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev, struct vxlan_fdb *f; u32 ifindex = 0; + /* Ignore packets from invalid src-address */ + if (!is_valid_ether_addr(src_mac)) + return true; + #if IS_ENABLED(CONFIG_IPV6) if (src_ip->sa.sa_family == AF_INET6 && (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)) @@ -1615,10 +1619,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan, if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) return false; - /* Ignore packets from invalid src-address */ - if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) - return false; - /* Get address from the outer IP header */ if (vxlan_get_sk_family(vs) == AF_INET) { saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; @@ -2528,7 +2528,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } if (!info) { - u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; + u32 rt6i_flags = dst_rt6_info(ndst)->rt6i_flags; err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6, dst_port, ifindex, vni, diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 815f8f599f..5a55db349c 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1594,6 +1594,20 @@ static int ar5523_probe(struct usb_interface *intf, struct ar5523 *ar; int error = -ENOMEM; + static const u8 bulk_ep_addr[] = { + AR5523_CMD_TX_PIPE | USB_DIR_OUT, + AR5523_DATA_TX_PIPE | USB_DIR_OUT, + AR5523_CMD_RX_PIPE | USB_DIR_IN, + AR5523_DATA_RX_PIPE | USB_DIR_IN, + 0}; + + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) { + dev_err(&dev->dev, + "Could not find all expected endpoints\n"); + error = -ENODEV; + goto out; + } + /* * Load firmware if the device requires it. 
This will return * -ENXIO on success and we'll get called back after the usb diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index f02a308a9f..34654f710d 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -171,8 +171,10 @@ struct ath_common { unsigned int clockrate; spinlock_t cc_lock; - struct ath_cycle_counters cc_ani; - struct ath_cycle_counters cc_survey; + struct_group(cc, + struct ath_cycle_counters cc_ani; + struct ath_cycle_counters cc_survey; + ); struct ath_regulatory regulatory; struct ath_regulatory reg_world_copy; diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index e6ea884caf..4f385f4a8c 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -45,6 +45,7 @@ config ATH10K_SNOC depends on ATH10K depends on ARCH_QCOM || COMPILE_TEST depends on QCOM_SMEM + depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n select QCOM_SCM select QCOM_QMI_HELPERS help diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 9ce6f49ab2..fa5e2e6518 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -720,6 +720,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .max_spatial_stream = 4, .fw = { .dir = WCN3990_HW_1_0_FW_DIR, + .board = WCN3990_HW_1_0_BOARD_DATA_FILE, + .board_size = WCN3990_BOARD_DATA_SZ, + .board_ext_size = WCN3990_BOARD_EXT_DATA_SZ, }, .sw_decrypt_mcast_mgmt = true, .rx_desc_ops = &wcn3990_rx_desc_ops, diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index 394bf3c32a..0f6de862c3 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -439,7 +439,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file, } out: mutex_unlock(&ar->conf_mutex); - return count; + return ret ?: count; } static const struct file_operations fops_peer_debug_trigger = { diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 93c0730919..9aa2d821b5 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -133,6 +133,7 @@ enum qca9377_chip_id_rev { /* WCN3990 1.0 definitions */ #define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990 #define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw1.0" +#define WCN3990_HW_1_0_BOARD_DATA_FILE "board.bin" #define ATH10K_FW_FILE_BASE "firmware" #define ATH10K_FW_API_MAX 6 diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index ec556bb88d..ba37e6c7ce 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -491,4 +491,7 @@ struct host_interest { #define QCA4019_BOARD_DATA_SZ 12064 #define QCA4019_BOARD_EXT_DATA_SZ 0 +#define WCN3990_BOARD_DATA_SZ 26328 +#define WCN3990_BOARD_EXT_DATA_SZ 0 + #endif /* __TARGADDRS_H__ */ diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 2e9661f4be..80d255aaff 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1763,12 +1763,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch, int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) { - unsigned long time_left; + unsigned long time_left, i; time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ); - if (!time_left) - return -ETIMEDOUT; + if (!time_left) { + /* Sometimes the PCI HIF doesn't receive interrupt + * for the service ready message even if the buffer + * was completed. PCIe sniffer shows that it's + * because the corresponding CE ring doesn't fire + * it. Work around this by polling CE rings once. + */ + ath10k_warn(ar, "failed to receive service ready completion, polling..\n"); + + for (i = 0; i < CE_COUNT; i++) + ath10k_hif_send_complete_check(ar, i, 1); + + time_left = wait_for_completion_timeout(&ar->wmi.service_ready, + WMI_SERVICE_READY_TIMEOUT_HZ); + if (!time_left) { + ath10k_warn(ar, "polling timed out\n"); + return -ETIMEDOUT; + } + + ath10k_warn(ar, "service ready completion received, continuing normally\n"); + } + return 0; } diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index c78bce19bd..5d07585e59 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -595,7 +595,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .coldboot_cal_ftm = true, .cbcal_restart_fw = false, .fw_mem_mode = 0, - .num_vdevs = 16 + 1, + .num_vdevs = 3, .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 9f4bf41a3d..790277f547 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1231,14 +1231,7 @@ static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif) enable_ps = arvif->ps; - if (!arvif->is_started) { - /* mac80211 can update vif powersave state while disconnected. - * Firmware doesn't behave nicely and consumes more power than - * necessary if PS is disabled on a non-started vdev. Hence - * force-enable PS for non-running vdevs.
- */ - psmode = WMI_STA_PS_MODE_ENABLED; - } else if (enable_ps) { + if (enable_ps) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; @@ -7896,8 +7889,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; - struct cur_regulatory_info *reg_info; - enum ieee80211_ap_reg_power power_type; mutex_lock(&ar->conf_mutex); @@ -7908,17 +7899,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, if (ath11k_wmi_supports_6ghz_cc_ext(ar) && ctx->def.chan->band == NL80211_BAND_6GHZ && arvif->vdev_type == WMI_VDEV_TYPE_STA) { - reg_info = &ab->reg_info_store[ar->pdev_idx]; - power_type = vif->bss_conf.power_type; - - ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type); - - if (power_type == IEEE80211_REG_UNSET_AP) { - ret = -EINVAL; - goto out; - } - - ath11k_reg_handle_chan_list(ab, reg_info, power_type); arvif->chanctx = *ctx; ath11k_mac_parse_tx_pwr_env(ar, vif, ctx); } @@ -9532,6 +9512,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + enum ieee80211_ap_reg_power power_type; + struct cur_regulatory_info *reg_info; struct ath11k_peer *peer; int ret = 0; @@ -9611,6 +9593,29 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", sta->addr, arvif->vdev_id, ret); } + + if (!ret && + ath11k_wmi_supports_6ghz_cc_ext(ar) && + arvif->vdev_type == WMI_VDEV_TYPE_STA && + arvif->chanctx.def.chan && + arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) { + reg_info = &ar->ab->reg_info_store[ar->pdev_idx]; + power_type = vif->bss_conf.power_type; + + if (power_type == IEEE80211_REG_UNSET_AP) { + ath11k_warn(ar->ab, "invalid power type %d\n", + power_type); + ret = -EINVAL; + } else { + ret = ath11k_reg_handle_chan_list(ar->ab, + reg_info, + power_type); + if (ret) + ath11k_warn(ar->ab, + "failed to handle chan list with power type %d\n", + power_type); + } + } } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { spin_lock_bh(&ar->ab->base_lock); diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 391b6fb2bd..bff4598de4 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -1132,7 +1132,6 @@ static void ath12k_core_reset(struct work_struct *work) ATH12K_RECOVER_START_TIMEOUT_HZ); ath12k_hif_power_down(ab); - ath12k_qmi_free_resource(ab); ath12k_hif_power_up(ab); ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n"); diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 52a5fb8b03..82ef4d4da6 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -6286,14 +6286,24 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar, enum nl80211_band band, enum nl80211_iftype type) { - struct ieee80211_sta_eht_cap *eht_cap; + struct ieee80211_sta_eht_cap *eht_cap = NULL; enum wmi_phy_mode down_mode; + int n = ar->mac.sbands[band].n_iftype_data; + int i; + struct ieee80211_sband_iftype_data *data; if (mode < MODE_11BE_EHT20) return mode; - eht_cap = &ar->mac.iftype[band][type].eht_cap; - if (eht_cap->has_eht) + data = ar->mac.iftype[band]; + for (i = 0; i < n; i++) { + if (data[i].types_mask & BIT(type)) { + eht_cap = &data[i].eht_cap; + break; 
+ } + } + + if (eht_cap && eht_cap->has_eht) return mode; switch (mode) { diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c index 92845ffff4..e8eb996380 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.c +++ b/drivers/net/wireless/ath/ath12k/qmi.c @@ -2325,8 +2325,9 @@ static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab) for (i = 0; i < ab->qmi.mem_seg_count; i++) { if (!ab->qmi.target_mem[i].v.addr) continue; + dma_free_coherent(ab->dev, - ab->qmi.target_mem[i].size, + ab->qmi.target_mem[i].prev_size, ab->qmi.target_mem[i].v.addr, ab->qmi.target_mem[i].paddr); ab->qmi.target_mem[i].v.addr = NULL; @@ -2352,6 +2353,20 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab) case M3_DUMP_REGION_TYPE: case PAGEABLE_MEM_REGION_TYPE: case CALDB_MEM_REGION_TYPE: + /* Firmware reloads in recovery/resume. + * In such cases, no need to allocate memory for FW again. + */ + if (chunk->v.addr) { + if (chunk->prev_type == chunk->type && + chunk->prev_size == chunk->size) + goto this_chunk_done; + + /* cannot reuse the existing chunk */ + dma_free_coherent(ab->dev, chunk->prev_size, + chunk->v.addr, chunk->paddr); + chunk->v.addr = NULL; + } + chunk->v.addr = dma_alloc_coherent(ab->dev, chunk->size, &chunk->paddr, @@ -2370,6 +2385,10 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab) chunk->type, chunk->size); return -ENOMEM; } + + chunk->prev_type = chunk->type; + chunk->prev_size = chunk->size; +this_chunk_done: break; default: ath12k_warn(ab, "memory type %u not supported\n", @@ -2666,6 +2685,19 @@ out: return ret; } +static void ath12k_qmi_m3_free(struct ath12k_base *ab) +{ + struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; + + if (!m3_mem->vaddr) + return; + + dma_free_coherent(ab->dev, m3_mem->size, + m3_mem->vaddr, m3_mem->paddr); + m3_mem->vaddr = NULL; + m3_mem->size = 0; +} + static int ath12k_qmi_m3_load(struct ath12k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; @@ -2675,10 +2707,6 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab) size_t m3_len; int ret; - if (m3_mem->vaddr) - /* m3 firmware buffer is already available in the DMA buffer */ - return 0; - if (ab->fw.m3_data && ab->fw.m3_len > 0) { /* firmware-N.bin had a m3 firmware file so use that */ m3_data = ab->fw.m3_data; @@ -2700,6 +2728,15 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab) m3_len = fw->size; } + /* In recovery/resume cases, M3 buffer is not freed, try to reuse that */ + if (m3_mem->vaddr) { + if (m3_mem->size >= m3_len) + goto skip_m3_alloc; + + /* Old buffer is too small, free and reallocate */ + ath12k_qmi_m3_free(ab); + } + m3_mem->vaddr = dma_alloc_coherent(ab->dev, m3_len, &m3_mem->paddr, GFP_KERNEL); @@ -2710,6 +2747,7 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab) goto out; } +skip_m3_alloc: memcpy(m3_mem->vaddr, m3_data, m3_len); m3_mem->size = m3_len; @@ -2721,19 +2759,6 @@ out: return ret; } -static void ath12k_qmi_m3_free(struct ath12k_base *ab) -{ - struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; - - if (!m3_mem->vaddr) - return; - - dma_free_coherent(ab->dev, m3_mem->size, - m3_mem->vaddr, m3_mem->paddr); - m3_mem->vaddr = NULL; - m3_mem->size = 0; -} - static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; @@ -3178,6 +3203,9 @@ static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = { .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01), .fn = ath12k_qmi_msg_fw_ready_cb, }, + + /* end of list */ + {}, 
}; static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h index 6ee33c9851..f34263d4be 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.h +++ b/drivers/net/wireless/ath/ath12k/qmi.h @@ -96,6 +96,8 @@ struct ath12k_qmi_event_msg { struct target_mem_chunk { u32 size; u32 type; + u32 prev_size; + u32 prev_type; dma_addr_t paddr; union { void __iomem *ioaddr; diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 9d69a17699..34de3d16ef 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -1900,7 +1900,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, if (arg->bw_160) cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ); if (arg->bw_320) - cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ); + cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ); /* Typically if STBC is enabled for VHT it should be enabled * for HT as well diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a2943aaecb..01173aac30 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -135,8 +135,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc) if (power_mode != ATH9K_PM_AWAKE) { spin_lock(&common->cc_lock); ath_hw_cycle_counters_update(common); - memset(&common->cc_survey, 0, sizeof(common->cc_survey)); - memset(&common->cc_ani, 0, sizeof(common->cc_ani)); + memset(&common->cc, 0, sizeof(common->cc)); spin_unlock(&common->cc_lock); } diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index e902ca80eb..0226c31a6c 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -280,7 +280,8 @@ static void carl9170_tx_release(struct kref *ref) * carl9170_tx_fill_rateinfo() has filled the rate information * before we get to this point. 
*/ - memset_after(&txinfo->status, 0, rates); + memset(&txinfo->pad, 0, sizeof(txinfo->pad)); + memset(&txinfo->rate_driver_data, 0, sizeof(txinfo->rate_driver_data)); if (atomic_read(&ar->tx_total_queued)) ar->tx_schedule = true; diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index c4edf83559..a3e03580cd 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -1069,6 +1069,38 @@ static int carl9170_usb_probe(struct usb_interface *intf, ar->usb_ep_cmd_is_bulk = true; } + /* Verify that all expected endpoints are present */ + if (ar->usb_ep_cmd_is_bulk) { + u8 bulk_ep_addr[] = { + AR9170_USB_EP_RX | USB_DIR_IN, + AR9170_USB_EP_TX | USB_DIR_OUT, + AR9170_USB_EP_CMD | USB_DIR_OUT, + 0}; + u8 int_ep_addr[] = { + AR9170_USB_EP_IRQ | USB_DIR_IN, + 0}; + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || + !usb_check_int_endpoints(intf, int_ep_addr)) + err = -ENODEV; + } else { + u8 bulk_ep_addr[] = { + AR9170_USB_EP_RX | USB_DIR_IN, + AR9170_USB_EP_TX | USB_DIR_OUT, + 0}; + u8 int_ep_addr[] = { + AR9170_USB_EP_IRQ | USB_DIR_IN, + AR9170_USB_EP_CMD | USB_DIR_OUT, + 0}; + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || + !usb_check_int_endpoints(intf, int_ep_addr)) + err = -ENODEV; + } + + if (err) { + carl9170_free(ar); + return err; + } + usb_set_intfdata(intf, ar); SET_IEEE80211_DEV(ar->hw, &intf->dev); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index d7fb88bb6a..06698a714b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1675,6 +1675,15 @@ struct brcmf_random_seed_footer { #define BRCMF_RANDOM_SEED_MAGIC 0xfeedc0de #define BRCMF_RANDOM_SEED_LENGTH 0x100 +static noinline_for_stack void +brcmf_pcie_provide_random_bytes(struct brcmf_pciedev_info *devinfo, u32 address) +{ + u8 randbuf[BRCMF_RANDOM_SEED_LENGTH]; + + get_random_bytes(randbuf, BRCMF_RANDOM_SEED_LENGTH); + memcpy_toio(devinfo->tcm + address, randbuf, BRCMF_RANDOM_SEED_LENGTH); +} + static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, const struct firmware *fw, void *nvram, u32 nvram_len) @@ -1717,7 +1726,6 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, .length = cpu_to_le32(rand_len), .magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC), }; - void *randbuf; /* Some Apple chips/firmwares expect a buffer of random * data to be present before NVRAM @@ -1729,10 +1737,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, sizeof(footer)); address -= rand_len; - randbuf = kzalloc(rand_len, GFP_KERNEL); - get_random_bytes(randbuf, rand_len); - memcpy_toio(devinfo->tcm + address, randbuf, rand_len); - kfree(randbuf); + brcmf_pcie_provide_random_bytes(devinfo, address); } } else { brcmf_dbg(PCIE, "No matching NVRAM file found %s\n", diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 0bf38243f8..ce18ef9d31 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -385,6 +385,33 @@ struct iwl_dev_tx_power_cmd_v7 { __le32 timer_period; __le32 flags; } __packed; /* TX_REDUCED_POWER_API_S_VER_7 */ + +/** + * struct iwl_dev_tx_power_cmd_v8 - TX power reduction command version 8 + * @per_chain: per chain restrictions + * @enable_ack_reduction: enable or disable close range ack TX 
power + * reduction. + * @per_chain_restriction_changed: is per_chain_restriction has changed + * from last command. used if set_mode is + * IWL_TX_POWER_MODE_SET_SAR_TIMER. + * note: if not changed, the command is used for keep alive only. + * @reserved: reserved (padding) + * @timer_period: timer in milliseconds. if expires FW will change to default + * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER + * @flags: reduce power flags. + * @tpc_vlp_backoff_level: user backoff of UNII5,7 VLP channels in USA. + * Not in use. + */ +struct iwl_dev_tx_power_cmd_v8 { + __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; + u8 enable_ack_reduction; + u8 per_chain_restriction_changed; + u8 reserved[2]; + __le32 timer_period; + __le32 flags; + __le32 tpc_vlp_backoff_level; +} __packed; /* TX_REDUCED_POWER_API_S_VER_8 */ + /** * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion) * @common: common part of the command @@ -392,6 +419,8 @@ struct iwl_dev_tx_power_cmd_v7 { * @v4: version 4 part of the command * @v5: version 5 part of the command * @v6: version 6 part of the command + * @v7: version 7 part of the command + * @v8: version 8 part of the command */ struct iwl_dev_tx_power_cmd { struct iwl_dev_tx_power_common common; @@ -401,6 +430,7 @@ struct iwl_dev_tx_power_cmd { struct iwl_dev_tx_power_cmd_v5 v5; struct iwl_dev_tx_power_cmd_v6 v6; struct iwl_dev_tx_power_cmd_v7 v7; + struct iwl_dev_tx_power_cmd_v8 v8; }; }; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 1b7254569a..6c27ef2f7c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1821,8 +1821,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) err_fw: #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_drv); - iwl_dbg_tlv_free(drv->trans); #endif + iwl_dbg_tlv_free(drv->trans); kfree(drv); err: return ERR_PTR(ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 535edb51d1..5416e41189 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2013-2014, 2018-2020, 2022-2023 Intel Corporation + * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH */ #include <linux/ieee80211.h> @@ -259,7 +259,6 @@ static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool enable) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int link_id; lockdep_assert_held(&mvm->mutex); @@ -267,37 +266,22 @@ static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm, return; /* Done already */ - if (mvmvif->bt_coex_esr_disabled == !enable) + if ((mvmvif->esr_disable_reason & IWL_MVM_ESR_DISABLE_COEX) == !enable) return; - mvmvif->bt_coex_esr_disabled = !enable; - - /* Nothing to do */ - if (mvmvif->esr_active == enable) - return; - - if (enable) { - /* Try to re-enable eSR*/ - iwl_mvm_mld_select_links(mvm, vif, false); - return; - } - - /* - * Find the primary link, as we want to switch to it and drop the - * secondary one. 
- */ - link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links); - WARN_ON(link_id < 0); + if (enable) + mvmvif->esr_disable_reason &= ~IWL_MVM_ESR_DISABLE_COEX; + else + mvmvif->esr_disable_reason |= IWL_MVM_ESR_DISABLE_COEX; - ieee80211_set_active_links_async(vif, - vif->active_links & BIT(link_id)); + iwl_mvm_recalc_esr(mvm, vif); } /* * This function receives the LB link id and checks if eSR should be * enabled or disabled (due to BT coex) */ -bool +static bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int link_id, int primary_link) @@ -338,7 +322,7 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, if (!link_rssi) wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi; - else if (!mvmvif->bt_coex_esr_disabled) + else if (!(mvmvif->esr_disable_reason & IWL_MVM_ESR_DISABLE_COEX)) /* RSSI needs to get really low to disable eSR... */ wifi_loss_rate = link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ? @@ -354,9 +338,9 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH; } -void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - int link_id) +void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + int link_id) { unsigned long usable_links = ieee80211_vif_usable_links(vif); int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif, @@ -418,7 +402,7 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, return; } - iwl_mvm_bt_coex_update_vif_esr(mvm, vif, link_id); + iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2)) min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index e1c2b7fc92..855267ea6e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -94,20 +94,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; - __le32 *dump_data = mfu_dump_notif->data; - int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); - int i; if (mfu_dump_notif->index_num == 0) IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", le32_to_cpu(mfu_dump_notif->assert_id)); - - for (i = 0; i < n_words; i++) - IWL_DEBUG_INFO(mvm, - "MFUART assert dump, dword %u: 0x%08x\n", - le16_to_cpu(mfu_dump_notif->index_num) * - n_words + i, - le32_to_cpu(dump_data[i])); } static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, @@ -894,13 +884,15 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) int ret; u16 len = 0; u32 n_subbands; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); - if (cmd_ver == 7) { + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3); + + if (cmd_ver >= 7) { len = sizeof(cmd.v7); n_subbands = IWL_NUM_SUB_BANDS_V2; per_chain = cmd.v7.per_chain[0][0]; cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags); + if (cmd_ver == 8) + len = sizeof(cmd.v8); } else if (cmd_ver == 6) { len = sizeof(cmd.v6); n_subbands = IWL_NUM_SUB_BANDS_V2; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 8f4b063d62..5f6b16d3fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ 
b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1399,7 +1399,9 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (tx_power == IWL_DEFAULT_MAX_TX_POWER) cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); - if (cmd_ver == 7) + if (cmd_ver == 8) + len = sizeof(cmd.v8); + else if (cmd_ver == 7) len = sizeof(cmd.v7); else if (cmd_ver == 6) len = sizeof(cmd.v6); @@ -1564,6 +1566,17 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm, IWL_STA_MULTICAST); } +void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif) +{ + lockdep_assert_held(&mvm->mutex); + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return; + + INIT_DELAYED_WORK(&mvmvif->csa_work, + iwl_mvm_channel_switch_disconnect_wk); +} + static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1574,6 +1587,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + iwl_mvm_mac_init_mvmvif(mvm, mvmvif); + mvmvif->mvm = mvm; /* the first link always points to the default one */ @@ -1651,15 +1666,10 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI; } - if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5) - vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW; - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) mvm->p2p_device_vif = vif; iwl_mvm_tcm_add_vif(mvm, vif); - INIT_DELAYED_WORK(&mvmvif->csa_work, - iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) { mvm->monitor_on = true; @@ -1697,6 +1707,8 @@ out: void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* * Flush the ROC worker which will flush the OFFCHANNEL queue. 
@@ -1705,6 +1717,8 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, */ flush_work(&mvm->roc_done_wk); } + + cancel_delayed_work_sync(&mvmvif->csa_work); } /* This function is doing the common part of removing the interface for @@ -3792,6 +3806,24 @@ out: return callbacks->update_sta(mvm, vif, sta); } +static void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + unsigned long usable_links = ieee80211_vif_usable_links(vif); + u8 link_id; + + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + + if (WARN_ON_ONCE(!link_conf)) + return; + + if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ) + iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id); + } +} + static int iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -3819,6 +3851,9 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm, callbacks->mac_ctxt_changed(mvm, vif, false); iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); + /* Calculate eSR mode due to BT coex */ + iwl_mvm_bt_coex_update_vif_esr(mvm, vif); + /* when client is authorized (AP station marked as such), * try to enable more links */ @@ -4608,7 +4643,7 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm, if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) { ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); - } else if (fw_ver == 3) { + } else if (fw_ver >= 3) { ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, ROC_ACTIVITY_HOTSPOT); } else { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 084314bf6f..43f3002ede 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -14,6 +14,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + iwl_mvm_mac_init_mvmvif(mvm, mvmvif); + mvmvif->mvm = mvm; /* Not much to do here. 
The stack will not allow interface @@ -73,8 +75,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, goto out_free_bf; iwl_mvm_tcm_add_vif(mvm, vif); - INIT_DELAYED_WORK(&mvmvif->csa_work, - iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) { mvm->monitor_on = true; @@ -92,6 +92,9 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, mvm->csme_vif = vif; } + if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5) + vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW; + goto out_unlock; out_free_bf: @@ -189,17 +192,13 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -static unsigned int iwl_mvm_mld_count_active_links(struct ieee80211_vif *vif) +static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif) { unsigned int n_active = 0; int i; for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { - struct ieee80211_bss_conf *link_conf; - - link_conf = link_conf_dereference_protected(vif, i); - if (link_conf && - rcu_access_pointer(link_conf->chanctx_conf)) + if (mvmvif->link[i] && mvmvif->link[i]->phy_ctxt) n_active++; } @@ -245,18 +244,18 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; - unsigned int n_active = iwl_mvm_mld_count_active_links(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif); unsigned int link_id = link_conf->link_id; int ret; - /* if the assigned one was not counted yet, count it now */ - if (!rcu_access_pointer(link_conf->chanctx_conf)) - n_active++; - if (WARN_ON_ONCE(!mvmvif->link[link_id])) return -EINVAL; + /* if the assigned one was not counted yet, count it now */ + if (!mvmvif->link[link_id]->phy_ctxt) + n_active++; + /* mac parameters such as HE support can change at this stage * For sta, need first to configure correct state from drv_sta_state * and only after that update mac config. @@ -296,13 +295,8 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, * this needs the phy context assigned (and in FW?), and we cannot * do it later because it needs to be initialized as soon as we're * able to TX on the link, i.e. when active. - * - * Firmware restart isn't quite correct yet for MLO, but we don't - * need to do it in that case anyway since it will happen from the - * normal station state callback. 
*/ - if (mvmvif->ap_sta && - !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + if (mvmvif->ap_sta) { struct ieee80211_link_sta *link_sta; rcu_read_lock(); @@ -416,7 +410,7 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - unsigned int n_active = iwl_mvm_mld_count_active_links(vif); + unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif); unsigned int link_id = link_conf->link_id; /* shouldn't happen, but verify link_id is valid before accessing */ @@ -608,10 +602,49 @@ struct iwl_mvm_link_sel_data { bool active; }; -static bool iwl_mvm_mld_valid_link_pair(struct iwl_mvm_link_sel_data *a, +static bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif, + struct iwl_mvm_link_sel_data *a, struct iwl_mvm_link_sel_data *b) { - return a->band != b->band; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (a->band == b->band) + return false; + + /* BT Coex effects eSR mode only if one of the link is on LB */ + if (a->band == NL80211_BAND_2GHZ || b->band == NL80211_BAND_2GHZ) + return !(mvmvif->esr_disable_reason & IWL_MVM_ESR_DISABLE_COEX); + + return true; +} + +static u8 +iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif, + struct iwl_mvm_link_sel_data *data, + unsigned long usable_links) +{ + u8 n_data = 0; + unsigned long link_id; + + rcu_read_lock(); + + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + rcu_dereference(vif->link_conf[link_id]); + + if (WARN_ON_ONCE(!link_conf)) + continue; + + data[n_data].link_id = link_id; + data[n_data].band = link_conf->chanreq.oper.chan->band; + data[n_data].width = link_conf->chanreq.oper.width; + data[n_data].active = vif->active_links & BIT(link_id); + n_data++; + } + + rcu_read_unlock(); + + return n_data; } void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -621,7 +654,7 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, unsigned long usable_links = ieee80211_vif_usable_links(vif); u32 max_active_links = iwl_mvm_max_active_links(mvm, vif); u16 new_active_links; - u8 link_id, n_data = 0, i, j; + u8 n_data, i, j; if (!IWL_MVM_AUTO_EML_ENABLE) return; @@ -646,23 +679,7 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (hweight16(vif->active_links) == max_active_links) return; - rcu_read_lock(); - - for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { - struct ieee80211_bss_conf *link_conf = - rcu_dereference(vif->link_conf[link_id]); - - if (WARN_ON_ONCE(!link_conf)) - continue; - - data[n_data].link_id = link_id; - data[n_data].band = link_conf->chanreq.oper.chan->band; - data[n_data].width = link_conf->chanreq.oper.width; - data[n_data].active = vif->active_links & BIT(link_id); - n_data++; - } - - rcu_read_unlock(); + n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links); /* this is expected to be the current active link */ if (n_data == 1) @@ -686,7 +703,8 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (i == j) continue; - if (iwl_mvm_mld_valid_link_pair(&data[i], &data[j])) + if (iwl_mvm_mld_valid_link_pair(vif, &data[i], + &data[j])) break; } @@ -702,7 +720,7 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (i == j) continue; - if (iwl_mvm_mld_valid_link_pair(&data[i], + if (iwl_mvm_mld_valid_link_pair(vif, &data[i], &data[j])) break; } @@ -1264,6 +1282,33 @@ int 
iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm, return data[1].link_id; } +void iwl_mvm_recalc_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool enable = !mvmvif->esr_disable_reason; + int link_id; + + /* Nothing to do */ + if (mvmvif->esr_active == enable) + return; + + if (enable) { + /* Try to re-enable eSR */ + iwl_mvm_mld_select_links(mvm, vif, false); + return; + } + + /* + * Find the primary link, as we want to switch to it and drop the + * secondary one. + */ + link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links); + WARN_ON(link_id < 0); + + ieee80211_set_active_links_async(vif, + vif->active_links & BIT(link_id)); +} + /* * This function receives a bitmap of usable links and check if we can enter * eSR on those links. @@ -1272,14 +1317,13 @@ static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, unsigned long desired_links) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif, - desired_links); + u16 usable_links = ieee80211_vif_usable_links(vif); + struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS]; const struct wiphy_iftype_ext_capab *ext_capa; - bool ret = true; - int link_id; + u8 n_data; - if (primary_link < 0) + if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc || + hweight16(usable_links) <= 1) return false; if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP)) @@ -1291,26 +1335,13 @@ static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm, !(ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP)) return false; - for_each_set_bit(link_id, &desired_links, IEEE80211_MLD_MAX_NUM_LINKS) { - struct ieee80211_bss_conf *link_conf = - link_conf_dereference_protected(vif, link_id); - - if (WARN_ON_ONCE(!link_conf)) - continue; + n_data = iwl_mvm_set_link_selection_data(vif, data, desired_links); - /* BT Coex effects eSR mode only if one of the link is on LB */ - if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ) - continue; + if (n_data != 2) + return false; - ret = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id, - primary_link); - // Mark eSR as disabled for the next time - if (!ret) - mvmvif->bt_coex_esr_disabled = true; - break; - } - return ret; + return iwl_mvm_mld_valid_link_pair(vif, &data[0], &data[1]); } static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 23e64a757c..36dc291d98 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -9,7 +9,9 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int filter_link_id) { + struct ieee80211_link_sta *link_sta; struct iwl_mvm_sta *mvmsta; + struct ieee80211_vif *vif; unsigned int link_id; u32 result = 0; @@ -17,26 +19,27 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return 0; mvmsta = iwl_mvm_sta_from_mac80211(sta); + vif = mvmsta->vif; /* it's easy when the STA is not an MLD */ if (!sta->valid_links) return BIT(mvmsta->deflink.sta_id); /* but if it is an MLD, get the mask of all the FW STAs it has ... 
*/ - for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) { - struct iwl_mvm_link_sta *link_sta; + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct iwl_mvm_link_sta *mvm_link_sta; /* unless we have a specific link in mind */ if (filter_link_id >= 0 && link_id != filter_link_id) continue; - link_sta = + mvm_link_sta = rcu_dereference_check(mvmsta->link[link_id], lockdep_is_held(&mvm->mutex)); - if (!link_sta) + if (!mvm_link_sta) continue; - result |= BIT(link_sta->sta_id); + result |= BIT(mvm_link_sta->sta_id); } return result; @@ -582,14 +585,14 @@ static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; unsigned int link_id; int ret; lockdep_assert_held(&mvm->mutex); - for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) { - if (!rcu_access_pointer(sta->link[link_id]) || - mvm_sta->link[link_id]) + for_each_sta_active_link(vif, sta, link_sta, link_id) { + if (WARN_ON(mvm_sta->link[link_id])) continue; ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index f0b24f0093..d1ab35eb55 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -347,6 +347,14 @@ struct iwl_mvm_vif_link_info { }; /** + * enum iwl_mvm_esr_disable_reason - reasons for which we can't enable EMLSR + * @IWL_MVM_ESR_DISABLE_COEX: COEX is preventing the enablement of EMLSR + */ +enum iwl_mvm_esr_disable_reason { + IWL_MVM_ESR_DISABLE_COEX = BIT(0), +}; + +/** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context * @mvm: pointer back to the mvm struct * @id: between 0 and 3 @@ -361,7 +369,6 @@ struct iwl_mvm_vif_link_info { * @pm_enabled - indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. - * @bt_coex_esr_disabled: indicates if esr is disabled due to bt coex * @low_latency: bit flags for low latency * see enum &iwl_mvm_low_latency_cause for causes. 
* @low_latency_actual: boolean, indicates low latency is set, @@ -378,6 +385,7 @@ struct iwl_mvm_vif_link_info { * @deflink: default link data for use in non-MLO * @link: link data for each link in MLO * @esr_active: indicates eSR mode is active + * @esr_disable_reason: a bitmap of enum iwl_mvm_esr_disable_reason * @pm_enabled: indicates powersave is enabled */ struct iwl_mvm_vif { @@ -392,7 +400,6 @@ struct iwl_mvm_vif { bool pm_enabled; bool monitor_active; bool esr_active; - bool bt_coex_esr_disabled; u8 low_latency: 6; u8 low_latency_actual: 1; @@ -400,6 +407,7 @@ struct iwl_mvm_vif { u8 authorized:1; bool ps_disabled; + u32 esr_disable_reason; u32 ap_beacon_time; struct iwl_mvm_vif_bf_data bf_data; @@ -1572,15 +1580,14 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm, struct iwl_trans *trans = mvm->fwrt.trans; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - lockdep_assert_held(&mvm->mutex); - if (vif->type == NL80211_IFTYPE_AP) return mvm->fw->ucode_capa.num_beacons; + /* Check if HW supports eSR or STR */ if ((iwl_mvm_is_esr_supported(trans) && - !mvmvif->bt_coex_esr_disabled) || - ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM && - CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))) + !(mvmvif->esr_disable_reason & ~IWL_MVM_ESR_DISABLE_COEX)) || + (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM && + CSR_HW_RFID_IS_CDB(trans->hw_rf_id))) return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM; return 1; @@ -1776,6 +1783,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); +void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif); + /* * FW notifications / CMD responses handlers * Convention: iwl_mvm_rx_<NAME OF THE CMD> @@ -2133,12 +2142,9 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); -bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - int link_id, int primary_link); -void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - int link_id); +void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + int link_id); /* beacon filtering */ #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -2779,4 +2785,8 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, int duration, u32 activity); + +/* EMLSR */ +void iwl_mvm_recalc_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif); + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 376b23b409..6cd4ec4d8f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -122,13 +122,8 @@ enum { #define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) -/* - * FIXME - various places in firmware API still use u8, - * e.g. LQ command and SCD config command. - * This should be 256 instead. 
- */ -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255) -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255) +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64) +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64) #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) #define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index b1add7942c..395aef04f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -889,8 +889,8 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, if (link_info->phy_ctxt && link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ) - iwl_mvm_bt_coex_update_vif_esr(mvm, bss_conf->vif, - link_id); + iwl_mvm_bt_coex_update_link_esr(mvm, bss_conf->vif, + link_id); /* make sure that beacon statistics don't go backwards with TCM * request to clear statistics diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index ce8d83c771..8ac5c045fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -2456,8 +2456,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, * * We mark it as mac header, for upper layers to know where * all radio tap header ends. + * + * Since data doesn't move data while putting data on skb and that is + * the only way we use, data + len is the next place that hdr would be put */ - skb_reset_mac_header(skb); + skb_set_mac_header(skb, skb->len); /* * Override the nss from the rx_vec since the rate_n_flags has diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 11559563ae..525d8efcc1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1303,7 +1303,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, if (IWL_MVM_ADWELL_MAX_BUDGET) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); - else if (params->ssids && params->ssids[0].ssid_len) + else if (params->n_ssids && params->ssids[0].ssid_len) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else @@ -1405,7 +1405,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm, if (IWL_MVM_ADWELL_MAX_BUDGET) general_params->adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); - else if (params->ssids && params->ssids[0].ssid_len) + else if (params->n_ssids && params->ssids[0].ssid_len) general_params->adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else @@ -3171,8 +3171,13 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_mvm_vif_link_info *link_info = scan_vif->link[mvm->scan_link_id]; - if (!WARN_ON(!link_info)) + /* It is possible that by the time the scan is complete the link + * was already removed and is not valid. 
+ */ + if (link_info) memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN); + else + IWL_DEBUG_SCAN(mvm, "Scan link is no longer valid\n"); ieee80211_scan_completed(mvm->hw, &info); mvm->scan_vif = NULL; diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index ce8fea76db..7a4ef5c83b 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -2718,7 +2718,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST); cmd->numaddr = cpu_to_le16(mc_count); netdev_hw_addr_list_for_each(ha, mc_list) { - memcpy(cmd->addr[i], ha->addr, ETH_ALEN); + memcpy(cmd->addr[i++], ha->addr, ETH_ALEN); } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index 7a2f5d3856..14304b0637 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -4,6 +4,13 @@ #include "mac.h" #include "../dma.h" +static const u8 wmm_queue_map[] = { + [IEEE80211_AC_BK] = 0, + [IEEE80211_AC_BE] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 3, +}; + static void mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) { @@ -22,10 +29,10 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) struct ieee80211_sta *sta; struct mt7603_sta *msta; struct mt76_wcid *wcid; + u8 tid = 0, hwq = 0; void *priv; int idx; u32 val; - u8 tid = 0; if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr)) goto free; @@ -42,19 +49,36 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) goto free; priv = msta = container_of(wcid, struct mt7603_sta, wcid); - val = le32_to_cpu(txd[0]); - val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX); - val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT); - txd[0] = cpu_to_le32(val); sta = container_of(priv, struct ieee80211_sta, drv_priv); hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE]; - if (ieee80211_is_data_qos(hdr->frame_control)) + + hwq = wmm_queue_map[IEEE80211_AC_BE]; + if (ieee80211_is_data_qos(hdr->frame_control)) { tid = *ieee80211_get_qos_ctl(hdr) & - IEEE80211_QOS_CTL_TAG1D_MASK; - skb_set_queue_mapping(skb, tid_to_ac[tid]); + IEEE80211_QOS_CTL_TAG1D_MASK; + u8 qid = tid_to_ac[tid]; + hwq = wmm_queue_map[qid]; + skb_set_queue_mapping(skb, qid); + } else if (ieee80211_is_data(hdr->frame_control)) { + skb_set_queue_mapping(skb, IEEE80211_AC_BE); + hwq = wmm_queue_map[IEEE80211_AC_BE]; + } else { + skb_pull(skb, MT_TXD_SIZE); + if (!ieee80211_is_bufferable_mmpdu(skb)) + goto free; + skb_push(skb, MT_TXD_SIZE); + skb_set_queue_mapping(skb, MT_TXQ_PSD); + hwq = MT_TX_HW_QUEUE_MGMT; + } + ieee80211_sta_set_buffered(sta, tid, true); + val = le32_to_cpu(txd[0]); + val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX); + val |= FIELD_PREP(MT_TXD0_Q_IDX, hwq); + txd[0] = cpu_to_le32(val); + spin_lock_bh(&dev->ps_lock); __skb_queue_tail(&msta->psq, skb); if (skb_queue_len(&msta->psq) >= 64) { @@ -151,12 +175,6 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget) int mt7603_dma_init(struct mt7603_dev *dev) { - static const u8 wmm_queue_map[] = { - [IEEE80211_AC_BK] = 0, - [IEEE80211_AC_BE] = 1, - [IEEE80211_AC_VI] = 2, - [IEEE80211_AC_VO] = 3, - }; int ret; int i; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index cf21d06257..dc8a77f0a1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -1393,6 +1393,7 @@ void mt7603_pse_client_reset(struct mt7603_dev *dev) MT_CLIENT_RESET_TX_R_E_2_S); /* Start PSE client TX abort */ + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF); mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1); mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S, MT_CLIENT_RESET_TX_R_E_1_S, 500); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 0971c164b5..c27acaf0eb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -1326,6 +1326,10 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw, #endif /* CONFIG_PM */ const struct ieee80211_ops mt7615_ops = { + .add_chanctx = ieee80211_emulate_add_chanctx, + .remove_chanctx = ieee80211_emulate_remove_chanctx, + .change_chanctx = ieee80211_emulate_change_chanctx, + .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, .tx = mt7615_tx, .start = mt7615_start, .stop = mt7615_stop, diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index af0c2b2aac..fb8bd50eb7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif, }; struct sk_buff *skb; - if (is_mt799x(dev) && !wcid->sta) + if (wcid && !wcid->sta) hdr.muar_idx = 0xe; mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo, @@ -2527,6 +2527,7 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend) __le16 tag; __le16 len; u8 suspend; + u8 pad[7]; } __packed hif_suspend; } req = { .hif_suspend = { diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 6c3696c8c7..450f4d2211 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -1049,6 +1049,7 @@ static ssize_t mt7915_rate_txpower_set(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { + int i, ret, pwr, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0; struct mt7915_phy *phy = file->private_data; struct mt7915_dev *dev = phy->dev; struct mt76_phy *mphy = phy->mt76; @@ -1057,7 +1058,6 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf, .band_idx = phy->mt76->band_idx, }; char buf[100]; - int i, ret, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0; enum mac80211_rx_encoding mode; u32 offs = 0, len = 0; @@ -1130,8 +1130,8 @@ skip: if (ret) goto out; - mphy->txpower_cur = max(mphy->txpower_cur, - max(pwr160, max(pwr80, max(pwr40, pwr20)))); + pwr = max3(pwr80, pwr40, pwr20); + mphy->txpower_cur = max3(mphy->txpower_cur, pwr160, pwr); out: mutex_unlock(&dev->mt76.mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 867e14f6b9..73e42ef429 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -663,6 +663,7 @@ void mt7921_mac_reset_work(struct work_struct *work) int i, ret; dev_dbg(dev->mt76.dev, "chip reset\n"); + set_bit(MT76_RESET, &dev->mphy.state); dev->hw_full_reset = true; ieee80211_stop_queues(hw); @@ -691,6 +692,7 @@ void mt7921_mac_reset_work(struct work_struct *work) } dev->hw_full_reset = false; + clear_bit(MT76_RESET, &dev->mphy.state); 
pm->suspended = false; ieee80211_wake_queues(hw); ieee80211_iterate_active_interfaces(hw, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c index c866144ff0..031ba9aaa4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c @@ -64,7 +64,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev) mt76_wr(dev, dev->irq_map->host_irq_enable, 0); mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); - set_bit(MT76_RESET, &dev->mphy.state); set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); skb_queue_purge(&dev->mt76.mcu.res_q); @@ -115,7 +114,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev) err = __mt7921_start(&dev->phy); out: - clear_bit(MT76_RESET, &dev->mphy.state); local_bh_disable(); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c index 389eb09038..1f77cf71ca 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c @@ -98,7 +98,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev) mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); mt76_txq_schedule_all(&dev->mphy); mt76_worker_disable(&dev->mt76.tx_worker); - set_bit(MT76_RESET, &dev->mphy.state); set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); skb_queue_purge(&dev->mt76.mcu.res_q); @@ -135,7 +134,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev) err = __mt7921_start(&dev->phy); out: - clear_bit(MT76_RESET, &dev->mphy.state); mt76_worker_enable(&dev->mt76.tx_worker); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h index 2a0bbfe7bf..b8315a89f4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -535,7 +535,7 @@ struct mt7925_wow_pattern_tlv { u8 offset; u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN]; u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN]; - u8 rsv[4]; + u8 rsv[7]; } __packed; static inline enum connac3_mcu_cipher_type diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index b44abe2acc..e86c05d0ee 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -3729,6 +3729,7 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy) } __packed * res; struct sk_buff *skb; int ret; + u32 temp; ret = mt76_mcu_send_and_get_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(THERMAL), &req, sizeof(req), true, &skb); @@ -3736,8 +3737,10 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy) return ret; res = (void *)skb->data; + temp = le32_to_cpu(res->temperature); + dev_kfree_skb(skb); - return le32_to_cpu(res->temperature); + return temp; } int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state) @@ -4464,7 +4467,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy) u8 band_idx; } __packed req = { .tag = cpu_to_le16(UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL), - .len = cpu_to_le16(sizeof(req) + MT7996_SKU_RATE_NUM - 4), + .len = cpu_to_le16(sizeof(req) + MT7996_SKU_PATH_NUM - 4), .power_ctrl_id = UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL, .power_limit_type = TX_POWER_LIMIT_TABLE_RATE, .band_idx = phy->mt76->band_idx, @@ -4479,7 +4482,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy) mphy->txpower_cur = tx_power; skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, - sizeof(req) + 
MT7996_SKU_RATE_NUM); + sizeof(req) + MT7996_SKU_PATH_NUM); if (!skb) return -ENOMEM; @@ -4503,6 +4506,9 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy) /* eht */ skb_put_data(skb, &la.eht[0], sizeof(la.eht)); + /* padding */ + skb_put_zero(skb, MT7996_SKU_PATH_NUM - MT7996_SKU_RATE_NUM); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WM_UNI_CMD(TXPOWER), true); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c index 304e5fd148..928a9663b4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c @@ -519,7 +519,7 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t) struct mt7996_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet); struct mtk_wed_device *wed = &dev->mt76.mmio.wed; struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2; - u32 i, intr, mask, intr1; + u32 i, intr, mask, intr1 = 0; if (dev->hif2 && mtk_wed_device_active(wed_hif2)) { mtk_wed_device_irq_set_mask(wed_hif2, 0); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index 36d1f247d5..ddeb40d522 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -50,6 +50,7 @@ #define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ #define MT7996_SKU_RATE_NUM 417 +#define MT7996_SKU_PATH_NUM 494 #define MT7996_MAX_TWT_AGRT 16 #define MT7996_MAX_STA_TWT_AGRT 8 diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index 3e88798df0..a4ed00eebc 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -499,7 +499,8 @@ static void mt76s_tx_status_data(struct mt76_worker *worker) dev = container_of(sdio, struct mt76_dev, sdio); while (true) { - if (test_bit(MT76_REMOVED, &dev->phy.state)) + if (test_bit(MT76_RESET, &dev->phy.state) || + test_bit(MT76_REMOVED, &dev->phy.state)) break; if (!dev->drv->tx_status_data(dev, &update)) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index fd92d23c43..16d884a3d8 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -122,6 +122,15 @@ enum rtl8xxxu_rx_type { RX_TYPE_ERROR = -1 }; +enum rtl8xxxu_rx_desc_enc { + RX_DESC_ENC_NONE = 0, + RX_DESC_ENC_WEP40 = 1, + RX_DESC_ENC_TKIP_WO_MIC = 2, + RX_DESC_ENC_TKIP_MIC = 3, + RX_DESC_ENC_AES = 4, + RX_DESC_ENC_WEP104 = 5, +}; + struct rtl8xxxu_rxdesc16 { #ifdef __LITTLE_ENDIAN u32 pktlen:14; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 4a49f8f9d8..fbbc97161f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1505,13 +1505,13 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS]; u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS]; u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b; - u8 val8; + u8 val8, base; int group, i; group = rtl8xxxu_gen1_channel_to_group(channel); - cck[0] = priv->cck_tx_power_index_A[group] - 1; - cck[1] = priv->cck_tx_power_index_B[group] - 1; + cck[0] = priv->cck_tx_power_index_A[group]; + cck[1] = priv->cck_tx_power_index_B[group]; if (priv->hi_pa) { if (cck[0] > 0x20) @@ -1522,10 +1522,6 
@@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) ofdm[0] = priv->ht40_1s_tx_power_index_A[group]; ofdm[1] = priv->ht40_1s_tx_power_index_B[group]; - if (ofdm[0]) - ofdm[0] -= 1; - if (ofdm[1]) - ofdm[1] -= 1; ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a; ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b; @@ -1614,20 +1610,19 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a + power_base->reg_0e1c); + val8 = u32_get_bits(mcs_a + power_base->reg_0e1c, 0xff000000); for (i = 0; i < 3; i++) { - if (i != 2) - val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0; - else - val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0; + base = i != 2 ? 8 : 6; + val8 = max_t(int, val8 - base, 0); rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8); } + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b + power_base->reg_0868); + val8 = u32_get_bits(mcs_b + power_base->reg_0868, 0xff000000); for (i = 0; i < 3; i++) { - if (i != 2) - val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0; - else - val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0; + base = i != 2 ? 8 : 6; + val8 = max_t(int, val8 - base, 0); rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8); } } @@ -6473,7 +6468,8 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb) rx_status->mactime = rx_desc->tsfl; rx_status->flag |= RX_FLAG_MACTIME_START; - if (!rx_desc->swdec) + if (!rx_desc->swdec && + rx_desc->security != RX_DESC_ENC_NONE) rx_status->flag |= RX_FLAG_DECRYPTED; if (rx_desc->crc32) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; @@ -6578,7 +6574,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb) rx_status->mactime = rx_desc->tsfl; rx_status->flag |= RX_FLAG_MACTIME_START; - if (!rx_desc->swdec) + if (!rx_desc->swdec && + rx_desc->security != RX_DESC_ENC_NONE) rx_status->flag |= RX_FLAG_DECRYPTED; if (rx_desc->crc32) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; @@ -7998,6 +7995,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface, ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, MFP_CAPABLE); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 2e60a6991c..42b7db12b1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -633,21 +633,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) } } - if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { - rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, - "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n", - hw->conf.long_frame_max_tx_count); - /* brought up everything changes (changed == ~0) indicates first - * open, so use our default value instead of that of wiphy. 
- */ - if (changed != ~0) { - mac->retry_long = hw->conf.long_frame_max_tx_count; - mac->retry_short = hw->conf.long_frame_max_tx_count; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, - (u8 *)(&hw->conf.long_frame_max_tx_count)); - } - } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL && !rtlpriv->proximity.proxim_on) { struct ieee80211_channel *channel = hw->conf.chandef.chan; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index d835a27429..56b5cd032a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -892,8 +892,8 @@ static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl) u8 place = chnl; if (chnl > 14) { - for (place = 14; place < ARRAY_SIZE(channel5g); place++) { - if (channel5g[place] == chnl) { + for (place = 14; place < ARRAY_SIZE(channel_all); place++) { + if (channel_all[place] == chnl) { place++; break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 192982ec81..cbc7b4dbea 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -35,7 +35,7 @@ static long _rtl92de_translate_todbm(struct ieee80211_hw *hw, static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw, struct rtl_stats *pstats, - struct rx_desc_92d *pdesc, + __le32 *pdesc, struct rx_fwinfo_92d *p_drvinfo, bool packet_match_bssid, bool packet_toself, @@ -50,8 +50,10 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw, u8 i, max_spatial_stream; u32 rssi, total_rssi = 0; bool is_cck_rate; + u8 rxmcs; - is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc->rxmcs); + rxmcs = get_rx_desc_rxmcs(pdesc); + is_cck_rate = rxmcs <= DESC_RATE11M; pstats->packet_matchbssid = packet_match_bssid; pstats->packet_toself = packet_toself; pstats->packet_beacon = packet_beacon; @@ -157,8 +159,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw, pstats->rx_pwdb_all = pwdb_all; pstats->rxpower = rx_pwr_all; pstats->recvsignalpower = rx_pwr_all; - if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 && - pdesc->rxmcs <= DESC_RATEMCS15) + if (get_rx_desc_rxht(pdesc) && rxmcs >= DESC_RATEMCS8 && + rxmcs <= DESC_RATEMCS15) max_spatial_stream = 2; else max_spatial_stream = 1; @@ -364,7 +366,7 @@ static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw, static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw, struct sk_buff *skb, struct rtl_stats *pstats, - struct rx_desc_92d *pdesc, + __le32 *pdesc, struct rx_fwinfo_92d *p_drvinfo) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -413,7 +415,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, stats->icv = (u16)get_rx_desc_icv(pdesc); stats->crc = (u16)get_rx_desc_crc32(pdesc); stats->hwerror = (stats->crc | stats->icv); - stats->decrypted = !get_rx_desc_swdec(pdesc); + stats->decrypted = !get_rx_desc_swdec(pdesc) && + get_rx_desc_enc_type(pdesc) != RX_DESC_ENC_NONE; stats->rate = (u8)get_rx_desc_rxmcs(pdesc); stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc); stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1); @@ -426,8 +429,6 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, rx_status->band = hw->conf.chandef.chan->band; if (get_rx_desc_crc32(pdesc)) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (!get_rx_desc_swdec(pdesc)) - rx_status->flag |= RX_FLAG_DECRYPTED; if (get_rx_desc_bw(pdesc)) 
rx_status->bw = RATE_INFO_BW_40; if (get_rx_desc_rxht(pdesc)) @@ -441,9 +442,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, if (phystatus) { p_drvinfo = (struct rx_fwinfo_92d *)(skb->data + stats->rx_bufshift); - _rtl92de_translate_rx_signal_stuff(hw, - skb, stats, - (struct rx_desc_92d *)pdesc, + _rtl92de_translate_rx_signal_stuff(hw, skb, stats, pdesc, p_drvinfo); } /*rx_status->qual = stats->signal; */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h index 2992668c15..2d4887490f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h @@ -14,6 +14,15 @@ #define USB_HWDESC_HEADER_LEN 32 #define CRCLENGTH 4 +enum rtl92d_rx_desc_enc { + RX_DESC_ENC_NONE = 0, + RX_DESC_ENC_WEP40 = 1, + RX_DESC_ENC_TKIP_WO_MIC = 2, + RX_DESC_ENC_TKIP_MIC = 3, + RX_DESC_ENC_AES = 4, + RX_DESC_ENC_WEP104 = 5, +}; + /* macros to read/write various fields in RX or TX descriptors */ static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val) @@ -246,6 +255,11 @@ static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc) return le32_get_bits(*__pdesc, GENMASK(19, 16)); } +static inline u32 get_rx_desc_enc_type(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(22, 20)); +} + static inline u32 get_rx_desc_shift(__le32 *__pdesc) { return le32_get_bits(*__pdesc, GENMASK(25, 24)); @@ -380,10 +394,17 @@ struct rx_fwinfo_92d { u8 csi_target[2]; u8 sigevm; u8 max_ex_pwr; +#ifdef __LITTLE_ENDIAN u8 ex_intf_flag:1; u8 sgi_en:1; u8 rxsc:2; u8 reserve:4; +#else + u8 reserve:4; + u8 rxsc:2; + u8 sgi_en:1; + u8 ex_intf_flag:1; +#endif } __packed; struct tx_desc_92d { @@ -488,64 +509,6 @@ struct tx_desc_92d { u32 reserve_pass_pcie_mm_limit[4]; } __packed; -struct rx_desc_92d { - u32 length:14; - u32 crc32:1; - u32 icverror:1; - u32 drv_infosize:4; - u32 security:3; - u32 qos:1; - u32 shift:2; - u32 phystatus:1; - u32 swdec:1; - u32 lastseg:1; - u32 firstseg:1; - u32 eor:1; - u32 own:1; - - u32 macid:5; - u32 tid:4; - u32 hwrsvd:5; - u32 paggr:1; - u32 faggr:1; - u32 a1_fit:4; - u32 a2_fit:4; - u32 pam:1; - u32 pwr:1; - u32 moredata:1; - u32 morefrag:1; - u32 type:2; - u32 mc:1; - u32 bc:1; - - u32 seq:12; - u32 frag:4; - u32 nextpktlen:14; - u32 nextind:1; - u32 rsvd:1; - - u32 rxmcs:6; - u32 rxht:1; - u32 amsdu:1; - u32 splcp:1; - u32 bandwidth:1; - u32 htc:1; - u32 tcpchk_rpt:1; - u32 ipcchk_rpt:1; - u32 tcpchk_valid:1; - u32 hwpcerr:1; - u32 hwpcind:1; - u32 iv0:16; - - u32 iv1; - - u32 tsfl; - - u32 bufferaddress; - u32 bufferaddress64; - -} __packed; - void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc, u8 *pbd_desc_tx, struct ieee80211_tx_info *info, diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index d474b8d5df..b8d419a5b9 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -4069,6 +4069,24 @@ void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg eve } } +void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks) +{ + const struct dmi_system_id *match; + enum rtw89_quirks quirk; + + if (!quirks) + return; + + for (match = dmi_first_match(quirks); match; match = dmi_first_match(match + 1)) { + quirk = (uintptr_t)match->driver_data; + if (quirk >= NUM_OF_RTW89_QUIRKS) + continue; + + set_bit(quirk, rtwdev->quirks); + } +} 
+EXPORT_SYMBOL(rtw89_check_quirks); + int rtw89_core_start(struct rtw89_dev *rtwdev) { int ret; diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 2e854c9af7..509d84a493 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -7,6 +7,7 @@ #include <linux/average.h> #include <linux/bitfield.h> +#include <linux/dmi.h> #include <linux/firmware.h> #include <linux/iopoll.h> #include <linux/workqueue.h> @@ -3977,6 +3978,7 @@ union rtw89_bus_info { struct rtw89_driver_info { const struct rtw89_chip_info *chip; + const struct dmi_system_id *quirks; union rtw89_bus_info bus; }; @@ -4324,6 +4326,12 @@ enum rtw89_flags { NUM_OF_RTW89_FLAGS, }; +enum rtw89_quirks { + RTW89_QUIRK_PCI_BER, + + NUM_OF_RTW89_QUIRKS, +}; + enum rtw89_pkt_drop_sel { RTW89_PKT_DROP_SEL_MACID_BE_ONCE, RTW89_PKT_DROP_SEL_MACID_BK_ONCE, @@ -5084,6 +5092,7 @@ struct rtw89_dev { DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM); DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS); DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM); + DECLARE_BITMAP(quirks, NUM_OF_RTW89_QUIRKS); struct rtw89_phy_stat phystat; struct rtw89_rfk_wait_info rfk_wait; @@ -6129,6 +6138,7 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev, void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, struct cfg80211_tid_config *tid_config); +void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks); int rtw89_core_init(struct rtw89_dev *rtwdev); void rtw89_core_deinit(struct rtw89_dev *rtwdev); int rtw89_core_register(struct rtw89_dev *rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 31d1ffb16e..c4707a036b 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -318,7 +318,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev, u8 sifs; slot_time = vif->bss_conf.use_short_slot ? 9 : 20; - sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10; + sifs = chan->band_type == RTW89_BAND_2G ? 
10 : 16; return aifsn * slot_time + sifs; } diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 19001130ad..3b0d97da04 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1089,7 +1089,8 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, spin_lock_bh(&rtwpci->trx_lock); cnt = rtw89_pci_get_avail_txbd_num(tx_ring); - cnt = min(cnt, wd_ring->curr_num); + if (txch != RTW89_TXCH_CH12) + cnt = min(cnt, wd_ring->curr_num); spin_unlock_bh(&rtwpci->trx_lock); return cnt; @@ -2298,6 +2299,22 @@ static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) return 0; } +static void rtw89_pci_ber(struct rtw89_dev *rtwdev) +{ + u32 phy_offset; + + if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks)) + return; + + phy_offset = R_RAC_DIRECT_OFFSET_G1; + rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL); + rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); + + phy_offset = R_RAC_DIRECT_OFFSET_G2; + rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL); + rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); +} + static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) { if (rtwdev->chip->chip_id != RTL8852A) @@ -2695,6 +2712,7 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) const struct rtw89_pci_info *info = rtwdev->pci_info; int ret; + rtw89_pci_ber(rtwdev); rtw89_pci_rxdma_prefth(rtwdev); rtw89_pci_l1off_pwroff(rtwdev); rtw89_pci_deglitch_setting(rtwdev); @@ -4171,6 +4189,8 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; + rtw89_check_quirks(rtwdev, info->quirks); + SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); ret = rtw89_core_init(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index a63b6b7c9b..87e7081664 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -26,11 +26,16 @@ #define RAC_REG_FLD_0 0x1D #define BAC_AUTOK_N_MASK GENMASK(3, 2) #define PCIE_AUTOK_4 0x3 +#define RAC_ANA1E 0x1E +#define RAC_ANA1E_G1_VAL 0x66EA +#define RAC_ANA1E_G2_VAL 0x6EEA #define RAC_ANA1F 0x1F #define RAC_ANA24 0x24 #define B_AX_DEGLITCH GENMASK(11, 8) #define RAC_ANA26 0x26 #define B_AX_RXEN GENMASK(15, 14) +#define RAC_ANA2E 0x2E +#define RAC_ANA2E_VAL 0xFFFE #define RAC_CTRL_PPR_V1 0x30 #define B_AX_CLK_CALIB_EN BIT(12) #define B_AX_CALIB_EN BIT(13) diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c index 31290d8cb7..92074b73eb 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.c +++ b/drivers/net/wireless/realtek/rtw89/ps.c @@ -55,7 +55,8 @@ static void rtw89_ps_power_mode_change_with_hci(struct rtw89_dev *rtwdev, static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter) { - if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode)) + if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode) && + !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) rtw89_ps_power_mode_change_with_hci(rtwdev, enter); else rtw89_mac_power_mode_change(rtwdev, enter); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c index ca1374a717..ec3629d95f 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c 
@@ -63,6 +63,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = { static const struct rtw89_driver_info rtw89_8851be_info = { .chip = &rtw8851b_chip_info, + .quirks = NULL, .bus = { .pci = &rtw8851b_pci_info, }, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index 7c6ffedb77..fdee5dd4ba 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -61,6 +61,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { static const struct rtw89_driver_info rtw89_8852ae_info = { .chip = &rtw8852a_chip_info, + .quirks = NULL, .bus = { .pci = &rtw8852a_pci_info, }, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c index ed71364e64..5f94112265 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c @@ -63,6 +63,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = { static const struct rtw89_driver_info rtw89_8852be_info = { .chip = &rtw8852b_chip_info, + .quirks = NULL, .bus = { .pci = &rtw8852b_pci_info, }, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index 583ea673a4..e07c7f3ade 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -68,8 +68,31 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .recognize_intrs = rtw89_pci_recognize_intrs_v1, }; +static const struct dmi_system_id rtw8852c_pci_quirks[] = { + { + .ident = "Dell Inc. Vostro 16 5640", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 16 5640"), + DMI_MATCH(DMI_PRODUCT_SKU, "0CA0"), + }, + .driver_data = (void *)RTW89_QUIRK_PCI_BER, + }, + { + .ident = "Dell Inc. 
Inspiron 16 5640", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5640"), + DMI_MATCH(DMI_PRODUCT_SKU, "0C9F"), + }, + .driver_data = (void *)RTW89_QUIRK_PCI_BER, + }, + {}, +}; + static const struct rtw89_driver_info rtw89_8852ce_info = { .chip = &rtw8852c_chip_info, + .quirks = rtw8852c_pci_quirks, .bus = { .pci = &rtw8852c_pci_info, }, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c index 4981b657bd..ce8aaa9501 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c @@ -61,6 +61,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = { static const struct rtw89_driver_info rtw89_8922ae_info = { .chip = &rtw8922a_chip_info, + .quirks = NULL, .bus = { .pci = &rtw8922a_pci_info, }, diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c index ccad026def..ca4835008b 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.c +++ b/drivers/net/wireless/realtek/rtw89/wow.c @@ -457,14 +457,17 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow) struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct ieee80211_vif *wow_vif = rtw_wow->wow_vif; struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; const struct rtw89_chip_info *chip = rtwdev->chip; bool include_bb = !!chip->bbmcu_nr; + bool disable_intr_for_dlfw = false; struct ieee80211_sta *wow_sta; struct rtw89_sta *rtwsta = NULL; bool is_conn = true; int ret; - rtw89_hci_disable_intr(rtwdev); + if (chip_id == RTL8852C || chip_id == RTL8922A) + disable_intr_for_dlfw = true; wow_sta = ieee80211_find_sta(wow_vif, rtwvif->bssid); if (wow_sta) @@ -472,12 +475,18 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow) else is_conn = false; + if (disable_intr_for_dlfw) + rtw89_hci_disable_intr(rtwdev); + ret = rtw89_fw_download(rtwdev, fw_type, include_bb); if (ret) { rtw89_warn(rtwdev, "download fw failed\n"); return ret; } + if (disable_intr_for_dlfw) + rtw89_hci_enable_intr(rtwdev); + rtw89_phy_init_rf_reg(rtwdev, true); ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, @@ -520,7 +529,6 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow) } rtw89_mac_hw_mgnt_sec(rtwdev, wow); - rtw89_hci_enable_intr(rtwdev); return 0; } diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c index 2fe724d623..33c5a46f1b 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c @@ -210,7 +210,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink) rc = PTR_ERR(devlink->cd_regions[i]); dev_err(devlink->dev, "Devlink region fail,err %d", rc); /* Delete previously created regions */ - for ( ; i >= 0; i--) + for (i--; i >= 0; i--) devlink_region_destroy(devlink->cd_regions[i]); goto region_create_fail; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 095f59e7aa..d513fd2758 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -414,7 +414,15 @@ static inline void nvme_end_req_zoned(struct request *req) } } -static inline void nvme_end_req(struct request *req) +static inline void __nvme_end_req(struct request *req) +{ + nvme_end_req_zoned(req); + nvme_trace_bio_complete(req); + if (req->cmd_flags & REQ_NVME_MPATH) + nvme_mpath_end_request(req); +} + +void nvme_end_req(struct request *req) { blk_status_t 
status = nvme_error_status(nvme_req(req)->status); @@ -424,10 +432,7 @@ static inline void nvme_end_req(struct request *req) else nvme_log_error(req); } - nvme_end_req_zoned(req); - nvme_trace_bio_complete(req); - if (req->cmd_flags & REQ_NVME_MPATH) - nvme_mpath_end_request(req); + __nvme_end_req(req); blk_mq_end_request(req, status); } @@ -476,7 +481,7 @@ void nvme_complete_batch_req(struct request *req) { trace_nvme_complete_rq(req); nvme_cleanup_cmd(req); - nvme_end_req_zoned(req); + __nvme_end_req(req); } EXPORT_SYMBOL_GPL(nvme_complete_batch_req); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index d16e976ae1..a4e46eb20b 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -118,7 +118,8 @@ void nvme_failover_req(struct request *req) blk_steal_bios(&ns->head->requeue_list, req); spin_unlock_irqrestore(&ns->head->requeue_lock, flags); - blk_mq_end_request(req, 0); + nvme_req(req)->status = 0; + nvme_end_req(req); kblockd_schedule_work(&ns->head->requeue_work); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 05532c2811..d7bcc6d51e 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -767,6 +767,7 @@ static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl) } } +void nvme_end_req(struct request *req); void nvme_complete_rq(struct request *req); void nvme_complete_batch_req(struct request *req); diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index e05571b2a1..8fa1ffcdae 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -77,7 +77,7 @@ static int nvme_sc_to_pr_err(int nvme_sc) if (nvme_is_path_error(nvme_sc)) return PR_STS_PATH_FAILED; - switch (nvme_sc) { + switch (nvme_sc & 0x7ff) { case NVME_SC_SUCCESS: return PR_STS_SUCCESS; case NVME_SC_RESERVATION_CONFLICT: diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 7fda69395c..dfdff6aba6 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -676,10 +676,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item, if (kstrtobool(page, &enable)) return -EINVAL; + /* + * take a global nvmet_config_sem because the disable routine has a + * window where it releases the subsys-lock, giving a chance to + * a parallel enable to concurrently execute causing the disable to + * have a misaccounting of the ns percpu_ref. + */ + down_write(&nvmet_config_sem); if (enable) ret = nvmet_ns_enable(ns); else nvmet_ns_disable(ns); + up_write(&nvmet_config_sem); return ret ? 
ret : count; } diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index bb4a69d538..f003782d4e 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w) req->cmd->common.opcode == nvme_admin_identify) { switch (req->cmd->identify.cns) { case NVME_ID_CNS_CTRL: - nvmet_passthru_override_id_ctrl(req); + status = nvmet_passthru_override_id_ctrl(req); break; case NVME_ID_CNS_NS: - nvmet_passthru_override_id_ns(req); + status = nvmet_passthru_override_id_ns(req); break; case NVME_ID_CNS_NS_DESC_LIST: - nvmet_passthru_override_id_descs(req); + status = nvmet_passthru_override_id_descs(req); break; } } else if (status < 0) diff --git a/drivers/of/module.c b/drivers/of/module.c index f58e624953..780fd82a7e 100644 --- a/drivers/of/module.c +++ b/drivers/of/module.c @@ -29,14 +29,15 @@ ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len) csize = snprintf(str, len, "of:N%pOFn%c%s", np, 'T', of_node_get_device_type(np)); tsize = csize; + if (csize >= len) + csize = len > 0 ? len - 1 : 0; len -= csize; - if (str) - str += csize; + str += csize; of_property_for_each_string(np, "compatible", p, compat) { csize = strlen(compat) + 1; tsize += csize; - if (csize > len) + if (csize >= len) continue; csize = snprintf(str, len, "C%s", compat); diff --git a/drivers/opp/core.c b/drivers/opp/core.c index e233734b72..cb4611fe1b 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -2394,7 +2394,8 @@ static void _opp_detach_genpd(struct opp_table *opp_table) static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, const char * const *names, struct device ***virt_devs) { - struct device *virt_dev; + struct device *virt_dev, *gdev; + struct opp_table *genpd_table; int index = 0, ret = -EINVAL; const char * const *name = names; @@ -2428,6 +2429,34 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, } /* + * The required_opp_tables parsing is not perfect, as the OPP + * core does the parsing solely based on the DT node pointers. + * The core sets the required_opp_tables entry to the first OPP + * table in the "opp_tables" list, that matches with the node + * pointer. + * + * If the target DT OPP table is used by multiple devices and + * they all create separate instances of 'struct opp_table' from + * it, then it is possible that the required_opp_tables entry + * may be set to the incorrect sibling device. + * + * Cross check it again and fix if required. + */ + gdev = dev_to_genpd_dev(virt_dev); + if (IS_ERR(gdev)) + return PTR_ERR(gdev); + + genpd_table = _find_opp_table(gdev); + if (!IS_ERR(genpd_table)) { + if (genpd_table != opp_table->required_opp_tables[index]) { + dev_pm_opp_put_opp_table(opp_table->required_opp_tables[index]); + opp_table->required_opp_tables[index] = genpd_table; + } else { + dev_pm_opp_put_opp_table(genpd_table); + } + } + + /* * Add the virtual genpd device as a user of the OPP table, so * we can call dev_pm_opp_set_opp() on it directly. 
* diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 746a11dcb6..c43a1479de 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -604,11 +604,16 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct dw_pcie_ep_func *ep_func; + struct device *dev = pci->dev; + struct pci_epc *epc = ep->epc; unsigned int offset, ptm_cap_base; unsigned int nbars; u8 hdr_type; + u8 func_no; + int i, ret; + void *addr; u32 reg; - int i; hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) & PCI_HEADER_TYPE_MASK; @@ -619,6 +624,58 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) return -EIO; } + dw_pcie_version_detect(pci); + + dw_pcie_iatu_detect(pci); + + ret = dw_pcie_edma_detect(pci); + if (ret) + return ret; + + if (!ep->ib_window_map) { + ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows, + GFP_KERNEL); + if (!ep->ib_window_map) + goto err_remove_edma; + } + + if (!ep->ob_window_map) { + ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows, + GFP_KERNEL); + if (!ep->ob_window_map) + goto err_remove_edma; + } + + if (!ep->outbound_addr) { + addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t), + GFP_KERNEL); + if (!addr) + goto err_remove_edma; + ep->outbound_addr = addr; + } + + for (func_no = 0; func_no < epc->max_functions; func_no++) { + + ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); + if (ep_func) + continue; + + ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL); + if (!ep_func) + goto err_remove_edma; + + ep_func->func_no = func_no; + ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no, + PCI_CAP_ID_MSI); + ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no, + PCI_CAP_ID_MSIX); + + list_add_tail(&ep_func->list, &ep->func_list); + } + + if (ep->ops->init) + ep->ops->init(ep); + offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM); @@ -658,14 +715,17 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) dw_pcie_dbi_ro_wr_dis(pci); return 0; + +err_remove_edma: + dw_pcie_edma_remove(pci); + + return ret; } EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete); int dw_pcie_ep_init(struct dw_pcie_ep *ep) { int ret; - void *addr; - u8 func_no; struct resource *res; struct pci_epc *epc; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -673,7 +733,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; const struct pci_epc_features *epc_features; - struct dw_pcie_ep_func *ep_func; INIT_LIST_HEAD(&ep->func_list); @@ -691,26 +750,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) if (ep->ops->pre_init) ep->ops->pre_init(ep); - dw_pcie_version_detect(pci); - - dw_pcie_iatu_detect(pci); - - ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows, - GFP_KERNEL); - if (!ep->ib_window_map) - return -ENOMEM; - - ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows, - GFP_KERNEL); - if (!ep->ob_window_map) - return -ENOMEM; - - addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t), - GFP_KERNEL); - if (!addr) - return -ENOMEM; - ep->outbound_addr = addr; - epc = devm_pci_epc_create(dev, &epc_ops); if (IS_ERR(epc)) { dev_err(dev, "Failed to create epc device\n"); @@ -724,23 +763,6 @@ int 
dw_pcie_ep_init(struct dw_pcie_ep *ep) if (ret < 0) epc->max_functions = 1; - for (func_no = 0; func_no < epc->max_functions; func_no++) { - ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL); - if (!ep_func) - return -ENOMEM; - - ep_func->func_no = func_no; - ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no, - PCI_CAP_ID_MSI); - ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no, - PCI_CAP_ID_MSIX); - - list_add_tail(&ep_func->list, &ep->func_list); - } - - if (ep->ops->init) - ep->ops->init(ep); - ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, ep->page_size); if (ret < 0) { @@ -756,25 +778,25 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) goto err_exit_epc_mem; } - ret = dw_pcie_edma_detect(pci); - if (ret) - goto err_free_epc_mem; - if (ep->ops->get_features) { epc_features = ep->ops->get_features(ep); if (epc_features->core_init_notifier) return 0; } + /* + * NOTE:- Avoid accessing the hardware (Ex:- DBI space) before this + * step as platforms that implement 'core_init_notifier' feature may + * not have the hardware ready (i.e. core initialized) for access + * (Ex: tegra194). Any hardware access on such platforms result + * in system hang. + */ ret = dw_pcie_ep_init_complete(ep); if (ret) - goto err_remove_edma; + goto err_free_epc_mem; return 0; -err_remove_edma: - dw_pcie_edma_remove(pci); - err_free_epc_mem: pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, epc->mem->window.page_size); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 1f7b662cb8..e440c09d1d 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -2273,11 +2273,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) ret = tegra_pcie_config_ep(pcie, pdev); if (ret < 0) goto fail; + else + return 0; break; default: dev_err(dev, "Invalid PCIe device type %d\n", pcie->of_data->mode); + ret = -EINVAL; } fail: diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index c9046e97a1..79c3e7e9a2 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -98,10 +98,8 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn, /* All functions share the same vendor ID with function 0 */ if (fn == 0) { - u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) | - (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16; - - rockchip_pcie_write(rockchip, vid_regs, + rockchip_pcie_write(rockchip, + hdr->vendorid | hdr->subsys_vendor_id << 16, PCIE_CORE_CONFIG_VENDOR); } diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c index c2c7334152..03539e5053 100644 --- a/drivers/pci/of_property.c +++ b/drivers/pci/of_property.c @@ -238,6 +238,8 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs, return 0; int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL); + if (!int_map) + return -ENOMEM; mapp = int_map; list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e5f243dd42..cbbf197df8 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1277,6 +1277,11 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) for (;;) { u32 id; + if (pci_dev_is_disconnected(dev)) { + pci_dbg(dev, "disconnected; not waiting\n"); + return -ENOTTY; + } + pci_read_config_dword(dev, PCI_COMMAND, &id); if (!PCI_POSSIBLE_ERROR(id)) break; @@ -2962,6 +2967,18 @@ static const 
struct dmi_system_id bridge_d3_blacklist[] = { DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"), }, }, + { + /* + * Changing power state of root port dGPU is connected fails + * https://gitlab.freedesktop.org/drm/amd/-/issues/3229 + */ + .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_BOARD_NAME, "1972"), + DMI_MATCH(DMI_BOARD_VERSION, "95.33"), + }, + }, #endif { } }; @@ -4629,7 +4646,7 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) * avoid LTSSM race as recommended in Implementation Note at the * end of PCIe r6.0.1 sec 7.5.3.7. */ - rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt); + rc = pcie_wait_for_link_status(pdev, true, false); if (rc) return rc; diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c index 5f4914d313..e86298dbbc 100644 --- a/drivers/pci/pcie/edr.c +++ b/drivers/pci/pcie/edr.c @@ -32,10 +32,10 @@ static int acpi_enable_dpc(struct pci_dev *pdev) int status = 0; /* - * Behavior when calling unsupported _DSM functions is undefined, - * so check whether EDR_PORT_DPC_ENABLE_DSM is supported. + * Per PCI Firmware r3.3, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is + * optional. Return success if it's not implemented. */ - if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5, + if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 6, 1ULL << EDR_PORT_DPC_ENABLE_DSM)) return 0; @@ -46,12 +46,7 @@ static int acpi_enable_dpc(struct pci_dev *pdev) argv4.package.count = 1; argv4.package.elements = &req; - /* - * Per Downstream Port Containment Related Enhancements ECN to PCI - * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is - * optional. Return success if it's not implemented. - */ - obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5, + obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 6, EDR_PORT_DPC_ENABLE_DSM, &argv4); if (!obj) return 0; @@ -85,8 +80,9 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev) u16 port; /* - * Behavior when calling unsupported _DSM functions is undefined, - * so check whether EDR_PORT_DPC_ENABLE_DSM is supported. + * If EDR_PORT_LOCATE_DSM is not implemented under the target of + * EDR, the target is the port that experienced the containment + * event (PCI Firmware r3.3, sec 4.6.13). */ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5, 1ULL << EDR_PORT_LOCATE_DSM)) @@ -104,6 +100,16 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev) } /* + * Bit 31 represents the success/failure of the operation. If bit + * 31 is set, the operation failed. + */ + if (obj->integer.value & BIT(31)) { + ACPI_FREE(obj); + pci_err(pdev, "Locate Port _DSM failed\n"); + return NULL; + } + + /* * Firmware returns DPC port BDF details in following format: * 15:8 = bus * 7:3 = device diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c index 8a81be2dd5..88c17c1d6d 100644 --- a/drivers/perf/arm_dmc620_pmu.c +++ b/drivers/perf/arm_dmc620_pmu.c @@ -542,12 +542,16 @@ static int dmc620_pmu_event_init(struct perf_event *event) if (event->cpu < 0) return -EINVAL; + hwc->idx = -1; + + if (event->group_leader == event) + return 0; + /* * We can't atomically disable all HW counters so only one event allowed, * although software events are acceptable. 
*/ - if (event->group_leader != event && - !is_software_event(event->group_leader)) + if (!is_software_event(event->group_leader)) return -EINVAL; for_each_sibling_event(sibling, event->group_leader) { @@ -556,7 +560,6 @@ static int dmc620_pmu_event_init(struct perf_event *event) return -EINVAL; } - hwc->idx = -1; return 0; } diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c index 5d1f0e9fdb..dba3991256 100644 --- a/drivers/perf/hisilicon/hisi_pcie_pmu.c +++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c @@ -350,15 +350,27 @@ static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event) return false; for (num = 0; num < counters; num++) { + /* + * If we find a related event, then it's a valid group + * since we don't need to allocate a new counter for it. + */ if (hisi_pcie_pmu_cmp_event(event_group[num], sibling)) break; } + /* + * Otherwise it's a new event but if there's no available counter, + * fail the check since we cannot schedule all the events in + * the group simultaneously. + */ + if (num == HISI_PCIE_MAX_COUNTERS) + return false; + if (num == counters) event_group[counters++] = sibling; } - return counters <= HISI_PCIE_MAX_COUNTERS; + return true; } static int hisi_pcie_pmu_event_init(struct perf_event *event) diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c index 16869bf5bf..60062eaa34 100644 --- a/drivers/perf/hisilicon/hns3_pmu.c +++ b/drivers/perf/hisilicon/hns3_pmu.c @@ -1085,15 +1085,27 @@ static bool hns3_pmu_validate_event_group(struct perf_event *event) return false; for (num = 0; num < counters; num++) { + /* + * If we find a related event, then it's a valid group + * since we don't need to allocate a new counter for it. + */ if (hns3_pmu_cmp_event(event_group[num], sibling)) break; } + /* + * Otherwise it's a new event but if there's no available counter, + * fail the check since we cannot schedule all the events in + * the group simultaneously. 
+ */ + if (num == HNS3_PMU_MAX_HW_EVENTS) + return false; + if (num == counters) event_group[counters++] = sibling; } - return counters <= HNS3_PMU_MAX_HW_EVENTS; + return true; } static u32 hns3_pmu_get_filter_condition(struct perf_event *event) @@ -1515,7 +1527,7 @@ static int hns3_pmu_irq_register(struct pci_dev *pdev, return ret; } - ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev); + ret = devm_add_action_or_reset(&pdev->dev, hns3_pmu_free_irq, pdev); if (ret) { pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret); return ret; diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 8cbe6e5f9c..3e44d2fb8b 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -27,7 +27,7 @@ #define ALT_SBI_PMU_OVERFLOW(__ovl) \ asm volatile(ALTERNATIVE_2( \ - "csrr %0, " __stringify(CSR_SSCOUNTOVF), \ + "csrr %0, " __stringify(CSR_SCOUNTOVF), \ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \ CONFIG_ERRATA_THEAD_PMU, \ diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index c21cdb8dbf..b8919443e4 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -187,6 +187,31 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_TX_TRANSCEIVER_BIAS_EN, }; +static const unsigned int qmp_v6_n4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = QPHY_V6_N4_PCS_SW_RESET, + [QPHY_START_CTRL] = QPHY_V6_N4_PCS_START_CONTROL, + [QPHY_PCS_STATUS] = QPHY_V6_N4_PCS_PCS_STATUS1, + [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_N4_PCS_POWER_DOWN_CONTROL, + + /* In PCS_USB */ + [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL, + [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR, + + [QPHY_COM_RESETSM_CNTRL] = QSERDES_V6_COM_RESETSM_CNTRL, + [QPHY_COM_C_READY_STATUS] = QSERDES_V6_COM_C_READY_STATUS, + [QPHY_COM_CMN_STATUS] = QSERDES_V6_COM_CMN_STATUS, + [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, + + [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS, + [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV, + + [QPHY_TX_TX_POL_INV] = QSERDES_V6_N4_TX_TX_POL_INV, + [QPHY_TX_TX_DRV_LVL] = QSERDES_V6_N4_TX_TX_DRV_LVL, + [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V6_N4_TX_TX_EMP_POST1_LVL, + [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V6_N4_TX_HIGHZ_DRVR_EN, + [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN, +}; + static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), @@ -997,6 +1022,31 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f), }; +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06), + 
QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_CTRL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x17), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f), +}; + static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V6_TX_VMODE_CTRL1, 0x40), QMP_PHY_INIT_CFG(QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN, 0x30), @@ -1011,6 +1061,19 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V6_TX_TX_BAND, 0x4), }; +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_VMODE_CTRL1, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_INTERFACE_SELECT, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_CLKBUF_ENABLE, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RESET_TSYNC_EN, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TX_BAND, 0x1), +}; + static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_rbr[] = { QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05), QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), @@ -1059,6 +1122,74 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_hbr3[] = { QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), }; +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_rbr[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x37), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01), +}; + +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), + 
QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01), +}; + +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr2[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x46), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x97), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x02), +}; + +static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr3[] = { + QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x17), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92), + QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01), +}; + static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01), QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31), @@ -1273,20 +1404,20 @@ static const struct qmp_phy_init_tbl x1e80100_usb43dp_rx_tbl[] = { }; static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = { - QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x55), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0xd4), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x30), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b), - QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1, 0xc4), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2, 0x89), + 
QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3, 0x20), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6, 0x13), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1, 0x21), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_SIGDET_LVL, 0x55), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_CONFIG, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1, 0xd4), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2, 0x30), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG5, 0x10), }; static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = { @@ -1382,6 +1513,13 @@ static const u8 qmp_dp_v5_voltage_swing_hbr_rbr[4][4] = { { 0x3f, 0xff, 0xff, 0xff } }; +static const u8 qmp_dp_v6_voltage_swing_hbr_rbr[4][4] = { + { 0x27, 0x2f, 0x36, 0x3f }, + { 0x31, 0x3e, 0x3f, 0xff }, + { 0x36, 0x3f, 0xff, 0xff }, + { 0x3f, 0xff, 0xff, 0xff } +}; + static const u8 qmp_dp_v6_pre_emphasis_hbr_rbr[4][4] = { { 0x20, 0x2d, 0x34, 0x3a }, { 0x20, 0x2e, 0x35, 0xff }, @@ -1787,22 +1925,22 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = { .pcs_usb_tbl = x1e80100_usb43dp_pcs_usb_tbl, .pcs_usb_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_pcs_usb_tbl), - .dp_serdes_tbl = qmp_v6_dp_serdes_tbl, - .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl), - .dp_tx_tbl = qmp_v6_dp_tx_tbl, - .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl), - - .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr, - .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr), - .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr, - .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr), - .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2, - .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2), - .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3, - .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3), - - .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr, - .pre_emphasis_hbr_rbr = &qmp_dp_v5_pre_emphasis_hbr_rbr, + .dp_serdes_tbl = qmp_v6_n4_dp_serdes_tbl, + .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl), + .dp_tx_tbl = qmp_v6_n4_dp_tx_tbl, + .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_tx_tbl), + + .serdes_tbl_rbr = qmp_v6_n4_dp_serdes_tbl_rbr, + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_rbr), + .serdes_tbl_hbr = qmp_v6_n4_dp_serdes_tbl_hbr, + .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr), + .serdes_tbl_hbr2 = qmp_v6_n4_dp_serdes_tbl_hbr2, + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr2), + .serdes_tbl_hbr3 = qmp_v6_n4_dp_serdes_tbl_hbr3, + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr3), + + .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr, + .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr, .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2, .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2, @@ -1815,7 +1953,7 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = { .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), .vreg_list = qmp_phy_vreg_l, .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), - .regs = qmp_v45_usb3phy_regs_layout, + .regs = qmp_v6_n4_usb3phy_regs_layout, }; static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = { @@ -2001,6 +2139,51 @@ static const struct qmp_phy_cfg sm8550_usb3dpphy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), }; +static const struct qmp_phy_cfg sm8650_usb3dpphy_cfg = { + .offsets = &qmp_combo_offsets_v3, + + .serdes_tbl = sm8550_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8550_usb3_serdes_tbl), + .tx_tbl = 
sm8550_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8550_usb3_tx_tbl), + .rx_tbl = sm8550_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8550_usb3_rx_tbl), + .pcs_tbl = sm8550_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_tbl), + .pcs_usb_tbl = sm8550_usb3_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_usb_tbl), + + .dp_serdes_tbl = qmp_v6_dp_serdes_tbl, + .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl), + .dp_tx_tbl = qmp_v6_dp_tx_tbl, + .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl), + + .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr, + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr), + .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr, + .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr), + .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2, + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2), + .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3, + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3), + + .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr, + .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr, + .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2, + .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2, + + .dp_aux_init = qmp_v4_dp_aux_init, + .configure_dp_tx = qmp_v4_configure_dp_tx, + .configure_dp_phy = qmp_v4_configure_dp_phy, + .calibrate_dp_phy = qmp_v4_calibrate_dp_phy, + + .regs = qmp_v6_usb3phy_regs_layout, + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), +}; + static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp) { const struct qmp_phy_cfg *cfg = qmp->cfg; @@ -3631,7 +3814,7 @@ static const struct of_device_id qmp_combo_of_match_table[] = { }, { .compatible = "qcom,sm8650-qmp-usb3-dp-phy", - .data = &sm8550_usb3dpphy_cfg, + .data = &sm8650_usb3dpphy_cfg, }, { .compatible = "qcom,x1e80100-qmp-usb3-dp-phy", diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h new file mode 100644 index 0000000000..b3024714da --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef QCOM_PHY_QMP_PCS_V6_N4_H_ +#define QCOM_PHY_QMP_PCS_V6_N4_H_ + +/* Only for QMP V6 N4 PHY - USB/PCIe PCS registers */ +#define QPHY_V6_N4_PCS_SW_RESET 0x000 +#define QPHY_V6_N4_PCS_PCS_STATUS1 0x014 +#define QPHY_V6_N4_PCS_POWER_DOWN_CONTROL 0x040 +#define QPHY_V6_N4_PCS_START_CONTROL 0x044 +#define QPHY_V6_N4_PCS_POWER_STATE_CONFIG1 0x090 +#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1 0x0c4 +#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2 0x0c8 +#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3 0x0cc +#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6 0x0d8 +#define QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1 0x0dc +#define QPHY_V6_N4_PCS_RX_SIGDET_LVL 0x188 +#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190 +#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194 +#define QPHY_V6_N4_PCS_RATE_SLEW_CNTRL1 0x198 +#define QPHY_V6_N4_PCS_RX_CONFIG 0x1b0 +#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1 0x1c0 +#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2 0x1c4 +#define QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG 0x1d0 +#define QPHY_V6_N4_PCS_EQ_CONFIG1 0x1dc +#define QPHY_V6_N4_PCS_EQ_CONFIG2 0x1e0 +#define QPHY_V6_N4_PCS_EQ_CONFIG5 0x1ec + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h index a814ad11af..d37cc0d4fd 100644 --- 
a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h @@ -6,11 +6,24 @@ #ifndef QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_ #define QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_ +#define QSERDES_V6_N4_TX_CLKBUF_ENABLE 0x08 +#define QSERDES_V6_N4_TX_TX_EMP_POST1_LVL 0x0c +#define QSERDES_V6_N4_TX_TX_DRV_LVL 0x14 +#define QSERDES_V6_N4_TX_RESET_TSYNC_EN 0x1c +#define QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN 0x20 #define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX 0x30 #define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX 0x34 +#define QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN 0x48 +#define QSERDES_V6_N4_TX_HIGHZ_DRVR_EN 0x4c +#define QSERDES_V6_N4_TX_TX_POL_INV 0x50 +#define QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN 0x54 #define QSERDES_V6_N4_TX_LANE_MODE_1 0x78 #define QSERDES_V6_N4_TX_LANE_MODE_2 0x7c #define QSERDES_V6_N4_TX_LANE_MODE_3 0x80 +#define QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN 0xac +#define QSERDES_V6_N4_TX_TX_BAND 0xd8 +#define QSERDES_V6_N4_TX_INTERFACE_SELECT 0xe4 +#define QSERDES_V6_N4_TX_VMODE_CTRL1 0xb0 #define QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2 0x8 #define QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2 0x18 diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index d10b8f653c..d0f41e4aaa 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -46,6 +46,8 @@ #include "phy-qcom-qmp-pcs-v6.h" +#include "phy-qcom-qmp-pcs-v6-n4.h" + #include "phy-qcom-qmp-pcs-v6_20.h" #include "phy-qcom-qmp-pcs-v7.h" diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c index c25357ca19..b9f067de8e 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm7150.c +++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c @@ -65,7 +65,7 @@ enum { .intr_detection_width = 2, \ } -#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ +#define SDC_QDSD_PINGROUP(pg_name, _tile, ctl, pull, drv) \ { \ .grp = PINCTRL_PINGROUP(#pg_name, \ pg_name##_pins, \ @@ -75,7 +75,7 @@ enum { .intr_cfg_reg = 0, \ .intr_status_reg = 0, \ .intr_target_reg = 0, \ - .tile = SOUTH, \ + .tile = _tile, \ .mux_bit = -1, \ .pull_bit = pull, \ .drv_bit = drv, \ @@ -101,7 +101,7 @@ enum { .intr_cfg_reg = 0, \ .intr_status_reg = 0, \ .intr_target_reg = 0, \ - .tile = SOUTH, \ + .tile = WEST, \ .mux_bit = -1, \ .pull_bit = 3, \ .drv_bit = 0, \ @@ -1199,13 +1199,13 @@ static const struct msm_pingroup sm7150_groups[] = { [117] = PINGROUP(117, NORTH, _, _, _, _, _, _, _, _, _), [118] = PINGROUP(118, NORTH, _, _, _, _, _, _, _, _, _), [119] = UFS_RESET(ufs_reset, 0x9f000), - [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0), - [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6), - [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3), - [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0), - [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x98000, 14, 6), - [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x98000, 11, 3), - [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x98000, 9, 0), + [120] = SDC_QDSD_PINGROUP(sdc1_rclk, WEST, 0x9a000, 15, 0), + [121] = SDC_QDSD_PINGROUP(sdc1_clk, WEST, 0x9a000, 13, 6), + [122] = SDC_QDSD_PINGROUP(sdc1_cmd, WEST, 0x9a000, 11, 3), + [123] = SDC_QDSD_PINGROUP(sdc1_data, WEST, 0x9a000, 9, 0), + [124] = SDC_QDSD_PINGROUP(sdc2_clk, SOUTH, 0x98000, 14, 6), + [125] = SDC_QDSD_PINGROUP(sdc2_cmd, SOUTH, 0x98000, 11, 3), + [126] = SDC_QDSD_PINGROUP(sdc2_data, SOUTH, 0x98000, 9, 0), }; static const struct msm_gpio_wakeirq_map sm7150_pdc_map[] = { diff --git a/drivers/pinctrl/renesas/pfc-r8a779h0.c b/drivers/pinctrl/renesas/pfc-r8a779h0.c index 
afa8f06c85..0cbfe7637f 100644 --- a/drivers/pinctrl/renesas/pfc-r8a779h0.c +++ b/drivers/pinctrl/renesas/pfc-r8a779h0.c @@ -75,10 +75,10 @@ #define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4) #define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0) #define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28) -#define GPSR0_6 F_(IRQ0, IP0SR0_27_24) -#define GPSR0_5 F_(IRQ1, IP0SR0_23_20) -#define GPSR0_4 F_(IRQ2, IP0SR0_19_16) -#define GPSR0_3 F_(IRQ3, IP0SR0_15_12) +#define GPSR0_6 F_(IRQ0_A, IP0SR0_27_24) +#define GPSR0_5 F_(IRQ1_A, IP0SR0_23_20) +#define GPSR0_4 F_(IRQ2_A, IP0SR0_19_16) +#define GPSR0_3 F_(IRQ3_A, IP0SR0_15_12) #define GPSR0_2 F_(GP0_02, IP0SR0_11_8) #define GPSR0_1 F_(GP0_01, IP0SR0_7_4) #define GPSR0_0 F_(GP0_00, IP0SR0_3_0) @@ -265,10 +265,10 @@ #define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_15_12 FM(IRQ3_A) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_19_16 FM(IRQ2_A) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_23_20 FM(IRQ1_A) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_27_24 FM(IRQ0_A) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ @@ -672,16 +672,16 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2), - PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3), + PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3_A), PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK), - PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2), + PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2_A), PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD), - PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1), + PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1_A), PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD), - PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0), + PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0_A), PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC), PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2), diff --git 
a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index 20425afc6b..248ab71b9f 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -892,6 +892,8 @@ static int rzg2l_set_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps val = PVDD_1800; break; case 2500: + if (!(caps & (PIN_CFG_IO_VMC_ETH0 | PIN_CFG_IO_VMC_ETH1))) + return -EINVAL; val = PVDD_2500; break; case 3300: diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c index badc68bbae..47d19f7e29 100644 --- a/drivers/platform/chrome/cros_ec.c +++ b/drivers/platform/chrome/cros_ec.c @@ -432,6 +432,12 @@ static void cros_ec_send_resume_event(struct cros_ec_device *ec_dev) void cros_ec_resume_complete(struct cros_ec_device *ec_dev) { cros_ec_send_resume_event(ec_dev); + + /* + * Let the mfd devices know about events that occur during + * suspend. This way the clients know what to do with them. + */ + cros_ec_report_events_during_suspend(ec_dev); } EXPORT_SYMBOL(cros_ec_resume_complete); @@ -442,12 +448,6 @@ static void cros_ec_enable_irq(struct cros_ec_device *ec_dev) if (ec_dev->wake_enabled) disable_irq_wake(ec_dev->irq); - - /* - * Let the mfd devices know about events that occur during - * suspend. This way the clients know what to do with them. - */ - cros_ec_report_events_during_suspend(ec_dev); } /** @@ -475,8 +475,8 @@ EXPORT_SYMBOL(cros_ec_resume_early); */ int cros_ec_resume(struct cros_ec_device *ec_dev) { - cros_ec_enable_irq(ec_dev); - cros_ec_send_resume_event(ec_dev); + cros_ec_resume_early(ec_dev); + cros_ec_resume_complete(ec_dev); return 0; } EXPORT_SYMBOL(cros_ec_resume); diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c index f618757f8b..930c2f4726 100644 --- a/drivers/platform/chrome/cros_usbpd_logger.c +++ b/drivers/platform/chrome/cros_usbpd_logger.c @@ -7,6 +7,7 @@ #include <linux/ktime.h> #include <linux/math64.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_data/cros_ec_commands.h> #include <linux/platform_data/cros_ec_proto.h> @@ -249,6 +250,12 @@ static int __maybe_unused cros_usbpd_logger_suspend(struct device *dev) static SIMPLE_DEV_PM_OPS(cros_usbpd_logger_pm_ops, cros_usbpd_logger_suspend, cros_usbpd_logger_resume); +static const struct platform_device_id cros_usbpd_logger_id[] = { + { DRV_NAME, 0 }, + {} +}; +MODULE_DEVICE_TABLE(platform, cros_usbpd_logger_id); + static struct platform_driver cros_usbpd_logger_driver = { .driver = { .name = DRV_NAME, @@ -256,10 +263,10 @@ static struct platform_driver cros_usbpd_logger_driver = { }, .probe = cros_usbpd_logger_probe, .remove_new = cros_usbpd_logger_remove, + .id_table = cros_usbpd_logger_id, }; module_platform_driver(cros_usbpd_logger_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Logging driver for ChromeOS EC USBPD Charger."); -MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c index aacad022f2..c83f81d864 100644 --- a/drivers/platform/chrome/cros_usbpd_notify.c +++ b/drivers/platform/chrome/cros_usbpd_notify.c @@ -6,6 +6,7 @@ */ #include <linux/acpi.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_data/cros_ec_proto.h> #include <linux/platform_data/cros_usbpd_notify.h> @@ -218,12 +219,19 @@ static void cros_usbpd_notify_remove_plat(struct platform_device *pdev) &pdnotify->nb); } +static const struct 
platform_device_id cros_usbpd_notify_id[] = { + { DRV_NAME, 0 }, + {} +}; +MODULE_DEVICE_TABLE(platform, cros_usbpd_notify_id); + static struct platform_driver cros_usbpd_notify_plat_driver = { .driver = { .name = DRV_NAME, }, .probe = cros_usbpd_notify_probe_plat, .remove_new = cros_usbpd_notify_remove_plat, + .id_table = cros_usbpd_notify_id, }; static int __init cros_usbpd_notify_init(void) @@ -258,4 +266,3 @@ module_exit(cros_usbpd_notify_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ChromeOS power delivery notifier device"); MODULE_AUTHOR("Jon Flatley <jflat@chromium.org>"); -MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c index e61bfaf8b5..86b95206cb 100644 --- a/drivers/platform/x86/dell/dell-smbios-base.c +++ b/drivers/platform/x86/dell/dell-smbios-base.c @@ -11,6 +11,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/container_of.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/capability.h> @@ -25,11 +26,16 @@ static u32 da_supported_commands; static int da_num_tokens; static struct platform_device *platform_device; static struct calling_interface_token *da_tokens; -static struct device_attribute *token_location_attrs; -static struct device_attribute *token_value_attrs; +static struct token_sysfs_data *token_entries; static struct attribute **token_attrs; static DEFINE_MUTEX(smbios_mutex); +struct token_sysfs_data { + struct device_attribute location_attr; + struct device_attribute value_attr; + struct calling_interface_token *token; +}; + struct smbios_device { struct list_head list; struct device *device; @@ -416,47 +422,26 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy) } } -static int match_attribute(struct device *dev, - struct device_attribute *attr) -{ - int i; - - for (i = 0; i < da_num_tokens * 2; i++) { - if (!token_attrs[i]) - continue; - if (strcmp(token_attrs[i]->name, attr->attr.name) == 0) - return i/2; - } - dev_dbg(dev, "couldn't match: %s\n", attr->attr.name); - return -EINVAL; -} - static ssize_t location_show(struct device *dev, struct device_attribute *attr, char *buf) { - int i; + struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, location_attr); if (!capable(CAP_SYS_ADMIN)) return -EPERM; - i = match_attribute(dev, attr); - if (i > 0) - return sysfs_emit(buf, "%08x", da_tokens[i].location); - return 0; + return sysfs_emit(buf, "%08x", data->token->location); } static ssize_t value_show(struct device *dev, struct device_attribute *attr, char *buf) { - int i; + struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, value_attr); if (!capable(CAP_SYS_ADMIN)) return -EPERM; - i = match_attribute(dev, attr); - if (i > 0) - return sysfs_emit(buf, "%08x", da_tokens[i].value); - return 0; + return sysfs_emit(buf, "%08x", data->token->value); } static struct attribute_group smbios_attribute_group = { @@ -473,22 +458,15 @@ static int build_tokens_sysfs(struct platform_device *dev) { char *location_name; char *value_name; - size_t size; int ret; int i, j; - /* (number of tokens + 1 for null terminated */ - size = sizeof(struct device_attribute) * (da_num_tokens + 1); - token_location_attrs = kzalloc(size, GFP_KERNEL); - if (!token_location_attrs) + token_entries = kcalloc(da_num_tokens, sizeof(*token_entries), GFP_KERNEL); + if (!token_entries) return -ENOMEM; - token_value_attrs = kzalloc(size, GFP_KERNEL); - if (!token_value_attrs) - goto out_allocate_value; 
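Editor's note on the dell-smbios rework above: it replaces the old string-compare lookup (match_attribute) with a per-token struct that embeds both device_attribute objects next to a back-pointer to the token, so each show() callback recovers its context in O(1) via container_of(). The following is only a minimal sketch of that sysfs idiom under the same assumptions; the struct, function, and attribute names are illustrative placeholders, not identifiers from the driver.

    #include <linux/container_of.h>
    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* One entry per exported item: the attribute object lives inside it. */
    struct item_entry {
            struct device_attribute value_attr;
            u32 value;                      /* whatever the attribute reports */
    };

    static ssize_t value_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
    {
            /* Recover the owning entry directly from the attribute pointer. */
            struct item_entry *entry =
                    container_of(attr, struct item_entry, value_attr);

            return sysfs_emit(buf, "%08x\n", entry->value);
    }

    /* Caller fills one entry, then registers entry->value_attr.attr as usual. */
    static void item_entry_init(struct item_entry *entry, const char *name,
                                u32 value)
    {
            entry->value = value;
            sysfs_attr_init(&entry->value_attr.attr);
            entry->value_attr.attr.name = name;
            entry->value_attr.attr.mode = 0444;
            entry->value_attr.show = value_show;
    }

Because the attribute object is embedded in the entry it describes, no global lookup table or name comparison is needed when the callback fires, which is exactly what lets the hunk delete match_attribute().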
/* need to store both location and value + terminator*/ - size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1); - token_attrs = kzalloc(size, GFP_KERNEL); + token_attrs = kcalloc((2 * da_num_tokens) + 1, sizeof(*token_attrs), GFP_KERNEL); if (!token_attrs) goto out_allocate_attrs; @@ -496,27 +474,32 @@ static int build_tokens_sysfs(struct platform_device *dev) /* skip empty */ if (da_tokens[i].tokenID == 0) continue; + + token_entries[i].token = &da_tokens[i]; + /* add location */ location_name = kasprintf(GFP_KERNEL, "%04x_location", da_tokens[i].tokenID); if (location_name == NULL) goto out_unwind_strings; - sysfs_attr_init(&token_location_attrs[i].attr); - token_location_attrs[i].attr.name = location_name; - token_location_attrs[i].attr.mode = 0444; - token_location_attrs[i].show = location_show; - token_attrs[j++] = &token_location_attrs[i].attr; + + sysfs_attr_init(&token_entries[i].location_attr.attr); + token_entries[i].location_attr.attr.name = location_name; + token_entries[i].location_attr.attr.mode = 0444; + token_entries[i].location_attr.show = location_show; + token_attrs[j++] = &token_entries[i].location_attr.attr; /* add value */ value_name = kasprintf(GFP_KERNEL, "%04x_value", da_tokens[i].tokenID); if (value_name == NULL) goto loop_fail_create_value; - sysfs_attr_init(&token_value_attrs[i].attr); - token_value_attrs[i].attr.name = value_name; - token_value_attrs[i].attr.mode = 0444; - token_value_attrs[i].show = value_show; - token_attrs[j++] = &token_value_attrs[i].attr; + + sysfs_attr_init(&token_entries[i].value_attr.attr); + token_entries[i].value_attr.attr.name = value_name; + token_entries[i].value_attr.attr.mode = 0444; + token_entries[i].value_attr.show = value_show; + token_attrs[j++] = &token_entries[i].value_attr.attr; continue; loop_fail_create_value: @@ -532,14 +515,12 @@ loop_fail_create_value: out_unwind_strings: while (i--) { - kfree(token_location_attrs[i].attr.name); - kfree(token_value_attrs[i].attr.name); + kfree(token_entries[i].location_attr.attr.name); + kfree(token_entries[i].value_attr.attr.name); } kfree(token_attrs); out_allocate_attrs: - kfree(token_value_attrs); -out_allocate_value: - kfree(token_location_attrs); + kfree(token_entries); return -ENOMEM; } @@ -551,12 +532,11 @@ static void free_group(struct platform_device *pdev) sysfs_remove_group(&pdev->dev.kobj, &smbios_attribute_group); for (i = 0; i < da_num_tokens; i++) { - kfree(token_location_attrs[i].attr.name); - kfree(token_value_attrs[i].attr.name); + kfree(token_entries[i].location_attr.attr.name); + kfree(token_entries[i].value_attr.attr.name); } kfree(token_attrs); - kfree(token_value_attrs); - kfree(token_location_attrs); + kfree(token_entries); } static int __init dell_smbios_init(void) diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index 910df7c654..003e765ded 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -763,8 +763,11 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev) * when actual device nodes created outside this * loop via tpmi_create_devices(). 
*/ - if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) - tpmi_process_info(tpmi_info, pfs); + if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) { + ret = tpmi_process_info(tpmi_info, pfs); + if (ret) + return ret; + } if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID) tpmi_set_control_base(auxdev, tpmi_info, pfs); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index ef730200a0..bb8e72deb3 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -240,6 +240,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ bool read_blocked = 0, write_blocked = 0; struct intel_tpmi_plat_info *plat_info; struct tpmi_uncore_struct *tpmi_uncore; + bool uncore_sysfs_added = false; int ret, i, pkg = 0; int num_resources; @@ -384,9 +385,15 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ } /* Point to next cluster offset */ cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN; + uncore_sysfs_added = true; } } + if (!uncore_sysfs_added) { + ret = -ENODEV; + goto remove_clusters; + } + auxiliary_set_drvdata(auxdev, tpmi_uncore); tpmi_uncore->root_cluster.root_domain = true; diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c index 3d66e1d4eb..1ac30034f3 100644 --- a/drivers/platform/x86/p2sb.c +++ b/drivers/platform/x86/p2sb.c @@ -56,12 +56,9 @@ static int p2sb_get_devfn(unsigned int *devfn) return 0; } -static bool p2sb_valid_resource(struct resource *res) +static bool p2sb_valid_resource(const struct resource *res) { - if (res->flags) - return true; - - return false; + return res->flags & ~IORESOURCE_UNSET; } /* Copy resource from the first BAR of the device in question */ @@ -220,16 +217,20 @@ EXPORT_SYMBOL_GPL(p2sb_bar); static int __init p2sb_fs_init(void) { - p2sb_cache_resources(); - return 0; + return p2sb_cache_resources(); } /* - * pci_rescan_remove_lock to avoid access to unhidden P2SB devices can - * not be locked in sysfs pci bus rescan path because of deadlock. To - * avoid the deadlock, access to P2SB devices with the lock at an early - * step in kernel initialization and cache required resources. This - * should happen after subsys_initcall which initializes PCI subsystem - * and before device_initcall which requires P2SB resources. + * pci_rescan_remove_lock() can not be locked in sysfs PCI bus rescan path + * because of deadlock. To avoid the deadlock, access P2SB devices with the lock + * at an early step in kernel initialization and cache required resources. + * + * We want to run as early as possible. If the P2SB was assigned a bad BAR, + * we'll need to wait on pcibios_assign_resources() to fix it. So, our list of + * initcall dependencies looks something like this: + * + * ... 
+ * subsys_initcall (pci_subsys_init) + * fs_initcall (pcibios_assign_resources) */ -fs_initcall(p2sb_fs_init); +fs_initcall_sync(p2sb_fs_init); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 82429e5999..87a4a381bd 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -3044,10 +3044,9 @@ static void tpacpi_send_radiosw_update(void) static void hotkey_exit(void) { -#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL mutex_lock(&hotkey_mutex); +#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL hotkey_poll_stop_sync(); - mutex_unlock(&hotkey_mutex); #endif dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY, "restoring original HKEY status and mask\n"); @@ -3057,6 +3056,8 @@ static void hotkey_exit(void) hotkey_mask_set(hotkey_orig_mask)) | hotkey_status_set(false)) != 0) pr_err("failed to restore hot key mask to BIOS defaults\n"); + + mutex_unlock(&hotkey_mutex); } static void __init hotkey_unmap(const unsigned int scancode) diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 77244c9aa6..16e941449b 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -57,6 +57,11 @@ module_param(turn_on_panel_on_resume, int, 0644); MODULE_PARM_DESC(turn_on_panel_on_resume, "Call HCI_PANEL_POWER_ON on resume (-1 = auto, 0 = no, 1 = yes"); +static int hci_hotkey_quickstart = -1; +module_param(hci_hotkey_quickstart, int, 0644); +MODULE_PARM_DESC(hci_hotkey_quickstart, + "Call HCI_HOTKEY_EVENT with value 0x5 for quickstart button support (-1 = auto, 0 = no, 1 = yes"); + #define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100" /* Scan code for Fn key on TOS1900 models */ @@ -136,6 +141,7 @@ MODULE_PARM_DESC(turn_on_panel_on_resume, #define HCI_ACCEL_MASK 0x7fff #define HCI_ACCEL_DIRECTION_MASK 0x8000 #define HCI_HOTKEY_DISABLE 0x0b +#define HCI_HOTKEY_ENABLE_QUICKSTART 0x05 #define HCI_HOTKEY_ENABLE 0x09 #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10 #define HCI_LCD_BRIGHTNESS_BITS 3 @@ -2731,10 +2737,15 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev) return -ENODEV; /* + * Enable quickstart buttons if supported. + * * Enable the "Special Functions" mode only if they are * supported and if they are activated. */ - if (dev->kbd_function_keys_supported && dev->special_functions) + if (hci_hotkey_quickstart) + result = hci_write(dev, HCI_HOTKEY_EVENT, + HCI_HOTKEY_ENABLE_QUICKSTART); + else if (dev->kbd_function_keys_supported && dev->special_functions) result = hci_write(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_SPECIAL_FUNCTIONS); else @@ -3258,7 +3269,14 @@ static const char *find_hci_method(acpi_handle handle) * works. toshiba_acpi_resume() uses HCI_PANEL_POWER_ON to avoid changing * the configured brightness level. */ -static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = { +#define QUIRK_TURN_ON_PANEL_ON_RESUME BIT(0) +/* + * Some Toshibas use "quickstart" keys. On these, HCI_HOTKEY_EVENT must use + * the value HCI_HOTKEY_ENABLE_QUICKSTART. 
+ */ +#define QUIRK_HCI_HOTKEY_QUICKSTART BIT(1) + +static const struct dmi_system_id toshiba_dmi_quirks[] = { { /* Toshiba Portégé R700 */ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */ @@ -3266,6 +3284,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"), }, + .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME, }, { /* Toshiba Satellite/Portégé R830 */ @@ -3275,6 +3294,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "R830"), }, + .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME, }, { /* Toshiba Satellite/Portégé Z830 */ @@ -3282,6 +3302,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "Z830"), }, + .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART), }, }; @@ -3290,6 +3311,8 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) struct toshiba_acpi_dev *dev; const char *hci_method; u32 dummy; + const struct dmi_system_id *dmi_id; + long quirks = 0; int ret = 0; if (toshiba_acpi) @@ -3442,8 +3465,15 @@ iio_error: } #endif + dmi_id = dmi_first_match(toshiba_dmi_quirks); + if (dmi_id) + quirks = (long)dmi_id->driver_data; + if (turn_on_panel_on_resume == -1) - turn_on_panel_on_resume = dmi_check_system(turn_on_panel_on_resume_dmi_ids); + turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME); + + if (hci_hotkey_quickstart == -1) + hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART); toshiba_wwan_available(dev); if (dev->wwan_supported) diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c index a3415f1c0b..6559bb4ea7 100644 --- a/drivers/platform/x86/x86-android-tablets/core.c +++ b/drivers/platform/x86/x86-android-tablets/core.c @@ -278,25 +278,25 @@ static void x86_android_tablet_remove(struct platform_device *pdev) { int i; - for (i = 0; i < serdev_count; i++) { + for (i = serdev_count - 1; i >= 0; i--) { if (serdevs[i]) serdev_device_remove(serdevs[i]); } kfree(serdevs); - for (i = 0; i < pdev_count; i++) + for (i = pdev_count - 1; i >= 0; i--) platform_device_unregister(pdevs[i]); kfree(pdevs); kfree(buttons); - for (i = 0; i < spi_dev_count; i++) + for (i = spi_dev_count - 1; i >= 0; i--) spi_unregister_device(spi_devs[i]); kfree(spi_devs); - for (i = 0; i < i2c_client_count; i++) + for (i = i2c_client_count - 1; i >= 0; i--) i2c_unregister_device(i2c_clients[i]); kfree(i2c_clients); diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c index 5d6c12494f..141a2d25e8 100644 --- a/drivers/platform/x86/x86-android-tablets/dmi.c +++ b/drivers/platform/x86/x86-android-tablets/dmi.c @@ -106,6 +106,24 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = { }, { /* + * Lenovo Yoga Tablet 2 Pro 1380F/L (13") This has more or less + * the same BIOS as the 830F/L or 1050F/L (8" and 10") below, + * but unlike the 8" / 10" models which share the same mainboard + * this model has a different mainboard. + * This match for the 13" model MUST come before the 8" + 10" + * match since that one will also match the 13" model! 
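Editor's note on the toshiba_acpi change above: it folds two quirks into a single dmi_system_id table whose driver_data carries bit flags, read back with dmi_first_match(). The sketch below shows that quirk-table pattern in isolation, under the assumption of a hypothetical driver; the table name, flag names, and match strings are invented for illustration and do not come from the driver.

    #include <linux/bits.h>
    #include <linux/dmi.h>

    #define QUIRK_FOO       BIT(0)
    #define QUIRK_BAR       BIT(1)

    static const struct dmi_system_id example_quirks[] = {
            {
                    /* Hypothetical model that needs both quirks */
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "MODEL X"),
                    },
                    .driver_data = (void *)(QUIRK_FOO | QUIRK_BAR),
            },
            { }             /* terminating entry */
    };

    static long example_get_quirks(void)
    {
            const struct dmi_system_id *id = dmi_first_match(example_quirks);

            /* dmi_first_match() returns NULL when no entry matches this machine */
            return id ? (long)id->driver_data : 0;
    }

Keeping the flags in driver_data lets one table drive the auto defaults of several module parameters, as the hunk does for turn_on_panel_on_resume and hci_hotkey_quickstart.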
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), + DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), + DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), + /* Full match so as to NOT match the 830/1050 BIOS */ + DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"), + }, + .driver_data = (void *)&lenovo_yoga_tab2_1380_info, + }, + { + /* * Lenovo Yoga Tablet 2 830F/L or 1050F/L (The 8" and 10" * Lenovo Yoga Tablet 2 use the same mainboard) */ diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c index c297391955..16fa04d604 100644 --- a/drivers/platform/x86/x86-android-tablets/lenovo.c +++ b/drivers/platform/x86/x86-android-tablets/lenovo.c @@ -19,6 +19,7 @@ #include <linux/pinctrl/machine.h> #include <linux/platform_data/lp855x.h> #include <linux/platform_device.h> +#include <linux/power/bq24190_charger.h> #include <linux/reboot.h> #include <linux/rmi.h> #include <linux/spi/spi.h> @@ -565,6 +566,221 @@ static void lenovo_yoga_tab2_830_1050_exit(void) } } +/* + * Lenovo Yoga Tablet 2 Pro 1380F/L + * + * The Lenovo Yoga Tablet 2 Pro 1380F/L mostly has the same design as the 830F/L + * and the 1050F/L so this re-uses some of the handling for that from above. + */ +static const char * const lc824206xa_chg_det_psy[] = { "lc824206xa-charger-detect" }; + +static const struct property_entry lenovo_yoga_tab2_1380_bq24190_props[] = { + PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lc824206xa_chg_det_psy), + PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_hv_4v35_battery_node), + PROPERTY_ENTRY_BOOL("omit-battery-class"), + PROPERTY_ENTRY_BOOL("disable-reset"), + { } +}; + +static const struct software_node lenovo_yoga_tab2_1380_bq24190_node = { + .properties = lenovo_yoga_tab2_1380_bq24190_props, +}; + +/* For enabling the bq24190 5V boost based on id-pin */ +static struct regulator_consumer_supply lc824206xa_consumer = { + .supply = "vbus", + .dev_name = "i2c-lc824206xa", +}; + +static const struct regulator_init_data lenovo_yoga_tab2_1380_bq24190_vbus_init_data = { + .constraints = { + .name = "bq24190_vbus", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .consumer_supplies = &lc824206xa_consumer, + .num_consumer_supplies = 1, +}; + +struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = { + .regulator_init_data = &lenovo_yoga_tab2_1380_bq24190_vbus_init_data, +}; + +static const struct property_entry lenovo_yoga_tab2_1380_lc824206xa_props[] = { + PROPERTY_ENTRY_BOOL("onnn,enable-miclr-for-dcp"), + { } +}; + +static const struct software_node lenovo_yoga_tab2_1380_lc824206xa_node = { + .properties = lenovo_yoga_tab2_1380_lc824206xa_props, +}; + +static const char * const lenovo_yoga_tab2_1380_lms303d_mount_matrix[] = { + "0", "-1", "0", + "-1", "0", "0", + "0", "0", "1" +}; + +static const struct property_entry lenovo_yoga_tab2_1380_lms303d_props[] = { + PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", lenovo_yoga_tab2_1380_lms303d_mount_matrix), + { } +}; + +static const struct software_node lenovo_yoga_tab2_1380_lms303d_node = { + .properties = lenovo_yoga_tab2_1380_lms303d_props, +}; + +static const struct x86_i2c_client_info lenovo_yoga_tab2_1380_i2c_clients[] __initconst = { + { + /* BQ27541 fuel-gauge */ + .board_info = { + .type = "bq27541", + .addr = 0x55, + .dev_name = "bq27541", + .swnode = &fg_bq24190_supply_node, + }, + .adapter_path = "\\_SB_.I2C1", + }, { + /* bq24292i battery charger */ + .board_info = { + .type = "bq24190", + .addr = 0x6b, + .dev_name = "bq24292i", + .swnode = 
&lenovo_yoga_tab2_1380_bq24190_node, + .platform_data = &lenovo_yoga_tab2_1380_bq24190_pdata, + }, + .adapter_path = "\\_SB_.I2C1", + .irq_data = { + .type = X86_ACPI_IRQ_TYPE_GPIOINT, + .chip = "INT33FC:02", + .index = 2, + .trigger = ACPI_EDGE_SENSITIVE, + .polarity = ACPI_ACTIVE_HIGH, + .con_id = "bq24292i_irq", + }, + }, { + /* LP8557 Backlight controller */ + .board_info = { + .type = "lp8557", + .addr = 0x2c, + .dev_name = "lp8557", + .platform_data = &lenovo_lp8557_pwm_and_reg_pdata, + }, + .adapter_path = "\\_SB_.I2C3", + }, { + /* LC824206XA Micro USB Switch */ + .board_info = { + .type = "lc824206xa", + .addr = 0x48, + .dev_name = "lc824206xa", + .swnode = &lenovo_yoga_tab2_1380_lc824206xa_node, + }, + .adapter_path = "\\_SB_.I2C3", + .irq_data = { + .type = X86_ACPI_IRQ_TYPE_GPIOINT, + .chip = "INT33FC:02", + .index = 1, + .trigger = ACPI_LEVEL_SENSITIVE, + .polarity = ACPI_ACTIVE_LOW, + .con_id = "lc824206xa_irq", + }, + }, { + /* AL3320A ambient light sensor */ + .board_info = { + .type = "al3320a", + .addr = 0x1c, + .dev_name = "al3320a", + }, + .adapter_path = "\\_SB_.I2C5", + }, { + /* LSM303DA accelerometer + magnetometer */ + .board_info = { + .type = "lsm303d", + .addr = 0x1d, + .dev_name = "lsm303d", + .swnode = &lenovo_yoga_tab2_1380_lms303d_node, + }, + .adapter_path = "\\_SB_.I2C5", + }, { + /* Synaptics RMI touchscreen */ + .board_info = { + .type = "rmi4_i2c", + .addr = 0x38, + .dev_name = "rmi4_i2c", + .platform_data = &lenovo_yoga_tab2_830_1050_rmi_pdata, + }, + .adapter_path = "\\_SB_.I2C6", + .irq_data = { + .type = X86_ACPI_IRQ_TYPE_APIC, + .index = 0x45, + .trigger = ACPI_EDGE_SENSITIVE, + .polarity = ACPI_ACTIVE_HIGH, + }, + } +}; + +static const struct platform_device_info lenovo_yoga_tab2_1380_pdevs[] __initconst = { + { + /* For the Tablet 2 Pro 1380's custom fast charging driver */ + .name = "lenovo-yoga-tab2-pro-1380-fastcharger", + .id = PLATFORM_DEVID_NONE, + }, +}; + +const char * const lenovo_yoga_tab2_1380_modules[] __initconst = { + "bq24190_charger", /* For the Vbus regulator for lc824206xa */ + NULL +}; + +static int __init lenovo_yoga_tab2_1380_init(void) +{ + int ret; + + /* To verify that the DMI matching works vs the 830 / 1050 models */ + pr_info("detected Lenovo Yoga Tablet 2 Pro 1380F/L\n"); + + ret = lenovo_yoga_tab2_830_1050_init_codec(); + if (ret) + return ret; + + /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */ + lenovo_yoga_tab2_830_1050_sys_off_handler = + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1, + lenovo_yoga_tab2_830_1050_power_off, NULL); + if (IS_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler)) + return PTR_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler); + + return 0; +} + +static struct gpiod_lookup_table lenovo_yoga_tab2_1380_fc_gpios = { + .dev_id = "serial0-0", + .table = { + GPIO_LOOKUP("INT33FC:00", 57, "uart3_txd", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:00", 61, "uart3_rxd", GPIO_ACTIVE_HIGH), + { } + }, +}; + +static struct gpiod_lookup_table * const lenovo_yoga_tab2_1380_gpios[] = { + &lenovo_yoga_tab2_830_1050_codec_gpios, + &lenovo_yoga_tab2_1380_fc_gpios, + NULL +}; + +const struct x86_dev_info lenovo_yoga_tab2_1380_info __initconst = { + .i2c_client_info = lenovo_yoga_tab2_1380_i2c_clients, + .i2c_client_count = ARRAY_SIZE(lenovo_yoga_tab2_1380_i2c_clients), + .pdev_info = lenovo_yoga_tab2_1380_pdevs, + .pdev_count = ARRAY_SIZE(lenovo_yoga_tab2_1380_pdevs), + .gpio_button = &lenovo_yoga_tab2_830_1050_lid, + .gpio_button_count = 1, + .gpiod_lookup_tables = 
lenovo_yoga_tab2_1380_gpios, + .bat_swnode = &generic_lipo_hv_4v35_battery_node, + .modules = lenovo_yoga_tab2_1380_modules, + .init = lenovo_yoga_tab2_1380_init, + .exit = lenovo_yoga_tab2_830_1050_exit, +}; + /* Lenovo Yoga Tab 3 Pro YT3-X90F */ /* diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h index 468993edfe..821dc094b0 100644 --- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h +++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h @@ -112,6 +112,7 @@ extern const struct x86_dev_info czc_p10t; extern const struct x86_dev_info lenovo_yogabook_x90_info; extern const struct x86_dev_info lenovo_yogabook_x91_info; extern const struct x86_dev_info lenovo_yoga_tab2_830_1050_info; +extern const struct x86_dev_info lenovo_yoga_tab2_1380_info; extern const struct x86_dev_info lenovo_yt3_info; extern const struct x86_dev_info medion_lifetab_s10346_info; extern const struct x86_dev_info nextbook_ares8_info; diff --git a/drivers/platform/x86/xiaomi-wmi.c b/drivers/platform/x86/xiaomi-wmi.c index 54a2546bb9..be80f0bda9 100644 --- a/drivers/platform/x86/xiaomi-wmi.c +++ b/drivers/platform/x86/xiaomi-wmi.c @@ -2,8 +2,10 @@ /* WMI driver for Xiaomi Laptops */ #include <linux/acpi.h> +#include <linux/device.h> #include <linux/input.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/wmi.h> #include <uapi/linux/input-event-codes.h> @@ -20,12 +22,21 @@ struct xiaomi_wmi { struct input_dev *input_dev; + struct mutex key_lock; /* Protects the key event sequence */ unsigned int key_code; }; +static void xiaomi_mutex_destroy(void *data) +{ + struct mutex *lock = data; + + mutex_destroy(lock); +} + static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context) { struct xiaomi_wmi *data; + int ret; if (wdev == NULL || context == NULL) return -EINVAL; @@ -35,6 +46,11 @@ static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context) return -ENOMEM; dev_set_drvdata(&wdev->dev, data); + mutex_init(&data->key_lock); + ret = devm_add_action_or_reset(&wdev->dev, xiaomi_mutex_destroy, &data->key_lock); + if (ret < 0) + return ret; + data->input_dev = devm_input_allocate_device(&wdev->dev); if (data->input_dev == NULL) return -ENOMEM; @@ -59,10 +75,12 @@ static void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy) if (data == NULL) return; + mutex_lock(&data->key_lock); input_report_key(data->input_dev, data->key_code, 1); input_sync(data->input_dev); input_report_key(data->input_dev, data->key_code, 0); input_sync(data->input_dev); + mutex_unlock(&data->key_lock); } static const struct wmi_device_id xiaomi_wmi_id_table[] = { diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index 4215ffd9b1..c40eda92a8 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -184,6 +184,16 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev) return pd_to_genpd(dev->pm_domain); } +struct device *dev_to_genpd_dev(struct device *dev) +{ + struct generic_pm_domain *genpd = dev_to_genpd(dev); + + if (IS_ERR(genpd)) + return ERR_CAST(genpd); + + return &genpd->dev; +} + static int genpd_stop_dev(const struct generic_pm_domain *genpd, struct device *dev) { diff --git a/drivers/pmdomain/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c index 9dddf227a3..1510d5ddae 100644 --- a/drivers/pmdomain/ti/ti_sci_pm_domains.c +++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c @@ -114,6 +114,18 @@ static const struct 
of_device_id ti_sci_pm_domain_matches[] = { }; MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches); +static bool ti_sci_pm_idx_exists(struct ti_sci_genpd_provider *pd_provider, u32 idx) +{ + struct ti_sci_pm_domain *pd; + + list_for_each_entry(pd, &pd_provider->pd_list, node) { + if (pd->idx == idx) + return true; + } + + return false; +} + static int ti_sci_pm_domain_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -149,8 +161,14 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev) break; if (args.args_count >= 1 && args.np == dev->of_node) { - if (args.args[0] > max_id) + if (args.args[0] > max_id) { max_id = args.args[0]; + } else { + if (ti_sci_pm_idx_exists(pd_provider, args.args[0])) { + index++; + continue; + } + } pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); if (!pd) { diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c index b6c9637677..8008e31c0c 100644 --- a/drivers/power/supply/cros_usbpd-charger.c +++ b/drivers/power/supply/cros_usbpd-charger.c @@ -5,6 +5,7 @@ * Copyright (c) 2014 - 2018 Google, Inc */ +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_data/cros_ec_commands.h> #include <linux/platform_data/cros_ec_proto.h> @@ -711,16 +712,22 @@ static int cros_usbpd_charger_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(cros_usbpd_charger_pm_ops, NULL, cros_usbpd_charger_resume); +static const struct platform_device_id cros_usbpd_charger_id[] = { + { DRV_NAME, 0 }, + {} +}; +MODULE_DEVICE_TABLE(platform, cros_usbpd_charger_id); + static struct platform_driver cros_usbpd_charger_driver = { .driver = { .name = DRV_NAME, .pm = &cros_usbpd_charger_pm_ops, }, - .probe = cros_usbpd_charger_probe + .probe = cros_usbpd_charger_probe, + .id_table = cros_usbpd_charger_id, }; module_platform_driver(cros_usbpd_charger_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ChromeOS EC USBPD charger"); -MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 0d2c3724d0..b86e11bdc0 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -271,23 +271,6 @@ static ssize_t power_supply_show_usb_type(struct device *dev, return count; } -static ssize_t power_supply_show_charge_behaviour(struct device *dev, - struct power_supply *psy, - union power_supply_propval *value, - char *buf) -{ - int ret; - - ret = power_supply_get_property(psy, - POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR, - value); - if (ret < 0) - return ret; - - return power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours, - value->intval, buf); -} - static ssize_t power_supply_show_property(struct device *dev, struct device_attribute *attr, char *buf) { @@ -321,7 +304,8 @@ static ssize_t power_supply_show_property(struct device *dev, &value, buf); break; case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR: - ret = power_supply_show_charge_behaviour(dev, psy, &value, buf); + ret = power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours, + value.intval, buf); break; case POWER_SUPPLY_PROP_MODEL_NAME ... 
POWER_SUPPLY_PROP_SERIAL_NUMBER: ret = sysfs_emit(buf, "%s\n", value.strval); diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 7513018c9f..2067b0120d 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -85,7 +85,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin, } if (info->verify(info, pin, func, chan)) { - pr_err("driver cannot use function %u on pin %u\n", func, chan); + pr_err("driver cannot use function %u and channel %u on pin %u\n", + func, chan, pin); return -EOPNOTSUPP; } diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 6506cfb89a..ee2ced88ab 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -4562,7 +4562,7 @@ static int ptp_ocp_dpll_direction_set(const struct dpll_pin *pin, return -EOPNOTSUPP; mode = direction == DPLL_PIN_DIRECTION_INPUT ? SMA_MODE_IN : SMA_MODE_OUT; - return ptp_ocp_sma_store_val(bp, 0, mode, sma_nr); + return ptp_ocp_sma_store_val(bp, 0, mode, sma_nr + 1); } static int ptp_ocp_dpll_frequency_set(const struct dpll_pin *pin, @@ -4583,7 +4583,7 @@ static int ptp_ocp_dpll_frequency_set(const struct dpll_pin *pin, tbl = bp->sma_op->tbl[sma->mode]; for (i = 0; tbl[i].name; i++) if (tbl[i].frequency == frequency) - return ptp_ocp_sma_store_val(bp, i, sma->mode, sma_nr); + return ptp_ocp_sma_store_val(bp, i, sma->mode, sma_nr + 1); return -EINVAL; } @@ -4600,7 +4600,7 @@ static int ptp_ocp_dpll_frequency_get(const struct dpll_pin *pin, u32 val; int i; - val = bp->sma_op->get(bp, sma_nr); + val = bp->sma_op->get(bp, sma_nr + 1); tbl = bp->sma_op->tbl[sma->mode]; for (i = 0; tbl[i].name; i++) if (val == tbl[i].value) { diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index a15460aaa0..6b1b8f57cd 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -296,8 +296,7 @@ static ssize_t max_vclocks_store(struct device *dev, if (max < ptp->n_vclocks) goto out; - size = sizeof(int) * max; - vclock_index = kzalloc(size, GFP_KERNEL); + vclock_index = kcalloc(max, sizeof(int), GFP_KERNEL); if (!vclock_index) { err = -ENOMEM; goto out; diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index a02fdbc612..e12b6ff70b 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -147,7 +147,7 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm, struct meson_pwm *meson = to_meson_pwm(chip); struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm]; unsigned int cnt, duty_cnt; - unsigned long fin_freq; + long fin_freq; u64 duty, period, freq; duty = state->duty_cycle; @@ -167,14 +167,15 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm, freq = ULONG_MAX; fin_freq = clk_round_rate(channel->clk, freq); - if (fin_freq == 0) { - dev_err(pwmchip_parent(chip), "invalid source clock frequency\n"); - return -EINVAL; + if (fin_freq <= 0) { + dev_err(pwmchip_parent(chip), + "invalid source clock frequency %llu\n", freq); + return fin_freq ? 
fin_freq : -EINVAL; } - dev_dbg(pwmchip_parent(chip), "fin_freq: %lu Hz\n", fin_freq); + dev_dbg(pwmchip_parent(chip), "fin_freq: %ld Hz\n", fin_freq); - cnt = div_u64(fin_freq * period, NSEC_PER_SEC); + cnt = mul_u64_u64_div_u64(fin_freq, period, NSEC_PER_SEC); if (cnt > 0xffff) { dev_err(pwmchip_parent(chip), "unable to get period cnt\n"); return -EINVAL; @@ -189,7 +190,7 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm, channel->hi = 0; channel->lo = cnt; } else { - duty_cnt = div_u64(fin_freq * duty, NSEC_PER_SEC); + duty_cnt = mul_u64_u64_div_u64(fin_freq, duty, NSEC_PER_SEC); dev_dbg(pwmchip_parent(chip), "duty=%llu duty_cnt=%u\n", duty, duty_cnt); diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index 39d80da0e1..f07b1126e7 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -624,32 +624,20 @@ static int sti_pwm_probe(struct platform_device *pdev) return ret; if (cdata->pwm_num_devs) { - pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm"); + pc->pwm_clk = devm_clk_get_prepared(dev, "pwm"); if (IS_ERR(pc->pwm_clk)) { dev_err(dev, "failed to get PWM clock\n"); return PTR_ERR(pc->pwm_clk); } - - ret = clk_prepare(pc->pwm_clk); - if (ret) { - dev_err(dev, "failed to prepare clock\n"); - return ret; - } } if (cdata->cpt_num_devs) { - pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture"); + pc->cpt_clk = devm_clk_get_prepared(dev, "capture"); if (IS_ERR(pc->cpt_clk)) { dev_err(dev, "failed to get PWM capture clock\n"); return PTR_ERR(pc->cpt_clk); } - ret = clk_prepare(pc->cpt_clk); - if (ret) { - dev_err(dev, "failed to prepare clock\n"); - return ret; - } - cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL); if (!cdata->ddata) return -ENOMEM; @@ -664,27 +652,7 @@ static int sti_pwm_probe(struct platform_device *pdev) mutex_init(&ddata->lock); } - ret = pwmchip_add(chip); - if (ret < 0) { - clk_unprepare(pc->pwm_clk); - clk_unprepare(pc->cpt_clk); - return ret; - } - - platform_set_drvdata(pdev, chip); - - return 0; -} - -static void sti_pwm_remove(struct platform_device *pdev) -{ - struct pwm_chip *chip = platform_get_drvdata(pdev); - struct sti_pwm_chip *pc = to_sti_pwmchip(chip); - - pwmchip_remove(chip); - - clk_unprepare(pc->pwm_clk); - clk_unprepare(pc->cpt_clk); + return devm_pwmchip_add(dev, chip); } static const struct of_device_id sti_pwm_of_match[] = { @@ -699,7 +667,6 @@ static struct platform_driver sti_pwm_driver = { .of_match_table = sti_pwm_of_match, }, .probe = sti_pwm_probe, - .remove_new = sti_pwm_remove, }; module_platform_driver(sti_pwm_driver); diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h index 5de69e0bb0..196c1c8b57 100644 --- a/drivers/ras/amd/atl/internal.h +++ b/drivers/ras/amd/atl/internal.h @@ -224,7 +224,7 @@ int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo); int get_df_system_info(void); int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num); -int get_addr_hash_mi300(void); +int get_umc_info_mi300(void); int get_address_map(struct addr_ctx *ctx); diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c index 701349e849..6979fa3d4f 100644 --- a/drivers/ras/amd/atl/system.c +++ b/drivers/ras/amd/atl/system.c @@ -127,7 +127,7 @@ static int df4_determine_df_rev(u32 reg) if (reg == DF_FUNC0_ID_MI300) { df_cfg.flags.heterogeneous = 1; - if (get_addr_hash_mi300()) + if (get_umc_info_mi300()) return -EINVAL; } diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c index 
59b6169093..a1b4accf7b 100644 --- a/drivers/ras/amd/atl/umc.c +++ b/drivers/ras/amd/atl/umc.c @@ -68,6 +68,8 @@ struct xor_bits { }; #define NUM_BANK_BITS 4 +#define NUM_COL_BITS 5 +#define NUM_SID_BITS 2 static struct { /* UMC::CH::AddrHashBank */ @@ -80,7 +82,22 @@ static struct { u8 bank_xor; } addr_hash; +static struct { + u8 bank[NUM_BANK_BITS]; + u8 col[NUM_COL_BITS]; + u8 sid[NUM_SID_BITS]; + u8 num_row_lo; + u8 num_row_hi; + u8 row_lo; + u8 row_hi; + u8 pc; +} bit_shifts; + #define MI300_UMC_CH_BASE 0x90000 +#define MI300_ADDR_CFG (MI300_UMC_CH_BASE + 0x30) +#define MI300_ADDR_SEL (MI300_UMC_CH_BASE + 0x40) +#define MI300_COL_SEL_LO (MI300_UMC_CH_BASE + 0x50) +#define MI300_ADDR_SEL_2 (MI300_UMC_CH_BASE + 0xA4) #define MI300_ADDR_HASH_BANK0 (MI300_UMC_CH_BASE + 0xC8) #define MI300_ADDR_HASH_PC (MI300_UMC_CH_BASE + 0xE0) #define MI300_ADDR_HASH_PC2 (MI300_UMC_CH_BASE + 0xE4) @@ -90,17 +107,42 @@ static struct { #define ADDR_HASH_ROW_XOR GENMASK(31, 14) #define ADDR_HASH_BANK_XOR GENMASK(5, 0) +#define ADDR_CFG_NUM_ROW_LO GENMASK(11, 8) +#define ADDR_CFG_NUM_ROW_HI GENMASK(15, 12) + +#define ADDR_SEL_BANK0 GENMASK(3, 0) +#define ADDR_SEL_BANK1 GENMASK(7, 4) +#define ADDR_SEL_BANK2 GENMASK(11, 8) +#define ADDR_SEL_BANK3 GENMASK(15, 12) +#define ADDR_SEL_BANK4 GENMASK(20, 16) +#define ADDR_SEL_ROW_LO GENMASK(27, 24) +#define ADDR_SEL_ROW_HI GENMASK(31, 28) + +#define COL_SEL_LO_COL0 GENMASK(3, 0) +#define COL_SEL_LO_COL1 GENMASK(7, 4) +#define COL_SEL_LO_COL2 GENMASK(11, 8) +#define COL_SEL_LO_COL3 GENMASK(15, 12) +#define COL_SEL_LO_COL4 GENMASK(19, 16) + +#define ADDR_SEL_2_BANK5 GENMASK(4, 0) +#define ADDR_SEL_2_CHAN GENMASK(15, 12) + /* * Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used - * for hashing. Do this during module init, since the values will not - * change during run time. + * for hashing. + * + * Also, read UMC::CH::Addr{Cfg,Sel,Sel2} and UMC::CH:ColSelLo registers to + * get the values needed to reconstruct the normalized address. Apply additional + * offsets to the raw register values, as needed. + * + * Do this during module init, since the values will not change during run time. * * These registers are instantiated for each UMC across each AMD Node. * However, they should be identically programmed due to the fixed hardware * design of MI300 systems. So read the values from Node 0 UMC 0 and keep a * single global structure for simplicity. */ -int get_addr_hash_mi300(void) +int get_umc_info_mi300(void) { u32 temp; int ret; @@ -130,6 +172,44 @@ int get_addr_hash_mi300(void) addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp); + ret = amd_smn_read(0, MI300_ADDR_CFG, &temp); + if (ret) + return ret; + + bit_shifts.num_row_hi = FIELD_GET(ADDR_CFG_NUM_ROW_HI, temp); + bit_shifts.num_row_lo = 10 + FIELD_GET(ADDR_CFG_NUM_ROW_LO, temp); + + ret = amd_smn_read(0, MI300_ADDR_SEL, &temp); + if (ret) + return ret; + + bit_shifts.bank[0] = 5 + FIELD_GET(ADDR_SEL_BANK0, temp); + bit_shifts.bank[1] = 5 + FIELD_GET(ADDR_SEL_BANK1, temp); + bit_shifts.bank[2] = 5 + FIELD_GET(ADDR_SEL_BANK2, temp); + bit_shifts.bank[3] = 5 + FIELD_GET(ADDR_SEL_BANK3, temp); + /* Use BankBit4 for the SID0 position. 
*/ + bit_shifts.sid[0] = 5 + FIELD_GET(ADDR_SEL_BANK4, temp); + bit_shifts.row_lo = 12 + FIELD_GET(ADDR_SEL_ROW_LO, temp); + bit_shifts.row_hi = 24 + FIELD_GET(ADDR_SEL_ROW_HI, temp); + + ret = amd_smn_read(0, MI300_COL_SEL_LO, &temp); + if (ret) + return ret; + + bit_shifts.col[0] = 2 + FIELD_GET(COL_SEL_LO_COL0, temp); + bit_shifts.col[1] = 2 + FIELD_GET(COL_SEL_LO_COL1, temp); + bit_shifts.col[2] = 2 + FIELD_GET(COL_SEL_LO_COL2, temp); + bit_shifts.col[3] = 2 + FIELD_GET(COL_SEL_LO_COL3, temp); + bit_shifts.col[4] = 2 + FIELD_GET(COL_SEL_LO_COL4, temp); + + ret = amd_smn_read(0, MI300_ADDR_SEL_2, &temp); + if (ret) + return ret; + + /* Use BankBit5 for the SID1 position. */ + bit_shifts.sid[1] = 5 + FIELD_GET(ADDR_SEL_2_BANK5, temp); + bit_shifts.pc = 5 + FIELD_GET(ADDR_SEL_2_CHAN, temp); + return 0; } @@ -146,9 +226,6 @@ int get_addr_hash_mi300(void) * The MCA address format is as follows: * MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]} * - * The normalized address format is fixed in hardware and is as follows: - * NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]} - * * Additionally, the PC and Bank bits may be hashed. This must be accounted for before * reconstructing the normalized address. */ @@ -158,18 +235,10 @@ int get_addr_hash_mi300(void) #define MI300_UMC_MCA_PC BIT(25) #define MI300_UMC_MCA_SID GENMASK(27, 26) -#define MI300_NA_COL_1_0 GENMASK(6, 5) -#define MI300_NA_PC BIT(7) -#define MI300_NA_COL_3_2 GENMASK(9, 8) -#define MI300_NA_BANK_3_2 GENMASK(11, 10) -#define MI300_NA_BANK_1_0 GENMASK(13, 12) -#define MI300_NA_COL_4 BIT(14) -#define MI300_NA_ROW GENMASK(28, 15) -#define MI300_NA_SID GENMASK(30, 29) - static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr) { - u16 i, col, row, bank, pc, sid, temp; + u16 i, col, row, bank, pc, sid; + u32 temp; col = FIELD_GET(MI300_UMC_MCA_COL, addr); bank = FIELD_GET(MI300_UMC_MCA_BANK, addr); @@ -189,49 +258,48 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr) /* Calculate hash for PC bit. */ if (addr_hash.pc.xor_enable) { - /* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */ - bank |= sid << 5; - temp = bitwise_xor_bits(col & addr_hash.pc.col_xor); temp ^= bitwise_xor_bits(row & addr_hash.pc.row_xor); - temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor); + /* Bits SID[1:0] act as Bank[5:4] for PC hash, so apply them here. */ + temp ^= bitwise_xor_bits((bank | sid << NUM_BANK_BITS) & addr_hash.bank_xor); pc ^= temp; - - /* Drop SID bits for the sake of debug printing later. 
*/ - bank &= 0x1F; } /* Reconstruct the normalized address starting with NA[4:0] = 0 */ addr = 0; - /* NA[6:5] = Column[1:0] */ - temp = col & 0x3; - addr |= FIELD_PREP(MI300_NA_COL_1_0, temp); - - /* NA[7] = PC */ - addr |= FIELD_PREP(MI300_NA_PC, pc); - - /* NA[9:8] = Column[3:2] */ - temp = (col >> 2) & 0x3; - addr |= FIELD_PREP(MI300_NA_COL_3_2, temp); + /* Column bits */ + for (i = 0; i < NUM_COL_BITS; i++) { + temp = (col >> i) & 0x1; + addr |= temp << bit_shifts.col[i]; + } - /* NA[11:10] = Bank[3:2] */ - temp = (bank >> 2) & 0x3; - addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp); + /* Bank bits */ + for (i = 0; i < NUM_BANK_BITS; i++) { + temp = (bank >> i) & 0x1; + addr |= temp << bit_shifts.bank[i]; + } - /* NA[13:12] = Bank[1:0] */ - temp = bank & 0x3; - addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp); + /* Row lo bits */ + for (i = 0; i < bit_shifts.num_row_lo; i++) { + temp = (row >> i) & 0x1; + addr |= temp << (i + bit_shifts.row_lo); + } - /* NA[14] = Column[4] */ - temp = (col >> 4) & 0x1; - addr |= FIELD_PREP(MI300_NA_COL_4, temp); + /* Row hi bits */ + for (i = 0; i < bit_shifts.num_row_hi; i++) { + temp = (row >> (i + bit_shifts.num_row_lo)) & 0x1; + addr |= temp << (i + bit_shifts.row_hi); + } - /* NA[28:15] = Row[13:0] */ - addr |= FIELD_PREP(MI300_NA_ROW, row); + /* PC bit */ + addr |= pc << bit_shifts.pc; - /* NA[30:29] = SID[1:0] */ - addr |= FIELD_PREP(MI300_NA_SID, sid); + /* SID bits */ + for (i = 0; i < NUM_SID_BITS; i++) { + temp = (sid >> i) & 0x1; + addr |= temp << bit_shifts.sid[i]; + } pr_debug("Addr=0x%016lx", addr); pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid); diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c index 26192d55a6..79fbb45297 100644 --- a/drivers/regulator/bd71815-regulator.c +++ b/drivers/regulator/bd71815-regulator.c @@ -256,7 +256,7 @@ static int buck12_set_hw_dvs_levels(struct device_node *np, * 10: 2.50mV/usec 10mV 4uS * 11: 1.25mV/usec 10mV 8uS */ -static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 }; +static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 }; static int bd7181x_led_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA) diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c index 08d4ee3692..dd871ffe97 100644 --- a/drivers/regulator/bd71828-regulator.c +++ b/drivers/regulator/bd71828-regulator.c @@ -206,14 +206,11 @@ static const struct bd71828_regulator_data bd71828_rdata[] = { .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT, .suspend_mask = BD71828_MASK_BUCK1267_VOLT, .suspend_on_mask = BD71828_MASK_SUSP_EN, - .lpsr_on_mask = BD71828_MASK_LPSR_EN, /* * LPSR voltage is same as SUSPEND voltage. 
Allow - * setting it so that regulator can be set enabled at - * LPSR state + * only enabling/disabling regulator for LPSR state */ - .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT, - .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, }, .reg_inits = buck1_inits, .reg_init_amnt = ARRAY_SIZE(buck1_inits), @@ -288,13 +285,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = { ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR, .run_reg = BD71828_REG_BUCK3_VOLT, - .idle_reg = BD71828_REG_BUCK3_VOLT, - .suspend_reg = BD71828_REG_BUCK3_VOLT, - .lpsr_reg = BD71828_REG_BUCK3_VOLT, .run_mask = BD71828_MASK_BUCK3_VOLT, - .idle_mask = BD71828_MASK_BUCK3_VOLT, - .suspend_mask = BD71828_MASK_BUCK3_VOLT, - .lpsr_mask = BD71828_MASK_BUCK3_VOLT, .idle_on_mask = BD71828_MASK_IDLE_EN, .suspend_on_mask = BD71828_MASK_SUSP_EN, .lpsr_on_mask = BD71828_MASK_LPSR_EN, @@ -329,13 +320,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = { ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR, .run_reg = BD71828_REG_BUCK4_VOLT, - .idle_reg = BD71828_REG_BUCK4_VOLT, - .suspend_reg = BD71828_REG_BUCK4_VOLT, - .lpsr_reg = BD71828_REG_BUCK4_VOLT, .run_mask = BD71828_MASK_BUCK4_VOLT, - .idle_mask = BD71828_MASK_BUCK4_VOLT, - .suspend_mask = BD71828_MASK_BUCK4_VOLT, - .lpsr_mask = BD71828_MASK_BUCK4_VOLT, .idle_on_mask = BD71828_MASK_IDLE_EN, .suspend_on_mask = BD71828_MASK_SUSP_EN, .lpsr_on_mask = BD71828_MASK_LPSR_EN, @@ -370,13 +355,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = { ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR, .run_reg = BD71828_REG_BUCK5_VOLT, - .idle_reg = BD71828_REG_BUCK5_VOLT, - .suspend_reg = BD71828_REG_BUCK5_VOLT, - .lpsr_reg = BD71828_REG_BUCK5_VOLT, .run_mask = BD71828_MASK_BUCK5_VOLT, - .idle_mask = BD71828_MASK_BUCK5_VOLT, - .suspend_mask = BD71828_MASK_BUCK5_VOLT, - .lpsr_mask = BD71828_MASK_BUCK5_VOLT, .idle_on_mask = BD71828_MASK_IDLE_EN, .suspend_on_mask = BD71828_MASK_SUSP_EN, .lpsr_on_mask = BD71828_MASK_LPSR_EN, @@ -493,13 +472,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = { ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR, .run_reg = BD71828_REG_LDO1_VOLT, - .idle_reg = BD71828_REG_LDO1_VOLT, - .suspend_reg = BD71828_REG_LDO1_VOLT, - .lpsr_reg = BD71828_REG_LDO1_VOLT, .run_mask = BD71828_MASK_LDO_VOLT, - .idle_mask = BD71828_MASK_LDO_VOLT, - .suspend_mask = BD71828_MASK_LDO_VOLT, - .lpsr_mask = BD71828_MASK_LDO_VOLT, .idle_on_mask = BD71828_MASK_IDLE_EN, .suspend_on_mask = BD71828_MASK_SUSP_EN, .lpsr_on_mask = BD71828_MASK_LPSR_EN, @@ -533,13 +506,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = { ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR, .run_reg = BD71828_REG_LDO2_VOLT, - .idle_reg = BD71828_REG_LDO2_VOLT, - .suspend_reg = BD71828_REG_LDO2_VOLT, - .lpsr_reg = BD71828_REG_LDO2_VOLT, .run_mask = BD71828_MASK_LDO_VOLT, - .idle_mask = BD71828_MASK_LDO_VOLT, - .suspend_mask = BD71828_MASK_LDO_VOLT, - .lpsr_mask = BD71828_MASK_LDO_VOLT, .idle_on_mask = BD71828_MASK_IDLE_EN, .suspend_on_mask = BD71828_MASK_SUSP_EN, .lpsr_on_mask = BD71828_MASK_LPSR_EN, @@ -573,13 +540,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = { ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR, .run_reg = BD71828_REG_LDO3_VOLT, - .idle_reg = BD71828_REG_LDO3_VOLT, - .suspend_reg = BD71828_REG_LDO3_VOLT, - .lpsr_reg = BD71828_REG_LDO3_VOLT, .run_mask = BD71828_MASK_LDO_VOLT, - .idle_mask = BD71828_MASK_LDO_VOLT, - .suspend_mask = BD71828_MASK_LDO_VOLT, - .lpsr_mask = BD71828_MASK_LDO_VOLT, .idle_on_mask = 
BD71828_MASK_IDLE_EN, .suspend_on_mask = BD71828_MASK_SUSP_EN, .lpsr_on_mask = BD71828_MASK_LPSR_EN, @@ -614,13 +575,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = { ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR, .run_reg = BD71828_REG_LDO4_VOLT, - .idle_reg = BD71828_REG_LDO4_VOLT, - .suspend_reg = BD71828_REG_LDO4_VOLT, - .lpsr_reg = BD71828_REG_LDO4_VOLT, .run_mask = BD71828_MASK_LDO_VOLT, - .idle_mask = BD71828_MASK_LDO_VOLT, - .suspend_mask = BD71828_MASK_LDO_VOLT, - .lpsr_mask = BD71828_MASK_LDO_VOLT, .idle_on_mask = BD71828_MASK_IDLE_EN, .suspend_on_mask = BD71828_MASK_SUSP_EN, .lpsr_on_mask = BD71828_MASK_LPSR_EN, @@ -655,13 +610,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = { ROHM_DVS_LEVEL_SUSPEND | ROHM_DVS_LEVEL_LPSR, .run_reg = BD71828_REG_LDO5_VOLT, - .idle_reg = BD71828_REG_LDO5_VOLT, - .suspend_reg = BD71828_REG_LDO5_VOLT, - .lpsr_reg = BD71828_REG_LDO5_VOLT, .run_mask = BD71828_MASK_LDO_VOLT, - .idle_mask = BD71828_MASK_LDO_VOLT, - .suspend_mask = BD71828_MASK_LDO_VOLT, - .lpsr_mask = BD71828_MASK_LDO_VOLT, .idle_on_mask = BD71828_MASK_IDLE_EN, .suspend_on_mask = BD71828_MASK_SUSP_EN, .lpsr_on_mask = BD71828_MASK_LPSR_EN, @@ -720,9 +669,6 @@ static const struct bd71828_regulator_data bd71828_rdata[] = { .suspend_reg = BD71828_REG_LDO7_VOLT, .lpsr_reg = BD71828_REG_LDO7_VOLT, .run_mask = BD71828_MASK_LDO_VOLT, - .idle_mask = BD71828_MASK_LDO_VOLT, - .suspend_mask = BD71828_MASK_LDO_VOLT, - .lpsr_mask = BD71828_MASK_LDO_VOLT, .idle_on_mask = BD71828_MASK_IDLE_EN, .suspend_on_mask = BD71828_MASK_SUSP_EN, .lpsr_on_mask = BD71828_MASK_LPSR_EN, diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 2c33653ffd..51ff3f9eaf 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3347,6 +3347,7 @@ struct regmap *regulator_get_regmap(struct regulator *regulator) return map ? map : ERR_PTR(-EOPNOTSUPP); } +EXPORT_SYMBOL_GPL(regulator_get_regmap); /** * regulator_get_hardware_vsel_register - get the HW voltage selector register diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index d492683365..6e1ace660b 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -161,6 +161,32 @@ int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev) } EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_pickable_regmap); +static int write_separate_vsel_and_range(struct regulator_dev *rdev, + unsigned int sel, unsigned int range) +{ + bool range_updated; + int ret; + + ret = regmap_update_bits_base(rdev->regmap, rdev->desc->vsel_range_reg, + rdev->desc->vsel_range_mask, + range, &range_updated, false, false); + if (ret) + return ret; + + /* + * Some PMICs treat the vsel_reg same as apply-bit. 
Force it to be + * written if the range changed, even if the old selector was same as + * the new one + */ + if (rdev->desc->range_applied_by_vsel && range_updated) + return regmap_write_bits(rdev->regmap, + rdev->desc->vsel_reg, + rdev->desc->vsel_mask, sel); + + return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg, + rdev->desc->vsel_mask, sel); +} + /** * regulator_set_voltage_sel_pickable_regmap - pickable range set_voltage_sel * @@ -199,21 +225,12 @@ int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev, range = rdev->desc->linear_range_selectors_bitfield[i]; range <<= ffs(rdev->desc->vsel_range_mask) - 1; - if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg) { - ret = regmap_update_bits(rdev->regmap, - rdev->desc->vsel_reg, + if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg) + ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg, rdev->desc->vsel_range_mask | rdev->desc->vsel_mask, sel | range); - } else { - ret = regmap_update_bits(rdev->regmap, - rdev->desc->vsel_range_reg, - rdev->desc->vsel_range_mask, range); - if (ret) - return ret; - - ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg, - rdev->desc->vsel_mask, sel); - } + else + ret = write_separate_vsel_and_range(rdev, sel, range); if (ret) return ret; diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c index 9b7c3d7778..3c9d79e003 100644 --- a/drivers/regulator/tps6287x-regulator.c +++ b/drivers/regulator/tps6287x-regulator.c @@ -115,6 +115,7 @@ static struct regulator_desc tps6287x_reg = { .vsel_mask = 0xFF, .vsel_range_reg = TPS6287X_CTRL2, .vsel_range_mask = TPS6287X_CTRL2_VRANGE, + .range_applied_by_vsel = true, .ramp_reg = TPS6287X_CTRL1, .ramp_mask = TPS6287X_CTRL1_VRAMP, .ramp_delay_table = tps6287x_ramp_table, diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c index b7f0c87797..5fad61785e 100644 --- a/drivers/regulator/tps6594-regulator.c +++ b/drivers/regulator/tps6594-regulator.c @@ -287,30 +287,30 @@ static struct tps6594_regulator_irq_type *tps6594_ldos_irq_types[] = { static const struct regulator_desc multi_regs[] = { TPS6594_REGULATOR("BUCK12", "buck12", TPS6594_BUCK_1, REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET, - TPS6594_REG_BUCKX_VOUT_1(1), + TPS6594_REG_BUCKX_VOUT_1(0), TPS6594_MASK_BUCKS_VSET, - TPS6594_REG_BUCKX_CTRL(1), + TPS6594_REG_BUCKX_CTRL(0), TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges, 4, 4000, 0, NULL, 0, 0), TPS6594_REGULATOR("BUCK34", "buck34", TPS6594_BUCK_3, REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET, - TPS6594_REG_BUCKX_VOUT_1(3), + TPS6594_REG_BUCKX_VOUT_1(2), TPS6594_MASK_BUCKS_VSET, - TPS6594_REG_BUCKX_CTRL(3), + TPS6594_REG_BUCKX_CTRL(2), TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges, 4, 0, 0, NULL, 0, 0), TPS6594_REGULATOR("BUCK123", "buck123", TPS6594_BUCK_1, REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET, - TPS6594_REG_BUCKX_VOUT_1(1), + TPS6594_REG_BUCKX_VOUT_1(0), TPS6594_MASK_BUCKS_VSET, - TPS6594_REG_BUCKX_CTRL(1), + TPS6594_REG_BUCKX_CTRL(0), TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges, 4, 4000, 0, NULL, 0, 0), TPS6594_REGULATOR("BUCK1234", "buck1234", TPS6594_BUCK_1, REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET, - TPS6594_REG_BUCKX_VOUT_1(1), + TPS6594_REG_BUCKX_VOUT_1(0), TPS6594_MASK_BUCKS_VSET, - TPS6594_REG_BUCKX_CTRL(1), + TPS6594_REG_BUCKX_CTRL(0), TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges, 4, 4000, 0, NULL, 0, 0), }; diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c 
b/drivers/remoteproc/ti_k3_r5_remoteproc.c index ad3415a385..50e486bcfa 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -103,12 +103,14 @@ struct k3_r5_soc_data { * @dev: cached device pointer * @mode: Mode to configure the Cluster - Split or LockStep * @cores: list of R5 cores within the cluster + * @core_transition: wait queue to sync core state changes * @soc_data: SoC-specific feature data for a R5FSS */ struct k3_r5_cluster { struct device *dev; enum cluster_mode mode; struct list_head cores; + wait_queue_head_t core_transition; const struct k3_r5_soc_data *soc_data; }; @@ -128,6 +130,7 @@ struct k3_r5_cluster { * @atcm_enable: flag to control ATCM enablement * @btcm_enable: flag to control BTCM enablement * @loczrama: flag to dictate which TCM is at device address 0x0 + * @released_from_reset: flag to signal when core is out of reset */ struct k3_r5_core { struct list_head elem; @@ -144,6 +147,7 @@ struct k3_r5_core { u32 atcm_enable; u32 btcm_enable; u32 loczrama; + bool released_from_reset; }; /** @@ -460,6 +464,8 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) ret); return ret; } + core->released_from_reset = true; + wake_up_interruptible(&cluster->core_transition); /* * Newer IP revisions like on J7200 SoCs support h/w auto-initialization @@ -542,7 +548,7 @@ static int k3_r5_rproc_start(struct rproc *rproc) struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_cluster *cluster = kproc->cluster; struct device *dev = kproc->dev; - struct k3_r5_core *core; + struct k3_r5_core *core0, *core; u32 boot_addr; int ret; @@ -568,6 +574,16 @@ static int k3_r5_rproc_start(struct rproc *rproc) goto unroll_core_run; } } else { + /* do not allow core 1 to start before core 0 */ + core0 = list_first_entry(&cluster->cores, struct k3_r5_core, + elem); + if (core != core0 && core0->rproc->state == RPROC_OFFLINE) { + dev_err(dev, "%s: can not start core 1 before core 0\n", + __func__); + ret = -EPERM; + goto put_mbox; + } + ret = k3_r5_core_run(core); if (ret) goto put_mbox; @@ -613,7 +629,8 @@ static int k3_r5_rproc_stop(struct rproc *rproc) { struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_cluster *cluster = kproc->cluster; - struct k3_r5_core *core = kproc->core; + struct device *dev = kproc->dev; + struct k3_r5_core *core1, *core = kproc->core; int ret; /* halt all applicable cores */ @@ -626,6 +643,16 @@ static int k3_r5_rproc_stop(struct rproc *rproc) } } } else { + /* do not allow core 0 to stop before core 1 */ + core1 = list_last_entry(&cluster->cores, struct k3_r5_core, + elem); + if (core != core1 && core1->rproc->state != RPROC_OFFLINE) { + dev_err(dev, "%s: can not stop core 0 before core 1\n", + __func__); + ret = -EPERM; + goto out; + } + ret = k3_r5_core_halt(core); if (ret) goto out; @@ -1140,6 +1167,12 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) return ret; } + /* + * Skip the waiting mechanism for sequential power-on of cores if the + * core has already been booted by another entity. + */ + core->released_from_reset = c_state; + ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat); if (ret < 0) { @@ -1280,6 +1313,26 @@ init_rmem: cluster->mode == CLUSTER_MODE_SINGLECPU || cluster->mode == CLUSTER_MODE_SINGLECORE) break; + + /* + * R5 cores require to be powered on sequentially, core0 + * should be in higher power state than core1 in a cluster + * So, wait for current core to power up before proceeding + * to next core and put timeout of 2sec for each core. 
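/*
 * [Editor's note, not part of the upstream patch] For reference on the wait-queue
 * helper used by the waiting mechanism described in this comment (see the
 * wait_event_interruptible_timeout() call just below): it returns the remaining
 * jiffies (at least 1) when the condition became true, 0 when the timeout expired
 * first, and -ERESTARTSYS when a signal interrupted the wait, which is why the
 * driver treats ret <= 0 as "the core never powered up". A simplified sketch of
 * the pattern (APIs from <linux/wait.h>), reusing the names from this patch but
 * with file-scope variables instead of the cluster/core struct members:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(core_transition);
 *	static bool released_from_reset;
 *
 *	// releasing side (runs when a core comes out of reset)
 *	released_from_reset = true;
 *	wake_up_interruptible(&core_transition);
 *
 *	// waiting side, 2 second timeout
 *	ret = wait_event_interruptible_timeout(core_transition,
 *					       released_from_reset,
 *					       msecs_to_jiffies(2000));
 */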
+ * + * This waiting mechanism is necessary because + * rproc_auto_boot_callback() for core1 can be called before + * core0 due to thread execution order. + */ + ret = wait_event_interruptible_timeout(cluster->core_transition, + core->released_from_reset, + msecs_to_jiffies(2000)); + if (ret <= 0) { + dev_err(dev, + "Timed out waiting for %s core to power up!\n", + rproc->name); + return ret; + } } return 0; @@ -1709,6 +1762,7 @@ static int k3_r5_probe(struct platform_device *pdev) cluster->dev = dev; cluster->soc_data = data; INIT_LIST_HEAD(&cluster->cores); + init_waitqueue_head(&cluster->core_transition); ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode); if (ret < 0 && ret != -EINVAL) { diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h index 86993de253..a4c5c6736b 100644 --- a/drivers/s390/cio/trace.h +++ b/drivers/s390/cio/trace.h @@ -50,7 +50,7 @@ DECLARE_EVENT_CLASS(s390_class_schib, __entry->devno = schib->pmcw.dev; __entry->schib = *schib; __entry->pmcw_ena = schib->pmcw.ena; - __entry->pmcw_st = schib->pmcw.ena; + __entry->pmcw_st = schib->pmcw.st; __entry->pmcw_dnv = schib->pmcw.dnv; __entry->pmcw_dev = schib->pmcw.dev; __entry->pmcw_lpm = schib->pmcw.lpm; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index cce0bafd4c..ca7e626a97 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -767,9 +767,9 @@ static void ap_check_bindings_complete(void) if (bound == apqns) { if (!completion_done(&ap_apqn_bindings_complete)) { complete_all(&ap_apqn_bindings_complete); + ap_send_bindings_complete_uevent(); pr_debug("%s all apqn bindings complete\n", __func__); } - ap_send_bindings_complete_uevent(); } } } @@ -929,6 +929,12 @@ static int ap_device_probe(struct device *dev) goto out; } + /* + * Rearm the bindings complete completion to trigger + * bindings complete when all devices are bound again + */ + reinit_completion(&ap_apqn_bindings_complete); + /* Add queue/card to list of active queues/cards */ spin_lock_bh(&ap_queues_lock); if (is_queue_dev(dev)) @@ -1123,7 +1129,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits) */ static int modify_bitmap(const char *str, unsigned long *bitmap, int bits) { - int a, i, z; + unsigned long a, i, z; char *np, sign; /* bits needs to be a multiple of 8 */ diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 613eab7297..41fe8a043d 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -956,7 +956,7 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, struct dst_entry *dst = skb_dst(skb); struct rt6_info *rt; - rt = (struct rt6_info *) dst; + rt = dst_rt6_info(dst); if (dst) { if (proto == htons(ETH_P_IPV6)) dst = dst_check(dst, rt6_get_cookie(rt)); @@ -970,15 +970,14 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb, struct dst_entry *dst) { - struct rtable *rt = (struct rtable *) dst; - - return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr; + return (dst) ? 
rt_nexthop(dst_rtable(dst), ip_hdr(skb)->daddr) : + ip_hdr(skb)->daddr; } static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb, struct dst_entry *dst) { - struct rt6_info *rt = (struct rt6_info *) dst; + struct rt6_info *rt = dst_rt6_info(dst); if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) return &rt->rt6i_gateway; diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 52db147d99..f6dd077d47 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -250,7 +250,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, unsigned long flags; void *kern_buf; - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); @@ -317,7 +317,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf, unsigned long flags; void *kern_buf; - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index af18d20f30..49c57a9c11 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h) { struct Scsi_Host *sh; - sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info)); + sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info *)); if (sh == NULL) { dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); return -ENOMEM; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f6e6db8b8a..e97f4e01a8 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -239,8 +239,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, /* help some expanders that fail to zero sas_address in the 'no * device' case */ - if (phy->attached_dev_type == SAS_PHY_UNUSED || - phy->linkrate < SAS_LINK_RATE_1_5_GBPS) + if (phy->attached_dev_type == SAS_PHY_UNUSED) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index 55d590b919..6a3db7032c 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -2158,10 +2158,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(persistent_id); +/** + * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_supported attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read-only' sdev attribute, only works with SATA devices + */ +static ssize_t +sas_ncq_prio_supported_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev)); +} +static DEVICE_ATTR_RO(sas_ncq_prio_supported); + +/** + * sas_ncq_prio_enable_show - send prioritized io commands to device + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_enable attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read/write' sdev attribute, only works with SATA devices + */ +static ssize_t +sas_ncq_prio_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; + + if 
(!sdev_priv_data) + return 0; + + return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable); +} + +static ssize_t +sas_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; + bool ncq_prio_enable = 0; + + if (kstrtobool(buf, &ncq_prio_enable)) + return -EINVAL; + + if (!sas_ata_ncq_prio_supported(sdev)) + return -EINVAL; + + sdev_priv_data->ncq_prio_enable = ncq_prio_enable; + + return strlen(buf); +} +static DEVICE_ATTR_RW(sas_ncq_prio_enable); + static struct attribute *mpi3mr_dev_attrs[] = { &dev_attr_sas_address.attr, &dev_attr_device_handle.attr, &dev_attr_persistent_id.attr, + &dev_attr_sas_ncq_prio_supported.attr, + &dev_attr_sas_ncq_prio_enable.attr, NULL, }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 1b492e9a3e..86f553c617 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -8512,6 +8512,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); if (ioc->facts.MaxDevHandle % 8) ioc->pd_handles_sz++; + /* + * pd_handles_sz should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory touch may occur. + */ + ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long)); + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL); if (!ioc->pd_handles) { @@ -8529,6 +8535,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); if (ioc->facts.MaxDevHandle % 8) ioc->pend_os_device_add_sz++; + + /* + * pend_os_device_add_sz should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory may occur. + */ + ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz, + sizeof(unsigned long)); ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, GFP_KERNEL); if (!ioc->pend_os_device_add) { @@ -8820,6 +8833,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) if (ioc->facts.MaxDevHandle % 8) pd_handles_sz++; + /* + * pd_handles should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory touch may + * occur. 
+ */ + pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long)); pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, GFP_KERNEL); if (!pd_handles) { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index bf100a4ebf..fe1e96fda2 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -2048,9 +2048,6 @@ void mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request); -/* NCQ Prio Handling Check */ -bool scsih_ncq_prio_supp(struct scsi_device *sdev); - void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_init_debugfs(void); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 1c9fd26195..87784c9624 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -4088,7 +4088,7 @@ sas_ncq_prio_supported_show(struct device *dev, { struct scsi_device *sdev = to_scsi_device(dev); - return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev)); + return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev)); } static DEVICE_ATTR_RO(sas_ncq_prio_supported); @@ -4123,7 +4123,7 @@ sas_ncq_prio_enable_store(struct device *dev, if (kstrtobool(buf, &ncq_prio_enable)) return -EINVAL; - if (!scsih_ncq_prio_supp(sdev)) + if (!sas_ata_ncq_prio_supported(sdev)) return -EINVAL; sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index ef8ee93005..7e923e0249 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -12573,29 +12573,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } -/** - * scsih_ncq_prio_supp - Check for NCQ command priority support - * @sdev: scsi device struct - * - * This is called when a user indicates they would like to enable - * ncq command priorities. This works only on SATA devices. - */ -bool scsih_ncq_prio_supp(struct scsi_device *sdev) -{ - struct scsi_vpd *vpd; - bool ncq_prio_supp = false; - - rcu_read_lock(); - vpd = rcu_dereference(sdev->vpd_pg89); - if (!vpd || vpd->len < 214) - goto out; - - ncq_prio_supp = (vpd->data[213] >> 4) & 1; -out: - rcu_read_unlock(); - - return ncq_prio_supp; -} /* * The pci device ids are defined in mpi/mpi2_cnfg.h. 
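/*
 * [Editor's note, not part of the upstream patch] The ALIGN() calls added in the
 * mpt3sas_base.c hunks above round the bitmap sizes (pd_handles_sz,
 * pend_os_device_add_sz) up to a multiple of sizeof(unsigned long), because
 * set_bit()/test_bit() always access a whole long word. A worked example of the
 * arithmetic, assuming a 64-bit build (sizeof(unsigned long) == 8):
 *
 *	MaxDevHandle = 100  ->  100 / 8 = 12, remainder != 0, so 13 bytes
 *	ALIGN(13, 8) = 16 bytes
 *
 * A set_bit() on one of the last handles then touches bytes 8..15 of the map,
 * all of which now lie inside the kzalloc()/krealloc() buffer.
 */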
*/ diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c index 451fd236bf..96174353e3 100644 --- a/drivers/scsi/qedf/qedf_debugfs.c +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -170,7 +170,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer, if (!count || *ppos) return 0; - kern_buf = memdup_user(buffer, count); + kern_buf = memdup_user_nul(buffer, count); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c index 8deb2001dc..37eed6a278 100644 --- a/drivers/scsi/qedi/qedi_debugfs.c +++ b/drivers/scsi/qedi/qedi_debugfs.c @@ -120,15 +120,11 @@ static ssize_t qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - size_t cnt = 0; - - if (*ppos) - return 0; + char buf[64]; + int len; - cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover); - cnt = min_t(int, count, cnt - *ppos); - *ppos += cnt; - return cnt; + len = sprintf(buf, "do_not_recover=%d\n", qedi_do_not_recover); + return simple_read_from_buffer(buffer, count, ppos, buf, len); } static int diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 55ff3d7482..a1545dad0c 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -274,7 +274,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n", iocbs_used, ha->base_qpair->fwres.iocbs_limit); - seq_printf(s, "estimate exchange used[%d] high water limit [%d] n", + seq_printf(s, "estimate exchange used[%d] high water limit [%d]\n", exch_used, ha->base_qpair->fwres.exch_limit); if (ql2xenforce_iocb_limit == 2) { diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3e0c038127..ee69bd3588 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) if (result < SCSI_VPD_HEADER_SIZE) return 0; + if (result > sizeof(vpd)) { + dev_warn_once(&sdev->sdev_gendev, + "%s: long VPD page 0 length: %d bytes\n", + __func__, result); + result = sizeof(vpd); + } + result -= SCSI_VPD_HEADER_SIZE; if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result)) return 0; @@ -666,6 +673,13 @@ void scsi_cdl_check(struct scsi_device *sdev) sdev->use_10_for_rw = 0; sdev->cdl_supported = 1; + + /* + * If the device supports CDL, make sure that the current drive + * feature status is consistent with the user controlled + * cdl_enable state. + */ + scsi_cdl_enable(sdev, sdev->cdl_enable); } else { sdev->cdl_supported = 0; } diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index d704c484a2..7fdd2b61fe 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev) } EXPORT_SYMBOL_GPL(sas_is_tlr_enabled); +/** + * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support + * @sdev: SCSI device + * + * Check if an ATA device supports NCQ priority using VPD page 89h (ATA + * Information). Since this VPD page is implemented only for ATA devices, + * this function always returns false for SCSI devices. 
+ */ +bool sas_ata_ncq_prio_supported(struct scsi_device *sdev) +{ + struct scsi_vpd *vpd; + bool ncq_prio_supported = false; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (vpd && vpd->len >= 214) + ncq_prio_supported = (vpd->data[213] >> 4) & 1; + rcu_read_unlock(); + + return ncq_prio_supported; +} +EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported); + /* * SAS Phy attributes */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 65cdc8b77e..a4638ea925 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -63,6 +63,7 @@ #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> +#include <scsi/scsi_devinfo.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> @@ -3125,6 +3126,9 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer) struct scsi_mode_data data; int res; + if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS) + return; + res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a, /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT, sdkp->max_retries, &data, &sshdr); @@ -3572,16 +3576,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, static void sd_read_block_zero(struct scsi_disk *sdkp) { - unsigned int buf_len = sdkp->device->sector_size; - char *buffer, cmd[10] = { }; + struct scsi_device *sdev = sdkp->device; + unsigned int buf_len = sdev->sector_size; + u8 *buffer, cmd[16] = { }; buffer = kmalloc(buf_len, GFP_KERNEL); if (!buffer) return; - cmd[0] = READ_10; - put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */ - put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */ + if (sdev->use_16_for_rw) { + cmd[0] = READ_16; + put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */ + put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */ + } else { + cmd[0] = READ_10; + put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */ + put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */ + } scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len, SD_TIMEOUT, sdkp->max_retries, NULL); @@ -3707,8 +3718,10 @@ static int sd_revalidate_disk(struct gendisk *disk) */ if (sdkp->first_scan || q->limits.max_sectors > q->limits.max_dev_sectors || - q->limits.max_sectors > q->limits.max_hw_sectors) + q->limits.max_sectors > q->limits.max_hw_sectors) { q->limits.max_sectors = rw_max; + q->limits.max_user_sectors = rw_max; + } sdkp->first_scan = 0; diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index b0cd071c47..0b2e5690da 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -14,7 +14,8 @@ #define CMDQ_POLL_ENABLE_MASK BIT(0) #define CMDQ_EOC_IRQ_EN BIT(0) #define CMDQ_REG_TYPE 1 -#define CMDQ_JUMP_RELATIVE 1 +#define CMDQ_JUMP_RELATIVE 0 +#define CMDQ_JUMP_ABSOLUTE 1 struct cmdq_instruction { union { @@ -397,7 +398,7 @@ int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr) struct cmdq_instruction inst = {}; inst.op = CMDQ_CODE_JUMP; - inst.offset = CMDQ_JUMP_RELATIVE; + inst.offset = CMDQ_JUMP_ABSOLUTE; inst.value = addr >> cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan); return cmdq_pkt_append_command(pkt, inst); diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c index a5fd68411b..86fb2cd4f4 100644 --- a/drivers/soc/qcom/cmd-db.c +++ b/drivers/soc/qcom/cmd-db.c @@ -1,6 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. 
*/ +/* + * Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include <linux/bitfield.h> #include <linux/debugfs.h> #include <linux/kernel.h> #include <linux/module.h> @@ -17,6 +21,8 @@ #define MAX_SLV_ID 8 #define SLAVE_ID_MASK 0x7 #define SLAVE_ID_SHIFT 16 +#define SLAVE_ID(addr) FIELD_GET(GENMASK(19, 16), addr) +#define VRM_ADDR(addr) FIELD_GET(GENMASK(19, 4), addr) /** * struct entry_header: header for each entry in cmddb @@ -221,6 +227,30 @@ const void *cmd_db_read_aux_data(const char *id, size_t *len) EXPORT_SYMBOL_GPL(cmd_db_read_aux_data); /** + * cmd_db_match_resource_addr() - Compare if both Resource addresses are same + * + * @addr1: Resource address to compare + * @addr2: Resource address to compare + * + * Return: true if two addresses refer to the same resource, false otherwise + */ +bool cmd_db_match_resource_addr(u32 addr1, u32 addr2) +{ + /* + * Each RPMh VRM accelerator resource has 3 or 4 contiguous 4-byte + * aligned addresses associated with it. Ignore the offset to check + * for VRM requests. + */ + if (addr1 == addr2) + return true; + else if (SLAVE_ID(addr1) == CMD_DB_HW_VRM && VRM_ADDR(addr1) == VRM_ADDR(addr2)) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(cmd_db_match_resource_addr); + +/** * cmd_db_read_slave_id - Get the slave ID for a given resource address * * @id: Resource id to query the DB for version diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c index f913e9bd57..823fd108fa 100644 --- a/drivers/soc/qcom/pmic_glink.c +++ b/drivers/soc/qcom/pmic_glink.c @@ -11,6 +11,7 @@ #include <linux/slab.h> #include <linux/soc/qcom/pdr.h> #include <linux/soc/qcom/pmic_glink.h> +#include <linux/spinlock.h> enum { PMIC_GLINK_CLIENT_BATT = 0, @@ -36,7 +37,7 @@ struct pmic_glink { unsigned int pdr_state; /* serializing clients list updates */ - struct mutex client_lock; + spinlock_t client_lock; struct list_head clients; }; @@ -58,10 +59,11 @@ static void _devm_pmic_glink_release_client(struct device *dev, void *res) { struct pmic_glink_client *client = (struct pmic_glink_client *)res; struct pmic_glink *pg = client->pg; + unsigned long flags; - mutex_lock(&pg->client_lock); + spin_lock_irqsave(&pg->client_lock, flags); list_del(&client->node); - mutex_unlock(&pg->client_lock); + spin_unlock_irqrestore(&pg->client_lock, flags); } struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev, @@ -72,6 +74,7 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev, { struct pmic_glink_client *client; struct pmic_glink *pg = dev_get_drvdata(dev->parent); + unsigned long flags; client = devres_alloc(_devm_pmic_glink_release_client, sizeof(*client), GFP_KERNEL); if (!client) @@ -83,9 +86,14 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev, client->pdr_notify = pdr; client->priv = priv; - mutex_lock(&pg->client_lock); + mutex_lock(&pg->state_lock); + spin_lock_irqsave(&pg->client_lock, flags); + list_add(&client->node, &pg->clients); - mutex_unlock(&pg->client_lock); + client->pdr_notify(client->priv, pg->client_state); + + spin_unlock_irqrestore(&pg->client_lock, flags); + mutex_unlock(&pg->state_lock); devres_add(dev, client); @@ -107,6 +115,7 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data, struct pmic_glink_client *client; struct pmic_glink_hdr *hdr; struct pmic_glink *pg = dev_get_drvdata(&rpdev->dev); + unsigned long flags; if (len 
< sizeof(*hdr)) { dev_warn(pg->dev, "ignoring truncated message\n"); @@ -115,10 +124,12 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data, hdr = data; + spin_lock_irqsave(&pg->client_lock, flags); list_for_each_entry(client, &pg->clients, node) { if (client->id == le32_to_cpu(hdr->owner)) client->cb(data, len, client->priv); } + spin_unlock_irqrestore(&pg->client_lock, flags); return 0; } @@ -158,6 +169,7 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg) { struct pmic_glink_client *client; unsigned int new_state = pg->client_state; + unsigned long flags; if (pg->client_state != SERVREG_SERVICE_STATE_UP) { if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept) @@ -168,8 +180,10 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg) } if (new_state != pg->client_state) { + spin_lock_irqsave(&pg->client_lock, flags); list_for_each_entry(client, &pg->clients, node) client->pdr_notify(client->priv, new_state); + spin_unlock_irqrestore(&pg->client_lock, flags); pg->client_state = new_state; } } @@ -256,7 +270,7 @@ static int pmic_glink_probe(struct platform_device *pdev) pg->dev = &pdev->dev; INIT_LIST_HEAD(&pg->clients); - mutex_init(&pg->client_lock); + spin_lock_init(&pg->client_lock); mutex_init(&pg->state_lock); match_data = (unsigned long *)of_device_get_match_data(&pdev->dev); diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index a021dc7180..daf64be966 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME @@ -557,7 +558,7 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs, for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) { addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j); for (k = 0; k < msg->num_cmds; k++) { - if (addr == msg->cmds[k].addr) + if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr)) return -EBUSY; } } diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 0efc1c3bee..3e7cf04aaf 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -1880,7 +1880,7 @@ struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns, /* check if we found a PDI, else find in bi-directional */ if (!pdi) - pdi = cdns_find_pdi(cdns, 2, stream->num_bd, stream->bd, + pdi = cdns_find_pdi(cdns, 0, stream->num_bd, stream->bd, dai_id); if (pdi) { diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c index aabef9fc84..0d9c948e11 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ -21,7 +21,7 @@ #include <linux/units.h> #define CS42L43_FIFO_SIZE 16 -#define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ) +#define CS42L43_SPI_ROOT_HZ 49152000 #define CS42L43_SPI_MAX_LENGTH 65532 enum cs42l43_spi_cmd { diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index c3e5cee18b..09b6c1b45f 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -660,18 +660,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx, ctrl |= (spi_imx->target_burst * 8 - 1) << MX51_ECSPI_CTRL_BL_OFFSET; else { - if (spi_imx->usedma) { - ctrl |= (spi_imx->bits_per_word - 1) - << MX51_ECSPI_CTRL_BL_OFFSET; - } else { - if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST) - ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * 
BITS_PER_BYTE - 1) - << MX51_ECSPI_CTRL_BL_OFFSET; - else - ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word, - BITS_PER_BYTE) * spi_imx->bits_per_word - 1) - << MX51_ECSPI_CTRL_BL_OFFSET; - } + ctrl |= (spi_imx->bits_per_word - 1) + << MX51_ECSPI_CTRL_BL_OFFSET; } /* set clock speed */ diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index f1e922fd36..955c920c4b 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -349,7 +349,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi) static int stm32_qspi_get_mode(u8 buswidth) { - if (buswidth == 4) + if (buswidth >= 4) return CCR_BUSWIDTH_4; return buswidth; @@ -653,9 +653,7 @@ static int stm32_qspi_setup(struct spi_device *spi) return -EINVAL; mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL); - if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) || - ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) && - gpiod_count(qspi->dev, "cs") == -ENOENT)) { + if (mode && gpiod_count(qspi->dev, "cs") == -ENOENT) { dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n"); dev_err(qspi->dev, "configuration not supported\n"); @@ -676,10 +674,10 @@ static int stm32_qspi_setup(struct spi_device *spi) qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN; /* - * Dual flash mode is only enable in case SPI_TX_OCTAL and SPI_TX_OCTAL - * are both set in spi->mode and "cs-gpios" properties is found in DT + * Dual flash mode is only enable in case SPI_TX_OCTAL or SPI_RX_OCTAL + * is set in spi->mode and "cs-gpios" properties is found in DT */ - if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) { + if (mode) { qspi->cr_reg |= CR_DFM; dev_dbg(qspi->dev, "Dual flash mode enable"); } diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 4a68abcdcc..4c4ff074e3 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -1016,8 +1016,10 @@ end_irq: static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id) { struct spi_controller *ctrl = dev_id; + struct stm32_spi *spi = spi_controller_get_devdata(ctrl); spi_finalize_current_transfer(ctrl); + stm32fx_spi_disable(spi); return IRQ_HANDLED; } @@ -1055,7 +1057,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP; if (!(sr & mask)) { - dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", + dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", sr, ier); spin_unlock_irqrestore(&spi->lock, flags); return IRQ_NONE; @@ -1185,8 +1187,6 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl, ~clrb) | setb, spi->base + spi->cfg->regs->cpol.reg); - stm32_spi_enable(spi); - spin_unlock_irqrestore(&spi->lock, flags); return 0; @@ -1204,6 +1204,7 @@ static void stm32fx_spi_dma_tx_cb(void *data) if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) { spi_finalize_current_transfer(spi->ctrl); + stm32fx_spi_disable(spi); } } @@ -1218,6 +1219,7 @@ static void stm32_spi_dma_rx_cb(void *data) struct stm32_spi *spi = data; spi_finalize_current_transfer(spi->ctrl); + spi->cfg->disable(spi); } /** @@ -1305,6 +1307,8 @@ static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi) stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2); + stm32_spi_enable(spi); + /* starting data transfer when buffer is loaded */ if (spi->tx_buf) spi->cfg->write_tx(spi); @@ -1341,6 +1345,8 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi) spin_lock_irqsave(&spi->lock, flags); + stm32_spi_enable(spi); + /* Be sure to have data in fifo 
before starting data transfer */ if (spi->tx_buf) stm32h7_spi_write_txfifo(spi); @@ -1372,6 +1378,8 @@ static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi) */ stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE); } + + stm32_spi_enable(spi); } /** @@ -1405,6 +1413,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier); + stm32_spi_enable(spi); + if (STM32_SPI_HOST_MODE(spi)) stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index a2c467d9e9..c349d60126 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -685,10 +685,12 @@ static int __spi_add_device(struct spi_device *spi) * Make sure that multiple logical CS doesn't map to the same physical CS. * For example, spi->chip_select[0] != spi->chip_select[1] and so on. */ - for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { - status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1); - if (status) - return status; + if (!spi_controller_is_target(ctlr)) { + for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { + status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1); + if (status) + return status; + } } /* Set the bus ID string */ @@ -1242,6 +1244,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) else rx_dev = ctlr->dev.parent; + ret = -ENOMSG; list_for_each_entry(xfer, &msg->transfers, transfer_list) { /* The sync is done before each transfer. */ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC; @@ -1271,6 +1274,9 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) } } } + /* No transfer has been mapped, bail out with success */ + if (ret) + return 0; ctlr->cur_rx_dma_dev = rx_dev; ctlr->cur_tx_dma_dev = tx_dev; diff --git a/drivers/spmi/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c index 674a350cc6..fa068b34b0 100644 --- a/drivers/spmi/hisi-spmi-controller.c +++ b/drivers/spmi/hisi-spmi-controller.c @@ -300,7 +300,6 @@ static int spmi_controller_probe(struct platform_device *pdev) spin_lock_init(&spmi_controller->lock); - ctrl->nr = spmi_controller->channel; ctrl->dev.parent = pdev->dev.parent; ctrl->dev.of_node = of_node_get(pdev->dev.of_node); diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 9ed1180fe3..937c153245 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -1462,8 +1462,8 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); core = devm_ioremap(&ctrl->dev, res->start, resource_size(res)); - if (IS_ERR(core)) - return PTR_ERR(core); + if (!core) + return -ENOMEM; pmic_arb->core_size = resource_size(res); @@ -1495,15 +1495,15 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) "obsrvr"); pmic_arb->rd_base = devm_ioremap(&ctrl->dev, res->start, resource_size(res)); - if (IS_ERR(pmic_arb->rd_base)) - return PTR_ERR(pmic_arb->rd_base); + if (!pmic_arb->rd_base) + return -ENOMEM; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "chnls"); pmic_arb->wr_base = devm_ioremap(&ctrl->dev, res->start, resource_size(res)); - if (IS_ERR(pmic_arb->wr_base)) - return PTR_ERR(pmic_arb->wr_base); + if (!pmic_arb->wr_base) + return -ENOMEM; } pmic_arb->max_periphs = PMIC_ARB_MAX_PERIPHS; diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 9f30e0edad..bdb6595ffd 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -341,11 +341,13 @@ static int ssb_bus_match(struct 
device *dev, struct device_driver *drv) static int ssb_device_uevent(const struct device *dev, struct kobj_uevent_env *env) { - const struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + const struct ssb_device *ssb_dev; if (!dev) return -ENODEV; + ssb_dev = dev_to_ssb_dev(dev); + return add_uevent_var(env, "MODALIAS=ssb:v%04Xid%04Xrev%02X", ssb_dev->id.vendor, ssb_dev->id.coreid, diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c index 8541995008..aa6f266b62 100644 --- a/drivers/staging/greybus/arche-apb-ctrl.c +++ b/drivers/staging/greybus/arche-apb-ctrl.c @@ -466,6 +466,7 @@ static const struct of_device_id arche_apb_ctrl_of_match[] = { { .compatible = "usbffff,2", }, { }, }; +MODULE_DEVICE_TABLE(of, arche_apb_ctrl_of_match); static struct platform_driver arche_apb_ctrl_device_driver = { .probe = arche_apb_ctrl_probe, diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c index 891b75327d..b33977ccd5 100644 --- a/drivers/staging/greybus/arche-platform.c +++ b/drivers/staging/greybus/arche-platform.c @@ -619,14 +619,7 @@ static const struct of_device_id arche_platform_of_match[] = { { .compatible = "google,arche-platform", }, { }, }; - -static const struct of_device_id arche_combined_id[] = { - /* Use PID/VID of SVC device */ - { .compatible = "google,arche-platform", }, - { .compatible = "usbffff,2", }, - { }, -}; -MODULE_DEVICE_TABLE(of, arche_combined_id); +MODULE_DEVICE_TABLE(of, arche_platform_of_match); static struct platform_driver arche_platform_device_driver = { .probe = arche_platform_probe, diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index a5c2fe9638..00360f4a04 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -142,6 +142,9 @@ static int __gb_lights_flash_brightness_set(struct gb_channel *channel) channel = get_channel_from_mode(channel->light, GB_CHANNEL_MODE_TORCH); + if (!channel) + return -EINVAL; + /* For not flash we need to convert brightness to intensity */ intensity = channel->intensity_uA.min + (channel->intensity_uA.step * channel->led->brightness); @@ -528,7 +531,10 @@ static int gb_lights_light_v4l2_register(struct gb_light *light) } channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH); - WARN_ON(!channel_flash); + if (!channel_flash) { + dev_err(dev, "failed to get flash channel from mode\n"); + return -EINVAL; + } fled = &channel_flash->fled; diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c index 938a4ea89c..8c30191b21 100644 --- a/drivers/staging/media/atomisp/pci/sh_css.c +++ b/drivers/staging/media/atomisp/pci/sh_css.c @@ -4690,6 +4690,7 @@ static int load_video_binaries(struct ia_css_pipe *pipe) sizeof(struct ia_css_binary), GFP_KERNEL); if (!mycs->yuv_scaler_binary) { + mycs->num_yuv_scaler = 0; err = -ENOMEM; return err; } diff --git a/drivers/staging/media/starfive/camss/stf-camss.c b/drivers/staging/media/starfive/camss/stf-camss.c index a587f86010..323aa70fde 100644 --- a/drivers/staging/media/starfive/camss/stf-camss.c +++ b/drivers/staging/media/starfive/camss/stf-camss.c @@ -162,6 +162,12 @@ err_isp_unregister: static void stfcamss_unregister_devs(struct stfcamss *stfcamss) { + struct stf_capture *cap_yuv = &stfcamss->captures[STF_CAPTURE_YUV]; + struct stf_isp_dev *isp_dev = &stfcamss->isp_dev; + + media_entity_remove_links(&isp_dev->subdev.entity); + media_entity_remove_links(&cap_yuv->video.vdev.entity); + 
stf_isp_unregister(&stfcamss->isp_dev); stf_capture_unregister(stfcamss); } diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c index 14e34eabc4..4a1bfebb1b 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c @@ -150,7 +150,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid) { struct proc_thermal_pci *pci_info = devid; struct proc_thermal_device *proc_priv; - int ret = IRQ_HANDLED; + int ret = IRQ_NONE; u32 status; proc_priv = pci_info->proc_priv; @@ -175,6 +175,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid) /* Disable enable interrupt flag */ proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0); pkg_thermal_schedule_work(&pci_info->work); + ret = IRQ_HANDLED; } pci_write_config_byte(pci_info->pdev, 0xdc, 0x01); diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c index fd4bd650c7..6b9422bd87 100644 --- a/drivers/thermal/mediatek/lvts_thermal.c +++ b/drivers/thermal/mediatek/lvts_thermal.c @@ -740,7 +740,11 @@ static int lvts_golden_temp_init(struct device *dev, u32 *value, int temp_offset gt = (*value) >> 24; - if (gt && gt < LVTS_GOLDEN_TEMP_MAX) + /* A zero value for gt means that device has invalid efuse data */ + if (!gt) + return -ENODATA; + + if (gt < LVTS_GOLDEN_TEMP_MAX) golden_temp = gt; golden_temp_offset = golden_temp * 500 + temp_offset; @@ -1530,11 +1534,15 @@ static const struct lvts_data mt7988_lvts_ap_data = { static const struct lvts_data mt8192_lvts_mcu_data = { .lvts_ctrl = mt8192_lvts_mcu_data_ctrl, .num_lvts_ctrl = ARRAY_SIZE(mt8192_lvts_mcu_data_ctrl), + .temp_factor = LVTS_COEFF_A_MT8195, + .temp_offset = LVTS_COEFF_B_MT8195, }; static const struct lvts_data mt8192_lvts_ap_data = { .lvts_ctrl = mt8192_lvts_ap_data_ctrl, .num_lvts_ctrl = ARRAY_SIZE(mt8192_lvts_ap_data_ctrl), + .temp_factor = LVTS_COEFF_A_MT8195, + .temp_offset = LVTS_COEFF_B_MT8195, }; static const struct lvts_data mt8195_lvts_mcu_data = { diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c index f6edb12ec0..5225b3621a 100644 --- a/drivers/thermal/qcom/lmh.c +++ b/drivers/thermal/qcom/lmh.c @@ -95,6 +95,9 @@ static int lmh_probe(struct platform_device *pdev) unsigned int enable_alg; u32 node_id; + if (!qcom_scm_is_available()) + return -EPROBE_DEFER; + lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL); if (!lmh_data) return -ENOMEM; diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 6d7c16ccb4..4edee8d929 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -264,7 +264,7 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1, for (i = 0; i < priv->num_sensors; i++) { dev_dbg(priv->dev, "%s: sensor%d - data_point1:%#x data_point2:%#x\n", - __func__, i, p1[i], p2[i]); + __func__, i, p1[i], p2 ? 
p2[i] : 0); if (!priv->sensor[i].slope) priv->sensor[i].slope = SLOPE_DEFAULT; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 34a31bc720..28888ac43c 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -431,7 +431,6 @@ static void update_temperature(struct thermal_zone_device *tz) trace_thermal_temperature(tz); thermal_genl_sampling_temp(tz->id, temp); - thermal_debug_update_temp(tz); } static void thermal_zone_device_check(struct work_struct *work) @@ -475,6 +474,8 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, for_each_trip(tz, trip) handle_thermal_trip(tz, trip); + thermal_debug_update_temp(tz); + monitor_thermal_zone(tz); } @@ -897,6 +898,7 @@ __thermal_cooling_device_register(struct device_node *np, { struct thermal_cooling_device *cdev; struct thermal_zone_device *pos = NULL; + unsigned long current_state; int id, ret; if (!ops || !ops->get_max_state || !ops->get_cur_state || @@ -934,6 +936,18 @@ __thermal_cooling_device_register(struct device_node *np, if (ret) goto out_cdev_type; + /* + * The cooling device's current state is only needed for debug + * initialization below, so a failure to get it does not cause + * the entire cooling device initialization to fail. However, + * the debug will not work for the device if its initial state + * cannot be determined and drivers are responsible for ensuring + * that this will not happen. + */ + ret = cdev->ops->get_cur_state(cdev, &current_state); + if (ret) + current_state = ULONG_MAX; + thermal_cooling_device_setup_sysfs(cdev); ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id); @@ -947,6 +961,9 @@ __thermal_cooling_device_register(struct device_node *np, return ERR_PTR(ret); } + if (current_state <= cdev->max_state) + thermal_debug_cdev_add(cdev, current_state); + /* Add 'this' new cdev to the global cdev list */ mutex_lock(&thermal_list_lock); @@ -962,8 +979,6 @@ __thermal_cooling_device_register(struct device_node *np, mutex_unlock(&thermal_list_lock); - thermal_debug_cdev_add(cdev); - return cdev; out_cooling_dev: @@ -1618,6 +1633,12 @@ static int thermal_pm_notify(struct notifier_block *nb, static struct notifier_block thermal_pm_nb = { .notifier_call = thermal_pm_notify, + /* + * Run at the lowest priority to avoid interference between the thermal + * zone resume work items spawned by thermal_pm_notify() and the other + * PM notifiers. + */ + .priority = INT_MIN, }; static int __init thermal_init(void) diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c index 5693cc8b23..403f74d663 100644 --- a/drivers/thermal/thermal_debugfs.c +++ b/drivers/thermal/thermal_debugfs.c @@ -435,6 +435,14 @@ void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, } cdev_dbg->current_state = new_state; + + /* + * Create a record for the new state if it is not there, so its + * duration will be printed by cdev_dt_seq_show() as expected if it + * runs before the next state transition. + */ + thermal_debugfs_cdev_record_get(thermal_dbg, cdev_dbg->durations, new_state); + transition = (old_state << 16) | new_state; /* @@ -460,8 +468,9 @@ void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, * Allocates a cooling device object for debug, initializes the * statistics and create the entries in sysfs.
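The .priority = INT_MIN added to thermal_pm_nb above works because PM notifiers run in descending priority order, so INT_MIN runs after all other callbacks in the chain. A minimal sketch of registering a lowest-priority PM notifier; the callback body is a placeholder, not taken from this series.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

static int example_pm_notify(struct notifier_block *nb,
			     unsigned long mode, void *unused)
{
	/* React to PM_SUSPEND_PREPARE / PM_POST_SUSPEND here. */
	return NOTIFY_OK;
}

static struct notifier_block example_pm_nb = {
	.notifier_call	= example_pm_notify,
	.priority	= INT_MIN,	/* run after every other PM notifier */
};

static int __init example_init(void)
{
	return register_pm_notifier(&example_pm_nb);
}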
* @cdev: a pointer to a cooling device + * @state: current state of the cooling device */ -void thermal_debug_cdev_add(struct thermal_cooling_device *cdev) +void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state) { struct thermal_debugfs *thermal_dbg; struct cdev_debugfs *cdev_dbg; @@ -478,9 +487,16 @@ void thermal_debug_cdev_add(struct thermal_cooling_device *cdev) INIT_LIST_HEAD(&cdev_dbg->durations[i]); } - cdev_dbg->current_state = 0; + cdev_dbg->current_state = state; cdev_dbg->timestamp = ktime_get(); + /* + * Create a record for the initial cooling device state, so its + * duration will be printed by cdev_dt_seq_show() as expected if it + * runs before the first state transition. + */ + thermal_debugfs_cdev_record_get(thermal_dbg, cdev_dbg->durations, state); + debugfs_create_file("trans_table", 0400, thermal_dbg->d_top, thermal_dbg, &tt_fops); @@ -555,7 +571,6 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz, struct tz_episode *tze; struct tz_debugfs *tz_dbg; struct thermal_debugfs *thermal_dbg = tz->debugfs; - int temperature = tz->temperature; int trip_id = thermal_zone_trip_id(tz, trip); ktime_t now = ktime_get(); @@ -624,12 +639,6 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz, tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node); tze->trip_stats[trip_id].timestamp = now; - tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature); - tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature); - tze->trip_stats[trip_id].count++; - tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg + - (temperature - tze->trip_stats[trip_id].avg) / - tze->trip_stats[trip_id].count; unlock: mutex_unlock(&thermal_dbg->lock); diff --git a/drivers/thermal/thermal_debugfs.h b/drivers/thermal/thermal_debugfs.h index 155b9af5fe..c28bd4c114 100644 --- a/drivers/thermal/thermal_debugfs.h +++ b/drivers/thermal/thermal_debugfs.h @@ -2,7 +2,7 @@ #ifdef CONFIG_THERMAL_DEBUGFS void thermal_debug_init(void); -void thermal_debug_cdev_add(struct thermal_cooling_device *cdev); +void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state); void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev); void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, int state); void thermal_debug_tz_add(struct thermal_zone_device *tz); @@ -14,7 +14,7 @@ void thermal_debug_tz_trip_down(struct thermal_zone_device *tz, void thermal_debug_update_temp(struct thermal_zone_device *tz); #else static inline void thermal_debug_init(void) {} -static inline void thermal_debug_cdev_add(struct thermal_cooling_device *cdev) {} +static inline void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state) {} static inline void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev) {} static inline void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, int state) {} diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index e324cd8997..0754fe76ed 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -943,8 +943,9 @@ static void margining_port_init(struct tb_port *port) debugfs_create_file("run", 0600, dir, port, &margining_run_fops); debugfs_create_file("results", 0600, dir, port, &margining_results_fops); debugfs_create_file("test", 0600, dir, port, &margining_test_fops); - if (independent_voltage_margins(usb4) || - (supports_time(usb4) && independent_time_margins(usb4))) + if 
(independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL || + (supports_time(usb4) && + independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR)) debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops); } diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 4036566feb..afbf7837b5 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -245,16 +245,18 @@ enum gsm_encoding { enum gsm_mux_state { GSM_SEARCH, - GSM_START, - GSM_ADDRESS, - GSM_CONTROL, - GSM_LEN, - GSM_DATA, - GSM_FCS, - GSM_OVERRUN, - GSM_LEN0, - GSM_LEN1, - GSM_SSOF, + GSM0_ADDRESS, + GSM0_CONTROL, + GSM0_LEN0, + GSM0_LEN1, + GSM0_DATA, + GSM0_FCS, + GSM0_SSOF, + GSM1_START, + GSM1_ADDRESS, + GSM1_CONTROL, + GSM1_DATA, + GSM1_OVERRUN, }; /* @@ -2847,6 +2849,30 @@ invalid: return; } +/** + * gsm0_receive_state_check_and_fix - check and correct receive state + * @gsm: gsm data for this ldisc instance + * + * Ensures that the current receive state is valid for basic option mode. + */ + +static void gsm0_receive_state_check_and_fix(struct gsm_mux *gsm) +{ + switch (gsm->state) { + case GSM_SEARCH: + case GSM0_ADDRESS: + case GSM0_CONTROL: + case GSM0_LEN0: + case GSM0_LEN1: + case GSM0_DATA: + case GSM0_FCS: + case GSM0_SSOF: + break; + default: + gsm->state = GSM_SEARCH; + break; + } +} /** * gsm0_receive - perform processing for non-transparency @@ -2860,26 +2886,27 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c) { unsigned int len; + gsm0_receive_state_check_and_fix(gsm); switch (gsm->state) { case GSM_SEARCH: /* SOF marker */ if (c == GSM0_SOF) { - gsm->state = GSM_ADDRESS; + gsm->state = GSM0_ADDRESS; gsm->address = 0; gsm->len = 0; gsm->fcs = INIT_FCS; } break; - case GSM_ADDRESS: /* Address EA */ + case GSM0_ADDRESS: /* Address EA */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->address, c)) - gsm->state = GSM_CONTROL; + gsm->state = GSM0_CONTROL; break; - case GSM_CONTROL: /* Control Byte */ + case GSM0_CONTROL: /* Control Byte */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->control = c; - gsm->state = GSM_LEN0; + gsm->state = GSM0_LEN0; break; - case GSM_LEN0: /* Length EA */ + case GSM0_LEN0: /* Length EA */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->len, c)) { if (gsm->len > gsm->mru) { @@ -2889,14 +2916,14 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c) } gsm->count = 0; if (!gsm->len) - gsm->state = GSM_FCS; + gsm->state = GSM0_FCS; else - gsm->state = GSM_DATA; + gsm->state = GSM0_DATA; break; } - gsm->state = GSM_LEN1; + gsm->state = GSM0_LEN1; break; - case GSM_LEN1: + case GSM0_LEN1: gsm->fcs = gsm_fcs_add(gsm->fcs, c); len = c; gsm->len |= len << 7; @@ -2907,26 +2934,29 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c) } gsm->count = 0; if (!gsm->len) - gsm->state = GSM_FCS; + gsm->state = GSM0_FCS; else - gsm->state = GSM_DATA; + gsm->state = GSM0_DATA; break; - case GSM_DATA: /* Data */ + case GSM0_DATA: /* Data */ gsm->buf[gsm->count++] = c; - if (gsm->count == gsm->len) { + if (gsm->count >= MAX_MRU) { + gsm->bad_size++; + gsm->state = GSM_SEARCH; + } else if (gsm->count >= gsm->len) { /* Calculate final FCS for UI frames over all data */ if ((gsm->control & ~PF) != UIH) { gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->count); } - gsm->state = GSM_FCS; + gsm->state = GSM0_FCS; } break; - case GSM_FCS: /* FCS follows the packet */ + case GSM0_FCS: /* FCS follows the packet */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); - gsm->state = GSM_SSOF; + gsm->state = GSM0_SSOF; break; - case GSM_SSOF: + case GSM0_SSOF: gsm->state = 
GSM_SEARCH; if (c == GSM0_SOF) gsm_queue(gsm); @@ -2940,6 +2970,29 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c) } /** + * gsm1_receive_state_check_and_fix - check and correct receive state + * @gsm: gsm data for this ldisc instance + * + * Ensures that the current receive state is valid for advanced option mode. + */ + +static void gsm1_receive_state_check_and_fix(struct gsm_mux *gsm) +{ + switch (gsm->state) { + case GSM_SEARCH: + case GSM1_START: + case GSM1_ADDRESS: + case GSM1_CONTROL: + case GSM1_DATA: + case GSM1_OVERRUN: + break; + default: + gsm->state = GSM_SEARCH; + break; + } +} + +/** * gsm1_receive - perform processing for non-transparency * @gsm: gsm data for this ldisc instance * @c: character @@ -2949,6 +3002,7 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c) static void gsm1_receive(struct gsm_mux *gsm, u8 c) { + gsm1_receive_state_check_and_fix(gsm); /* handle XON/XOFF */ if ((c & ISO_IEC_646_MASK) == XON) { gsm->constipated = true; @@ -2961,11 +3015,11 @@ static void gsm1_receive(struct gsm_mux *gsm, u8 c) } if (c == GSM1_SOF) { /* EOF is only valid in frame if we have got to the data state */ - if (gsm->state == GSM_DATA) { + if (gsm->state == GSM1_DATA) { if (gsm->count < 1) { /* Missing FSC */ gsm->malformed++; - gsm->state = GSM_START; + gsm->state = GSM1_START; return; } /* Remove the FCS from data */ @@ -2981,14 +3035,14 @@ static void gsm1_receive(struct gsm_mux *gsm, u8 c) gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]); gsm->len = gsm->count; gsm_queue(gsm); - gsm->state = GSM_START; + gsm->state = GSM1_START; return; } /* Any partial frame was a runt so go back to start */ - if (gsm->state != GSM_START) { + if (gsm->state != GSM1_START) { if (gsm->state != GSM_SEARCH) gsm->malformed++; - gsm->state = GSM_START; + gsm->state = GSM1_START; } /* A SOF in GSM_START means we are still reading idling or framing bytes */ @@ -3009,30 +3063,30 @@ static void gsm1_receive(struct gsm_mux *gsm, u8 c) gsm->escape = false; } switch (gsm->state) { - case GSM_START: /* First byte after SOF */ + case GSM1_START: /* First byte after SOF */ gsm->address = 0; - gsm->state = GSM_ADDRESS; + gsm->state = GSM1_ADDRESS; gsm->fcs = INIT_FCS; fallthrough; - case GSM_ADDRESS: /* Address continuation */ + case GSM1_ADDRESS: /* Address continuation */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->address, c)) - gsm->state = GSM_CONTROL; + gsm->state = GSM1_CONTROL; break; - case GSM_CONTROL: /* Control Byte */ + case GSM1_CONTROL: /* Control Byte */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->control = c; gsm->count = 0; - gsm->state = GSM_DATA; + gsm->state = GSM1_DATA; break; - case GSM_DATA: /* Data */ - if (gsm->count > gsm->mru) { /* Allow one for the FCS */ - gsm->state = GSM_OVERRUN; + case GSM1_DATA: /* Data */ + if (gsm->count > gsm->mru || gsm->count > MAX_MRU) { /* Allow one for the FCS */ + gsm->state = GSM1_OVERRUN; gsm->bad_size++; } else gsm->buf[gsm->count++] = c; break; - case GSM_OVERRUN: /* Over-long - eg a dropped SOF */ + case GSM1_OVERRUN: /* Over-long - eg a dropped SOF */ break; default: pr_debug("%s: unhandled state: %d\n", __func__, gsm->state); diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index f252d0b5a4..5e9ca4376d 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1619,15 +1619,25 @@ static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, else if (ldata->raw || (L_EXTPROC(tty) && !preops)) n_tty_receive_buf_raw(tty, cp, fp, count); else if (tty->closing && !L_EXTPROC(tty)) { - if 
(la_count > 0) + if (la_count > 0) { n_tty_receive_buf_closing(tty, cp, fp, la_count, true); - if (count > la_count) - n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false); + cp += la_count; + if (fp) + fp += la_count; + count -= la_count; + } + if (count > 0) + n_tty_receive_buf_closing(tty, cp, fp, count, false); } else { - if (la_count > 0) + if (la_count > 0) { n_tty_receive_buf_standard(tty, cp, fp, la_count, true); - if (count > la_count) - n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false); + cp += la_count; + if (fp) + fp += la_count; + count -= la_count; + } + if (count > 0) + n_tty_receive_buf_standard(tty, cp, fp, count, false); flush_echoes(tty); if (tty->ops->flush_chars) diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c index 5daa38d9c6..61d81b11f6 100644 --- a/drivers/tty/serial/8250/8250_bcm7271.c +++ b/drivers/tty/serial/8250/8250_bcm7271.c @@ -675,18 +675,46 @@ static void init_real_clk_rates(struct device *dev, struct brcmuart_priv *priv) clk_set_rate(priv->baud_mux_clk, priv->default_mux_rate); } +static u32 find_quot(struct device *dev, u32 freq, u32 baud, u32 *percent) +{ + u32 quot; + u32 rate; + u64 hires_rate; + u64 hires_baud; + u64 hires_err; + + rate = freq / 16; + quot = DIV_ROUND_CLOSEST(rate, baud); + if (!quot) + return 0; + + /* increase resolution to get xx.xx percent */ + hires_rate = div_u64((u64)rate * 10000, (u64)quot); + hires_baud = (u64)baud * 10000; + + /* get the delta */ + if (hires_rate > hires_baud) + hires_err = (hires_rate - hires_baud); + else + hires_err = (hires_baud - hires_rate); + + *percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud); + + dev_dbg(dev, "Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n", + baud, freq, *percent / 100, *percent % 100); + + return quot; +} + static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv, u32 baud) { u32 percent; u32 best_percent = UINT_MAX; u32 quot; + u32 freq; u32 best_quot = 1; - u32 rate; - int best_index = -1; - u64 hires_rate; - u64 hires_baud; - u64 hires_err; + u32 best_freq = 0; int rc; int i; int real_baud; @@ -695,44 +723,35 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv, if (priv->baud_mux_clk == NULL) return; - /* Find the closest match for specified baud */ - for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) { - if (priv->real_rates[i] == 0) - continue; - rate = priv->real_rates[i] / 16; - quot = DIV_ROUND_CLOSEST(rate, baud); - if (!quot) - continue; - - /* increase resolution to get xx.xx percent */ - hires_rate = (u64)rate * 10000; - hires_baud = (u64)baud * 10000; - - hires_err = div_u64(hires_rate, (u64)quot); - - /* get the delta */ - if (hires_err > hires_baud) - hires_err = (hires_err - hires_baud); - else - hires_err = (hires_baud - hires_err); - - percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud); - dev_dbg(up->dev, - "Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n", - baud, priv->real_rates[i], percent / 100, - percent % 100); - if (percent < best_percent) { - best_percent = percent; - best_index = i; - best_quot = quot; + /* Try default_mux_rate first */ + quot = find_quot(up->dev, priv->default_mux_rate, baud, &percent); + if (quot) { + best_percent = percent; + best_freq = priv->default_mux_rate; + best_quot = quot; + } + /* If more than 1% error, find the closest match for specified baud */ + if (best_percent > 100) { + for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) { + freq = priv->real_rates[i]; + if (freq == 0 || freq == 
priv->default_mux_rate) + continue; + quot = find_quot(up->dev, freq, baud, &percent); + if (!quot) + continue; + + if (percent < best_percent) { + best_percent = percent; + best_freq = freq; + best_quot = quot; + } } } - if (best_index == -1) { + if (!best_freq) { dev_err(up->dev, "Error, %d BAUD rate is too fast.\n", baud); return; } - rate = priv->real_rates[best_index]; - rc = clk_set_rate(priv->baud_mux_clk, rate); + rc = clk_set_rate(priv->baud_mux_clk, best_freq); if (rc) dev_err(up->dev, "Error selecting BAUD MUX clock\n"); @@ -741,8 +760,8 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv, dev_err(up->dev, "Error, baud: %d has %u.%u%% error\n", baud, percent / 100, percent % 100); - real_baud = rate / 16 / best_quot; - dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", rate); + real_baud = best_freq / 16 / best_quot; + dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", best_freq); dev_dbg(up->dev, "Requested baud: %u, Actual baud: %u\n", baud, real_baud); @@ -751,7 +770,7 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv, i += (i / 2); priv->char_wait = ns_to_ktime(i); - up->uartclk = rate; + up->uartclk = best_freq; } static void brcmstb_set_termios(struct uart_port *up, diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 1300c92b87..a58890fd53 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -55,6 +55,34 @@ #define DW_UART_QUIRK_SKIP_SET_RATE BIT(2) #define DW_UART_QUIRK_IS_DMA_FC BIT(3) #define DW_UART_QUIRK_APMC0D08 BIT(4) +#define DW_UART_QUIRK_CPR_VALUE BIT(5) + +struct dw8250_platform_data { + u8 usr_reg; + u32 cpr_value; + unsigned int quirks; +}; + +struct dw8250_data { + struct dw8250_port_data data; + const struct dw8250_platform_data *pdata; + + int msr_mask_on; + int msr_mask_off; + struct clk *clk; + struct clk *pclk; + struct notifier_block clk_notifier; + struct work_struct clk_work; + struct reset_control *rst; + + unsigned int skip_autocfg:1; + unsigned int uart_16550_compatible:1; +}; + +static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data) +{ + return container_of(data, struct dw8250_data, data); +} static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb) { @@ -445,6 +473,10 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p) static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) { unsigned int quirks = data->pdata ? data->pdata->quirks : 0; + u32 cpr_value = data->pdata ? 
data->pdata->cpr_value : 0; + + if (quirks & DW_UART_QUIRK_CPR_VALUE) + data->data.cpr_value = cpr_value; #ifdef CONFIG_64BIT if (quirks & DW_UART_QUIRK_OCTEON) { @@ -727,8 +759,8 @@ static const struct dw8250_platform_data dw8250_armada_38x_data = { static const struct dw8250_platform_data dw8250_renesas_rzn1_data = { .usr_reg = DW_UART_USR, - .cpr_val = 0x00012f32, - .quirks = DW_UART_QUIRK_IS_DMA_FC, + .cpr_value = 0x00012f32, + .quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC, }; static const struct dw8250_platform_data dw8250_starfive_jh7100_data = { diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c index 3e33ddf7bc..5a2520943d 100644 --- a/drivers/tty/serial/8250/8250_dwlib.c +++ b/drivers/tty/serial/8250/8250_dwlib.c @@ -242,7 +242,6 @@ static const struct serial_rs485 dw8250_rs485_supported = { void dw8250_setup_port(struct uart_port *p) { struct dw8250_port_data *pd = p->private_data; - struct dw8250_data *data = to_dw8250_data(pd); struct uart_8250_port *up = up_to_u8250p(p); u32 reg, old_dlf; @@ -278,7 +277,7 @@ void dw8250_setup_port(struct uart_port *p) reg = dw8250_readl_ext(p, DW_UART_CPR); if (!reg) { - reg = data->pdata->cpr_val; + reg = pd->cpr_value; dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg); } if (!reg) diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h index f13e91f2ca..7dd2a8e7b7 100644 --- a/drivers/tty/serial/8250/8250_dwlib.h +++ b/drivers/tty/serial/8250/8250_dwlib.h @@ -2,15 +2,10 @@ /* Synopsys DesignWare 8250 library header file. */ #include <linux/io.h> -#include <linux/notifier.h> #include <linux/types.h> -#include <linux/workqueue.h> #include "8250.h" -struct clk; -struct reset_control; - struct dw8250_port_data { /* Port properties */ int line; @@ -19,42 +14,16 @@ struct dw8250_port_data { struct uart_8250_dma dma; /* Hardware configuration */ + u32 cpr_value; u8 dlf_size; /* RS485 variables */ bool hw_rs485_support; }; -struct dw8250_platform_data { - u8 usr_reg; - u32 cpr_val; - unsigned int quirks; -}; - -struct dw8250_data { - struct dw8250_port_data data; - const struct dw8250_platform_data *pdata; - - int msr_mask_on; - int msr_mask_off; - struct clk *clk; - struct clk *pclk; - struct notifier_block clk_notifier; - struct work_struct clk_work; - struct reset_control *rst; - - unsigned int skip_autocfg:1; - unsigned int uart_16550_compatible:1; -}; - void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old); void dw8250_setup_port(struct uart_port *p); -static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data) -{ - return container_of(data, struct dw8250_data, data); -} - static inline u32 dw8250_readl_ext(struct uart_port *p, int offset) { if (p->iotype == UPIO_MEM32BE) diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 0440df7de1..4d1e07343d 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -46,8 +46,50 @@ #define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021 #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022 +#define PCI_VENDOR_ID_CONNECT_TECH 0x12c4 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_SP_OPTO 0x0340 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_SP_OPTO_A 0x0341 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_SP_OPTO_B 0x0342 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XPRS 0x0350 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_A 0x0351 +#define 
PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_B 0x0352 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS 0x0353 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_16_XPRS_A 0x0354 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_16_XPRS_B 0x0355 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XPRS_OPTO 0x0360 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_OPTO_A 0x0361 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_OPTO_B 0x0362 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP 0x0370 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_232 0x0371 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_485 0x0372 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4_SP 0x0373 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_6_2_SP 0x0374 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_6_SP 0x0375 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_232_NS 0x0376 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XP_OPTO_LEFT 0x0380 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XP_OPTO_RIGHT 0x0381 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XP_OPTO 0x0382 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4_XPRS_OPTO 0x0392 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP 0x03A0 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_232 0x03A1 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_485 0x03A2 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_232_NS 0x03A3 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XEG001 0x0602 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_BASE 0x1000 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_2 0x1002 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_4 0x1004 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_8 0x1008 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_12 0x100C +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_16 0x1010 +#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_12_XIG00X 0x110c +#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_12_XIG01X 0x110d +#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_16 0x1110 + #define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358 #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 +#define PCI_DEVICE_ID_EXAR_XR17V252 0x0252 +#define PCI_DEVICE_ID_EXAR_XR17V254 0x0254 +#define PCI_DEVICE_ID_EXAR_XR17V258 0x0258 #define PCI_SUBDEVICE_ID_USR_2980 0x0128 #define PCI_SUBDEVICE_ID_USR_2981 0x0129 diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index 9ff6bbe9c0..d14988d149 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -209,15 +209,19 @@ static int mtk8250_startup(struct uart_port *port) static void mtk8250_shutdown(struct uart_port *port) { -#ifdef CONFIG_SERIAL_8250_DMA struct uart_8250_port *up = up_to_u8250p(port); struct mtk8250_data *data = port->private_data; + int irq = data->rx_wakeup_irq; +#ifdef CONFIG_SERIAL_8250_DMA if (up->dma) data->rx_status = DMA_RX_SHUTDOWN; #endif - return serial8250_do_shutdown(port); + serial8250_do_shutdown(port); + + if (irq >= 0) + serial8250_do_set_mctrl(&up->port, TIOCM_RTS); } static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask) diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c index f1a51b00b1..ba96fa913e 100644 --- a/drivers/tty/serial/8250/8250_pxa.c +++ b/drivers/tty/serial/8250/8250_pxa.c @@ -125,6 +125,7 @@ static int serial_pxa_probe(struct platform_device *pdev) uart.port.iotype = UPIO_MEM32; uart.port.regshift = 2; uart.port.fifosize = 64; + uart.tx_loadsz = 32; uart.dl_write = serial_pxa_dl_write; 
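To make the error arithmetic in the find_quot()/set_clock_mux() rework of 8250_bcm7271 above concrete: the error is expressed in hundredths of a percent, and set_clock_mux() only scans real_rates[] when the default MUX clock misses the requested baud by more than 1% (percent > 100). A standalone restatement of the same calculation, using freq = 81 MHz and baud = 921600 purely as example inputs:

#include <stdint.h>
#include <stdio.h>

/* Error is returned in hundredths of a percent, as in find_quot(). */
static uint32_t quot_and_error(uint32_t freq, uint32_t baud, uint32_t *percent)
{
	uint32_t rate = freq / 16;
	uint32_t quot = (rate + baud / 2) / baud;	/* DIV_ROUND_CLOSEST */
	uint64_t hires_rate, hires_baud, hires_err;

	if (!quot)
		return 0;

	hires_rate = (uint64_t)rate * 10000 / quot;
	hires_baud = (uint64_t)baud * 10000;
	hires_err = hires_rate > hires_baud ? hires_rate - hires_baud
					    : hires_baud - hires_rate;
	*percent = (uint32_t)((hires_err + baud / 2) / baud);
	return quot;
}

int main(void)
{
	uint32_t percent;
	uint32_t quot = quot_and_error(81000000, 921600, &percent);

	/* Prints "quot=5 error=9.86%": worse than 1%, so the full scan runs. */
	printf("quot=%u error=%u.%02u%%\n", quot, percent / 100, percent % 100);
	return 0;
}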
ret = serial8250_register_8250_port(&uart); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index e148132506..09c1678ddf 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -26,6 +26,7 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/dma-mapping.h> #include <asm/irq.h> @@ -2010,7 +2011,7 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count) struct imx_port *sport = imx_uart_ports[co->index]; struct imx_port_ucrs old_ucr; unsigned long flags; - unsigned int ucr1; + unsigned int ucr1, usr2; int locked = 1; if (sport->port.sysrq) @@ -2041,8 +2042,8 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count) * Finally, wait for transmitter to become empty * and restore UCR1/2/3 */ - while (!(imx_uart_readl(sport, USR2) & USR2_TXDC)); - + read_poll_timeout_atomic(imx_uart_readl, usr2, usr2 & USR2_TXDC, + 0, USEC_PER_SEC, false, sport, USR2); imx_uart_ucrs_restore(sport, &old_ucr); if (locked) diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index 5efb2b593b..3d2b83d6ab 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -45,6 +45,9 @@ #include <linux/freezer.h> #include <linux/tty.h> #include <linux/tty_flip.h> +#include <linux/types.h> + +#include <asm/unaligned.h> #include <linux/serial_max3100.h> @@ -191,7 +194,7 @@ static void max3100_timeout(struct timer_list *t) static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx) { struct spi_message message; - u16 etx, erx; + __be16 etx, erx; int status; struct spi_transfer tran = { .tx_buf = &etx, @@ -213,7 +216,7 @@ static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx) return 0; } -static int max3100_handlerx(struct max3100_port *s, u16 rx) +static int max3100_handlerx_unlocked(struct max3100_port *s, u16 rx) { unsigned int status = 0; int ret = 0, cts; @@ -254,6 +257,17 @@ static int max3100_handlerx(struct max3100_port *s, u16 rx) return ret; } +static int max3100_handlerx(struct max3100_port *s, u16 rx) +{ + unsigned long flags; + int ret; + + uart_port_lock_irqsave(&s->port, &flags); + ret = max3100_handlerx_unlocked(s, rx); + uart_port_unlock_irqrestore(&s->port, flags); + return ret; +} + static void max3100_work(struct work_struct *w) { struct max3100_port *s = container_of(w, struct max3100_port, work); @@ -738,13 +752,14 @@ static int max3100_probe(struct spi_device *spi) mutex_lock(&max3100s_lock); if (!uart_driver_registered) { - uart_driver_registered = 1; retval = uart_register_driver(&max3100_uart_driver); if (retval) { printk(KERN_ERR "Couldn't register max3100 uart driver\n"); mutex_unlock(&max3100s_lock); return retval; } + + uart_driver_registered = 1; } for (i = 0; i < MAX_MAX3100; i++) @@ -830,6 +845,7 @@ static void max3100_remove(struct spi_device *spi) } pr_debug("removing max3100 driver\n"); uart_unregister_driver(&max3100_uart_driver); + uart_driver_registered = 0; mutex_unlock(&max3100s_lock); } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 929206a9a6..ace2c4b333 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/property.h> #include <linux/regmap.h> +#include <linux/sched.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/tty.h> @@ -25,7 +26,6 @@ #include <linux/spi/spi.h> #include <linux/uaccess.h> #include <linux/units.h> -#include 
<uapi/linux/sched/types.h> #define SC16IS7XX_NAME "sc16is7xx" #define SC16IS7XX_MAX_DEVS 8 @@ -554,16 +554,28 @@ static bool sc16is7xx_regmap_noinc(struct device *dev, unsigned int reg) return reg == SC16IS7XX_RHR_REG; } +/* + * Configure programmable baud rate generator (divisor) according to the + * desired baud rate. + * + * From the datasheet, the divisor is computed according to: + * + * XTAL1 input frequency + * ----------------------- + * prescaler + * divisor = --------------------------- + * baud-rate x sampling-rate + */ static int sc16is7xx_set_baud(struct uart_port *port, int baud) { struct sc16is7xx_one *one = to_sc16is7xx_one(port, port); u8 lcr; - u8 prescaler = 0; + unsigned int prescaler = 1; unsigned long clk = port->uartclk, div = clk / 16 / baud; if (div >= BIT(16)) { - prescaler = SC16IS7XX_MCR_CLKSEL_BIT; - div /= 4; + prescaler = 4; + div /= prescaler; } /* Enable enhanced features */ @@ -573,9 +585,10 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) SC16IS7XX_EFR_ENABLE_BIT); sc16is7xx_efr_unlock(port); + /* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, SC16IS7XX_MCR_CLKSEL_BIT, - prescaler); + prescaler == 1 ? 0 : SC16IS7XX_MCR_CLKSEL_BIT); /* Backup LCR and access special register set (DLL/DLH) */ lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG); @@ -591,7 +604,7 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) /* Restore LCR and access to general register set */ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); - return DIV_ROUND_CLOSEST(clk / 16, div); + return DIV_ROUND_CLOSEST((clk / prescaler) / 16, div); } static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c index 7e3a1c7b09..66fd117d8a 100644 --- a/drivers/tty/serial/serial_port.c +++ b/drivers/tty/serial/serial_port.c @@ -63,6 +63,13 @@ static int serial_port_runtime_suspend(struct device *dev) if (port->flags & UPF_DEAD) return 0; + /* + * Nothing to do on pm_runtime_force_suspend(), see + * DEFINE_RUNTIME_DEV_PM_OPS. + */ + if (!pm_runtime_enabled(dev)) + return 0; + uart_port_lock_irqsave(port, &flags); if (!port_dev->tx_enabled) { uart_port_unlock_irqrestore(port, flags); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index e512eaa57e..a6f3517dce 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1271,9 +1271,14 @@ static void sci_dma_rx_chan_invalidate(struct sci_port *s) static void sci_dma_rx_release(struct sci_port *s) { struct dma_chan *chan = s->chan_rx_saved; + struct uart_port *port = &s->port; + unsigned long flags; + uart_port_lock_irqsave(port, &flags); s->chan_rx_saved = NULL; sci_dma_rx_chan_invalidate(s); + uart_port_unlock_irqrestore(port, flags); + dmaengine_terminate_sync(chan); dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0], sg_dma_address(&s->sg_rx[0])); diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 3f68e213df..d80e9d4c97 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -545,6 +545,12 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) goto out; } + if (tty->ops->ldisc_ok) { + retval = tty->ops->ldisc_ok(tty, disc); + if (retval) + goto out; + } + old_ldisc = tty->ldisc; /* Shutdown the old discipline. 
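The divisor comment added to sc16is7xx_set_baud() above boils down to div = (uartclk / prescaler) / 16 / baud, with the divide-by-4 prescaler engaged only when the plain divisor would not fit in the 16-bit DLL/DLH pair. A small hedged restatement, assuming a 14.7456 MHz crystal (a common choice, not something stated in this series):

/*
 * Example: clk = 14745600, baud = 9600 -> div = 14745600 / 16 / 9600 = 96,
 * which fits in the 16-bit DLL/DLH pair, so the prescaler stays at 1.
 * Only when clk / (16 * baud) >= 65536 is the divide-by-4 prescaler
 * (MCR CLKSEL) engaged and the divisor reduced to match.
 */
static unsigned long sc16is7xx_div_sketch(unsigned long clk, unsigned int baud,
					  unsigned int *prescaler)
{
	unsigned long div = clk / 16 / baud;

	*prescaler = 1;
	if (div >= (1UL << 16)) {
		*prescaler = 4;
		div /= 4;
	}
	return div;	/* achieved baud is roughly clk / *prescaler / 16 / div */
}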
*/ diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 9b5b98dfc8..cd87e3d129 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -3576,6 +3576,15 @@ static void con_cleanup(struct tty_struct *tty) tty_port_put(&vc->port); } +/* + * We can't deal with anything but the N_TTY ldisc, + * because we can sleep in our write() routine. + */ +static int con_ldisc_ok(struct tty_struct *tty, int ldisc) +{ + return ldisc == N_TTY ? 0 : -EINVAL; +} + static int default_color = 7; /* white */ static int default_italic_color = 2; // green (ASCII) static int default_underline_color = 3; // cyan (ASCII) @@ -3695,6 +3704,7 @@ static const struct tty_operations con_ops = { .resize = vt_resize, .shutdown = con_shutdown, .cleanup = con_cleanup, + .ldisc_ok = con_ldisc_ok, }; static struct cdev vc0_cdev; diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 768bf87cd8..8944548c30 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -601,8 +601,7 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba, addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA; while (sq_head_slot != hwq->sq_tail_slot) { - utrd = hwq->sqe_base_addr + - sq_head_slot * sizeof(struct utp_transfer_req_desc); + utrd = hwq->sqe_base_addr + sq_head_slot; match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA; if (addr == match) { ufshcd_mcq_nullify_sqe(utrd); @@ -635,20 +634,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct ufs_hw_queue *hwq; unsigned long flags; - int err = FAILED; + int err; if (!ufshcd_cmd_inflight(lrbp->cmd)) { dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n", __func__, tag); - goto out; + return FAILED; } /* Skip task abort in case previous aborts failed and report failure */ if (lrbp->req_abort_skip) { dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n", __func__, tag); - goto out; + return FAILED; } hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); @@ -660,7 +659,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) */ dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n", __func__, hwq->id, tag); - goto out; + return FAILED; } /* @@ -668,18 +667,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) * in the completion queue either. Query the device to see if * the command is being processed in the device. 
*/ - if (ufshcd_try_to_abort_task(hba, tag)) { + err = ufshcd_try_to_abort_task(hba, tag); + if (err) { dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err); lrbp->req_abort_skip = true; - goto out; + return FAILED; } - err = SUCCESS; spin_lock_irqsave(&hwq->cq_lock, flags); if (ufshcd_cmd_inflight(lrbp->cmd)) ufshcd_release_scsi_cmd(hba, lrbp); spin_unlock_irqrestore(&hwq->cq_lock, flags); -out: - return err; + return SUCCESS; } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index a0f8e93016..f7d04f7c00 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1392,7 +1392,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) * make sure that there are no outstanding requests when * clock scaling is in progress */ - ufshcd_scsi_block_requests(hba); + blk_mq_quiesce_tagset(&hba->host->tag_set); mutex_lock(&hba->wb_mutex); down_write(&hba->clk_scaling_lock); @@ -1401,7 +1401,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) ret = -EBUSY; up_write(&hba->clk_scaling_lock); mutex_unlock(&hba->wb_mutex); - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); goto out; } @@ -1422,7 +1422,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc mutex_unlock(&hba->wb_mutex); - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); ufshcd_release(hba); } @@ -4289,7 +4289,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) * Make sure UIC command completion interrupt is disabled before * issuing UIC command. */ - wmb(); + ufshcd_readl(hba, REG_INTERRUPT_ENABLE); reenable_intr = true; } spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -8972,6 +8972,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) { /* Reset the device and controller before doing reinit */ ufshcd_device_reset(hba); + ufs_put_device_desc(hba); ufshcd_hba_stop(hba); ufshcd_vops_reinit_notify(hba); ret = ufshcd_hba_enable(hba); @@ -10400,7 +10401,7 @@ int ufshcd_system_restore(struct device *dev) * are updated with the latest queue addresses. Only after * updating these addresses, we can queue the new commands. */ - mb(); + ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H); /* Resuming from hibernate, assume that link was OFF */ ufshcd_set_link_off(hba); @@ -10621,7 +10622,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) * Make sure that UFS interrupts are disabled and any pending interrupt * status is cleared before registering UFS interrupt handler. */ - mb(); + ufshcd_readl(hba, REG_INTERRUPT_ENABLE); /* IRQ registration */ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c index bb30267da4..66811d8d19 100644 --- a/drivers/ufs/host/cdns-pltfrm.c +++ b/drivers/ufs/host/cdns-pltfrm.c @@ -136,7 +136,7 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba) * Make sure the register was updated, * UniPro layer will not work with an incorrect value. 
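The ufshcd and cdns-pltfrm hunks above replace mb()/wmb() with a read-back of the register that was just written: MMIO writes can be posted, and reading from the same device is the usual way to guarantee the write has actually reached it, which a CPU-side barrier alone does not ensure. A minimal sketch of the read-back pattern, with an invented register offset:

#include <linux/io.h>

/* Flush a posted MMIO write by reading the register back. */
static void example_enable_and_flush(void __iomem *base)
{
	writel(0x1, base + 0x10);	/* hypothetical ENABLE register */
	readl(base + 0x10);		/* read-back orders the write vs. later work */
}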
*/ - mb(); + ufshcd_readl(hba, CDNS_UFS_REG_HCLKDIV); return 0; } diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 7a00004bfd..62c343444d 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -284,9 +284,6 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) if (host->hw_ver.major >= 0x05) ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0); - - /* make sure above configuration is applied before we return */ - mb(); } /* @@ -415,7 +412,7 @@ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) REG_UFS_CFG2); /* Ensure that HW clock gating is enabled before next operations */ - mb(); + ufshcd_readl(hba, REG_UFS_CFG2); } static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, @@ -507,7 +504,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, * make sure above write gets applied before we return from * this function. */ - mb(); + ufshcd_readl(hba, REG_UFS_SYS1CLK_1US); } return 0; diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index 9dd9a391eb..b9de170983 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -151,10 +151,10 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba) ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, UFS_PHY_SOFT_RESET, REG_UFS_CFG1); /* - * Make sure assertion of ufs phy reset is written to - * register before returning + * Dummy read to ensure the write takes effect before doing any sort + * of delay */ - mb(); + ufshcd_readl(hba, REG_UFS_CFG1); } static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba) @@ -162,10 +162,10 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba) ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, 0, REG_UFS_CFG1); /* - * Make sure de-assertion of ufs phy reset is written to - * register before returning + * Dummy read to ensure the write takes effect before doing any sort + * of delay */ - mb(); + ufshcd_readl(hba, REG_UFS_CFG1); } /* Host controller hardware version: major.minor.step */ diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 3a9a0dd4be..949eca0ade 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/ obj-$(CONFIG_USB_FSL_USB2) += host/ obj-$(CONFIG_USB_FOTG210_HCD) += host/ obj-$(CONFIG_USB_MAX3421_HCD) += host/ +obj-$(CONFIG_USB_XEN_HCD) += host/ obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index c553decb54..6830be4419 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -266,14 +266,14 @@ static void wdm_int_callback(struct urb *urb) dev_err(&desc->intf->dev, "Stall on int endpoint\n"); goto sw; /* halt is cleared in work */ default: - dev_err(&desc->intf->dev, + dev_err_ratelimited(&desc->intf->dev, "nonzero urb status received: %d\n", status); break; } } if (urb->actual_length < sizeof(struct usb_cdc_notification)) { - dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n", + dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n", urb->actual_length); goto exit; } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index c0e005670d..fb1aa0d4fc 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1623,6 +1623,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb) struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); struct usb_anchor *anchor = urb->anchor; int status = urb->unlinked; + unsigned long flags; urb->hcpriv = NULL; if (unlikely((urb->transfer_flags & 
URB_SHORT_NOT_OK) && @@ -1640,13 +1641,14 @@ static void __usb_hcd_giveback_urb(struct urb *urb) /* pass ownership to the completion handler */ urb->status = status; /* - * This function can be called in task context inside another remote - * coverage collection section, but kcov doesn't support that kind of - * recursion yet. Only collect coverage in softirq context for now. + * Only collect coverage in the softirq context and disable interrupts + * to avoid scenarios with nested remote coverage collection sections + * that KCOV does not support. + * See the comment next to kcov_remote_start_usb_softirq() for details. */ - kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); + flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); urb->complete(urb); - kcov_remote_stop_softirq(); + kcov_remote_stop_softirq(flags); usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 497deed38c..9ef821ca2f 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -8,6 +8,7 @@ * Sebastian Andrzej Siewior <bigeasy@linutronix.de> */ +#include <linux/dmi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -220,6 +221,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc, if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) { struct gpio_desc *gpio; + const char *bios_ver; int ret; /* On BYT the FW does not always enable the refclock */ @@ -277,8 +279,12 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc, * detection. These can be identified by them _not_ * using the standard ACPI battery and ac drivers. */ + bios_ver = dmi_get_system_info(DMI_BIOS_VERSION); if (acpi_dev_present("INT33FD", "1", 2) && - acpi_quirk_skip_acpi_ac_and_battery()) { + acpi_quirk_skip_acpi_ac_and_battery() && + /* Lenovo Yoga Tablet 2 Pro 1380 uses LC824206XA instead */ + !(bios_ver && + strstarts(bios_ver, "BLADE_21.X64.0005.R00.1504101516"))) { dev_info(&pdev->dev, "Using TUSB1211 phy for charger detection\n"); swnode = &dwc3_pci_intel_phy_charger_detect_swnode; } diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c index 958fc40eae..0655afe7f9 100644 --- a/drivers/usb/fotg210/fotg210-core.c +++ b/drivers/usb/fotg210/fotg210-core.c @@ -95,6 +95,7 @@ static int fotg210_gemini_init(struct fotg210 *fotg, struct resource *res, /** * fotg210_vbus() - Called by gadget driver to enable/disable VBUS + * @fotg: pointer to a private fotg210 object * @enable: true to enable VBUS, false to disable VBUS */ void fotg210_vbus(struct fotg210 *fotg, bool enable) diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 3c8a9dd585..2db01e03bf 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -1029,9 +1029,9 @@ static inline int hidg_get_minor(void) { int ret; - ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL); + ret = ida_alloc(&hidg_ida, GFP_KERNEL); if (ret >= HIDG_MINORS) { - ida_simple_remove(&hidg_ida, ret); + ida_free(&hidg_ida, ret); ret = -ENODEV; } @@ -1176,7 +1176,7 @@ static const struct config_item_type hid_func_type = { static inline void hidg_put_minor(int minor) { - ida_simple_remove(&hidg_ida, minor); + ida_free(&hidg_ida, minor); } static void hidg_free_inst(struct usb_function_instance *f) diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 076dd4c1be..ba7d180cc9 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ 
b/drivers/usb/gadget/function/f_printer.c @@ -1312,9 +1312,9 @@ static inline int gprinter_get_minor(void) { int ret; - ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL); + ret = ida_alloc(&printer_ida, GFP_KERNEL); if (ret >= PRINTER_MINORS) { - ida_simple_remove(&printer_ida, ret); + ida_free(&printer_ida, ret); ret = -ENODEV; } @@ -1323,7 +1323,7 @@ static inline int gprinter_get_minor(void) static inline void gprinter_put_minor(int minor) { - ida_simple_remove(&printer_ida, minor); + ida_free(&printer_ida, minor); } static int gprinter_setup(int); diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 29bf8664bf..12c5d9cf45 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -869,12 +869,12 @@ EXPORT_SYMBOL_GPL(rndis_msg_parser); static inline int rndis_get_nr(void) { - return ida_simple_get(&rndis_ida, 0, 1000, GFP_KERNEL); + return ida_alloc_max(&rndis_ida, 999, GFP_KERNEL); } static inline void rndis_put_nr(int nr) { - ida_simple_remove(&rndis_ida, nr); + ida_free(&rndis_ida, nr); } struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v) diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index 4a42574b4a..ec1dceb087 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -57,13 +57,13 @@ struct uac_rtd_params { /* Volume/Mute controls and their state */ int fu_id; /* Feature Unit ID */ - struct snd_kcontrol *snd_kctl_volume; - struct snd_kcontrol *snd_kctl_mute; + struct snd_ctl_elem_id snd_kctl_volume_id; + struct snd_ctl_elem_id snd_kctl_mute_id; s16 volume_min, volume_max, volume_res; s16 volume; int mute; - struct snd_kcontrol *snd_kctl_rate; /* read-only current rate */ + struct snd_ctl_elem_id snd_kctl_rate_id; /* read-only current rate */ int srate; /* selected samplerate */ int active; /* playback/capture running */ @@ -494,14 +494,13 @@ static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep) static void set_active(struct uac_rtd_params *prm, bool active) { // notifying through the Rate ctrl - struct snd_kcontrol *kctl = prm->snd_kctl_rate; unsigned long flags; spin_lock_irqsave(&prm->lock, flags); if (prm->active != active) { prm->active = active; snd_ctl_notify(prm->uac->card, SNDRV_CTL_EVENT_MASK_VALUE, - &kctl->id); + &prm->snd_kctl_rate_id); } spin_unlock_irqrestore(&prm->lock, flags); } @@ -807,7 +806,7 @@ int u_audio_set_volume(struct g_audio *audio_dev, int playback, s16 val) if (change) snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE, - &prm->snd_kctl_volume->id); + &prm->snd_kctl_volume_id); return 0; } @@ -856,7 +855,7 @@ int u_audio_set_mute(struct g_audio *audio_dev, int playback, int val) if (change) snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE, - &prm->snd_kctl_mute->id); + &prm->snd_kctl_mute_id); return 0; } @@ -1331,7 +1330,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name, err = snd_ctl_add(card, kctl); if (err < 0) goto snd_fail; - prm->snd_kctl_mute = kctl; + prm->snd_kctl_mute_id = kctl->id; prm->mute = 0; } @@ -1359,7 +1358,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name, err = snd_ctl_add(card, kctl); if (err < 0) goto snd_fail; - prm->snd_kctl_volume = kctl; + prm->snd_kctl_volume_id = kctl->id; prm->volume = fu->volume_max; prm->volume_max = fu->volume_max; prm->volume_min = fu->volume_min; @@ -1383,7 +1382,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name, err = 
snd_ctl_add(card, kctl); if (err < 0) goto snd_fail; - prm->snd_kctl_rate = kctl; + prm->snd_kctl_rate_id = kctl->id; } strscpy(card->driver, card_name, sizeof(card->driver)); @@ -1420,6 +1419,8 @@ void g_audio_cleanup(struct g_audio *g_audio) return; uac = g_audio->uac; + g_audio->uac = NULL; + card = uac->card; if (card) snd_card_free_when_closed(card); diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index a4377df612..6fac696ea8 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -13,6 +13,7 @@ #include "uvc_configfs.h" #include <linux/sort.h> +#include <linux/usb/uvc.h> #include <linux/usb/video.h> /* ----------------------------------------------------------------------------- @@ -2260,6 +2261,8 @@ static ssize_t uvcg_uncompressed_guid_format_store(struct config_item *item, struct f_uvc_opts *opts; struct config_item *opts_item; struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex; + const struct uvc_format_desc *format; + u8 tmpguidFormat[sizeof(ch->desc.guidFormat)]; int ret; mutex_lock(su_mutex); /* for navigating configfs hierarchy */ @@ -2273,7 +2276,16 @@ static ssize_t uvcg_uncompressed_guid_format_store(struct config_item *item, goto end; } - memcpy(ch->desc.guidFormat, page, + memcpy(tmpguidFormat, page, + min(sizeof(tmpguidFormat), len)); + + format = uvc_format_by_guid(tmpguidFormat); + if (!format) { + ret = -EINVAL; + goto end; + } + + memcpy(ch->desc.guidFormat, tmpguidFormat, min(sizeof(ch->desc.guidFormat), len)); ret = sizeof(ch->desc.guidFormat); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 69dd866698..990008aebe 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2269,24 +2269,24 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) } static struct xhci_interrupter * -xhci_alloc_interrupter(struct xhci_hcd *xhci, int segs, gfp_t flags) +xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags) { struct device *dev = xhci_to_hcd(xhci)->self.sysdev; struct xhci_interrupter *ir; - unsigned int num_segs = segs; + unsigned int max_segs; int ret; + if (!segs) + segs = ERST_DEFAULT_SEGS; + + max_segs = BIT(HCS_ERST_MAX(xhci->hcs_params2)); + segs = min(segs, max_segs); + ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev)); if (!ir) return NULL; - /* number of ring segments should be greater than 0 */ - if (segs <= 0) - num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2), - ERST_MAX_SEGS); - - ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0, - flags); + ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags); if (!ir->event_ring) { xhci_warn(xhci, "Failed to allocate interrupter event ring\n"); kfree(ir); @@ -2344,7 +2344,7 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, } struct xhci_interrupter * -xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg) +xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_interrupter *ir; @@ -2354,7 +2354,7 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg) if (!xhci->interrupters || xhci->max_interrupters <= 1) return NULL; - ir = xhci_alloc_interrupter(xhci, num_seg, GFP_KERNEL); + ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL); if (!ir) return NULL; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 
93b6976480..febf647234 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -36,6 +36,7 @@ #define PCI_VENDOR_ID_ETRON 0x1b6f #define PCI_DEVICE_ID_EJ168 0x7023 +#define PCI_DEVICE_ID_EJ188 0x7052 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 @@ -270,17 +271,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) "QUIRK: Fresco Logic revision %u " "has broken MSI implementation", pdev->revision); - xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009) xhci->quirks |= XHCI_BROKEN_STREAMS; - if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && - pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100) - xhci->quirks |= XHCI_TRUST_TX_LENGTH; - if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; @@ -307,11 +303,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_RESET_ON_RESUME; } - if (pdev->vendor == PCI_VENDOR_ID_AMD) { - xhci->quirks |= XHCI_TRUST_TX_LENGTH; - if (pdev->device == 0x43f7) - xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; - } + if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43f7) + xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if ((pdev->vendor == PCI_VENDOR_ID_AMD) && ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || @@ -399,12 +392,16 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_EJ168) { xhci->quirks |= XHCI_RESET_ON_RESUME; - xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_BROKEN_STREAMS; } + if (pdev->vendor == PCI_VENDOR_ID_ETRON && + pdev->device == PCI_DEVICE_ID_EJ188) { + xhci->quirks |= XHCI_RESET_ON_RESUME; + xhci->quirks |= XHCI_BROKEN_STREAMS; + } + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0014) { - xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_ZERO_64B_REGS; } if (pdev->vendor == PCI_VENDOR_ID_RENESAS && @@ -434,7 +431,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) { - xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_NO_64BIT_SUPPORT; } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index ab9c5969e4..8b35764772 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -214,8 +214,7 @@ static int xhci_rcar_resume_quirk(struct usb_hcd *hcd) */ #define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \ .firmware_name = firmware, \ - .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \ - XHCI_SLOW_SUSPEND, \ + .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND, \ .init_quirk = xhci_rcar_init_quirk, \ .plat_start = xhci_rcar_start, \ .resume_quirk = xhci_rcar_resume_quirk, @@ -229,8 +228,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = { }; static const struct xhci_plat_priv xhci_plat_renesas_rzv2m = { - .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | - XHCI_SLOW_SUSPEND, + .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND, .init_quirk = xhci_rzv2m_init_quirk, .plat_start = xhci_rzv2m_start, }; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 575f0fd9c9..48d745e9f9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1034,13 +1034,27 @@ static int 
xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) break; case TD_DIRTY: /* TD is cached, clear it */ case TD_HALTED: + case TD_CLEARING_CACHE_DEFERRED: + if (cached_td) { + if (cached_td->urb->stream_id != td->urb->stream_id) { + /* Multiple streams case, defer move dq */ + xhci_dbg(xhci, + "Move dq deferred: stream %u URB %p\n", + td->urb->stream_id, td->urb); + td->cancel_status = TD_CLEARING_CACHE_DEFERRED; + break; + } + + /* Should never happen, but clear the TD if it does */ + xhci_warn(xhci, + "Found multiple active URBs %p and %p in stream %u?\n", + td->urb, cached_td->urb, + td->urb->stream_id); + td_to_noop(xhci, ring, cached_td, false); + cached_td->cancel_status = TD_CLEARED; + } + td->cancel_status = TD_CLEARING_CACHE; - if (cached_td) - /* FIXME stream case, several stopped rings */ - xhci_dbg(xhci, - "Move dq past stream %u URB %p instead of stream %u URB %p\n", - td->urb->stream_id, td->urb, - cached_td->urb->stream_id, cached_td->urb); cached_td = td; break; } @@ -1060,10 +1074,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) if (err) { /* Failed to move past cached td, just set cached TDs to no-op */ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { - if (td->cancel_status != TD_CLEARING_CACHE) + /* + * Deferred TDs need to have the deq pointer set after the above command + * completes, so if that failed we just give up on all of them (and + * complain loudly since this could cause issues due to caching). + */ + if (td->cancel_status != TD_CLEARING_CACHE && + td->cancel_status != TD_CLEARING_CACHE_DEFERRED) continue; - xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", - td->urb); + xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", + td->urb); td_to_noop(xhci, ring, td, false); td->cancel_status = TD_CLEARED; } @@ -1350,6 +1370,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, struct xhci_ep_ctx *ep_ctx; struct xhci_slot_ctx *slot_ctx; struct xhci_td *td, *tmp_td; + bool deferred = false; ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); @@ -1436,6 +1457,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", __func__, td->urb); xhci_td_cleanup(ep->xhci, td, ep_ring, td->status); + } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) { + deferred = true; } else { xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", __func__, td->urb, td->cancel_status); @@ -1445,8 +1468,17 @@ cleanup: ep->ep_state &= ~SET_DEQ_PENDING; ep->queued_deq_seg = NULL; ep->queued_deq_ptr = NULL; - /* Restart any rings with pending URBs */ - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + + if (deferred) { + /* We have more streams to clear */ + xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n", + __func__); + xhci_invalidate_cancelled_tds(ep); + } else { + /* Restart any rings with pending URBs */ + xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__); + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + } } static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, @@ -2399,8 +2431,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, break; if (remaining) { frame->status = short_framestatus; - if (xhci->quirks & XHCI_TRUST_TX_LENGTH) - sum_trbs_for_length = true; + 
sum_trbs_for_length = true; break; } frame->status = 0; @@ -2535,9 +2566,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, goto finish_td; case COMP_STOPPED_LENGTH_INVALID: /* stopped on ep trb with invalid length, exclude it */ - ep_trb_len = 0; - remaining = 0; - break; + td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb); + goto finish_td; case COMP_USB_TRANSACTION_ERROR: if (xhci->quirks & XHCI_NO_SOFT_RETRY || (ep->err_count++ > MAX_SOFT_RETRY) || @@ -2650,15 +2680,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, * transfer type */ case COMP_SUCCESS: - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) - break; - if (xhci->quirks & XHCI_TRUST_TX_LENGTH || - ep_ring->last_td_was_short) + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { trb_comp_code = COMP_SHORT_PACKET; - else - xhci_warn_ratelimited(xhci, - "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n", - slot_id, ep_index); + xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n", + slot_id, ep_index, ep_ring->last_td_was_short); + } break; case COMP_SHORT_PACKET: break; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 6f4bf98a62..1683d779e4 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1276,6 +1276,7 @@ enum xhci_cancelled_td_status { TD_DIRTY = 0, TD_HALTED, TD_CLEARING_CACHE, + TD_CLEARING_CACHE_DEFERRED, TD_CLEARED, }; @@ -1392,8 +1393,8 @@ struct urb_priv { struct xhci_td td[] __counted_by(num_tds); }; -/* Reasonable limit for number of Event Ring segments (spec allows 32k) */ -#define ERST_MAX_SEGS 2 +/* Number of Event Ring segments to allocate, when amount is not specified. (spec allows 32k) */ +#define ERST_DEFAULT_SEGS 2 /* Poll every 60 seconds */ #define POLL_TIMEOUT 60 /* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */ @@ -1589,7 +1590,7 @@ struct xhci_hcd { #define XHCI_RESET_ON_RESUME BIT_ULL(7) #define XHCI_SW_BW_CHECKING BIT_ULL(8) #define XHCI_AMD_0x96_HOST BIT_ULL(9) -#define XHCI_TRUST_TX_LENGTH BIT_ULL(10) +#define XHCI_TRUST_TX_LENGTH BIT_ULL(10) /* Deprecated */ #define XHCI_LPM_SUPPORT BIT_ULL(11) #define XHCI_INTEL_HOST BIT_ULL(12) #define XHCI_SPURIOUS_REBOOT BIT_ULL(13) @@ -1729,8 +1730,6 @@ static inline bool xhci_has_one_roothub(struct xhci_hcd *xhci) dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_warn(xhci, fmt, args...) \ dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args) -#define xhci_warn_ratelimited(xhci, fmt, args...) \ - dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_info(xhci, fmt, args...) 
\ dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args) @@ -1833,7 +1832,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); struct xhci_interrupter * -xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg); +xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs); void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir); diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index b00d92db5d..eb5a8e0d9e 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -677,7 +677,7 @@ static int uss720_probe(struct usb_interface *intf, struct parport_uss720_private *priv; struct parport *pp; unsigned char reg; - int i; + int ret; dev_dbg(&intf->dev, "probe: vendor id 0x%x, device id 0x%x\n", le16_to_cpu(usbdev->descriptor.idVendor), @@ -688,8 +688,8 @@ static int uss720_probe(struct usb_interface *intf, usb_put_dev(usbdev); return -ENODEV; } - i = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2); - dev_dbg(&intf->dev, "set interface result %d\n", i); + ret = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2); + dev_dbg(&intf->dev, "set interface result %d\n", ret); interface = intf->cur_altsetting; @@ -725,12 +725,18 @@ static int uss720_probe(struct usb_interface *intf, set_1284_register(pp, 7, 0x00, GFP_KERNEL); set_1284_register(pp, 6, 0x30, GFP_KERNEL); /* PS/2 mode */ set_1284_register(pp, 2, 0x0c, GFP_KERNEL); - /* debugging */ - get_1284_register(pp, 0, ®, GFP_KERNEL); + + /* The Belkin F5U002 Rev 2 P80453-B USB parallel port adapter shares the + * device ID 050d:0002 with some other device that works with this + * driver, but it itself does not. Detect and handle the bad cable + * here. 
*/ + ret = get_1284_register(pp, 0, ®, GFP_KERNEL); dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg); + if (ret < 0) + return ret; - i = usb_find_last_int_in_endpoint(interface, &epd); - if (!i) { + ret = usb_find_last_int_in_endpoint(interface, &epd); + if (!ret) { dev_dbg(&intf->dev, "epaddr %d interval %d\n", epd->bEndpointAddress, epd->bInterval); } diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 115f05a620..40d34cc283 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -105,6 +105,8 @@ struct alauda_info { unsigned char sense_key; unsigned long sense_asc; /* additional sense code */ unsigned long sense_ascq; /* additional sense code qualifier */ + + bool media_initialized; }; #define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) ) @@ -476,11 +478,12 @@ static int alauda_check_media(struct us_data *us) } /* Check for media change */ - if (status[0] & 0x08) { + if (status[0] & 0x08 || !info->media_initialized) { usb_stor_dbg(us, "Media change detected\n"); alauda_free_maps(&MEDIA_INFO(us)); - alauda_init_media(us); - + rc = alauda_init_media(us); + if (rc == USB_STOR_TRANSPORT_GOOD) + info->media_initialized = true; info->sense_key = UNIT_ATTENTION; info->sense_asc = 0x28; info->sense_ascq = 0x00; diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 12cf9940e5..7d87e2a521 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -86,6 +86,12 @@ static int slave_alloc (struct scsi_device *sdev) if (us->protocol == USB_PR_BULK && us->max_lun > 0) sdev->sdev_bflags |= BLIST_FORCELUN; + /* + * Some USB storage devices reset if the IO advice hints grouping mode + * page is queried. Hence skip that mode page. + */ + sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS; + return 0; } diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 08953f0d45..1ed2bdda0c 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -21,6 +21,7 @@ #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dbg.h> +#include <scsi/scsi_devinfo.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> @@ -820,6 +821,12 @@ static int uas_slave_alloc(struct scsi_device *sdev) struct uas_dev_info *devinfo = (struct uas_dev_info *)sdev->host->hostdata; + /* + * Some USB storage devices reset if the IO advice hints grouping mode + * page is queried. Hence skip that mode page. 
+ */ + sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS; + sdev->hostdata = devinfo; /* diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c index d3958c061a..501eddb294 100644 --- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c +++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c @@ -41,7 +41,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; const struct pmic_typec_resources *res; struct regmap *regmap; - struct device *bridge_dev; + struct auxiliary_device *bridge_dev; u32 base; int ret; @@ -92,7 +92,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev) if (!tcpm->tcpc.fwnode) return -EINVAL; - bridge_dev = drm_dp_hpd_bridge_register(tcpm->dev, to_of_node(tcpm->tcpc.fwnode)); + bridge_dev = devm_drm_dp_hpd_bridge_alloc(tcpm->dev, to_of_node(tcpm->tcpc.fwnode)); if (IS_ERR(bridge_dev)) return PTR_ERR(bridge_dev); @@ -110,8 +110,14 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev) if (ret) goto port_stop; + ret = devm_drm_dp_hpd_bridge_add(tcpm->dev, bridge_dev); + if (ret) + goto pdphy_stop; + return 0; +pdphy_stop: + tcpm->pdphy_stop(tcpm); port_stop: tcpm->port_stop(tcpm); port_unregister: diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 8a1af08f71..5d4da962ac 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -3014,8 +3014,10 @@ static int tcpm_register_source_caps(struct tcpm_port *port) memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps); caps.role = TYPEC_SOURCE; - if (cap) + if (cap) { usb_power_delivery_unregister_capabilities(cap); + port->partner_source_caps = NULL; + } cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps); if (IS_ERR(cap)) @@ -6172,6 +6174,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port) port->tcpc->set_bist_data(port->tcpc, false); switch (port->state) { + case TOGGLING: case ERROR_RECOVERY: case PORT_RESET: case PORT_RESET_WAIT_OFF: diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index bd6ae92aa3..7801501837 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -619,7 +619,8 @@ static int ucsi_read_pdos(struct ucsi_connector *con, u64 command; int ret; - if (ucsi->quirks & UCSI_NO_PARTNER_PDOS) + if (is_partner && + ucsi->quirks & UCSI_NO_PARTNER_PDOS) return 0; command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num); @@ -823,12 +824,6 @@ static int ucsi_register_partner_pdos(struct ucsi_connector *con) return PTR_ERR(cap); con->partner_source_caps = cap; - - ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd); - if (ret) { - usb_power_delivery_unregister_capabilities(con->partner_source_caps); - return ret; - } } ret = ucsi_get_pdos(con, TYPEC_SINK, 1, caps.pdo); @@ -843,15 +838,9 @@ static int ucsi_register_partner_pdos(struct ucsi_connector *con) return PTR_ERR(cap); con->partner_sink_caps = cap; - - ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd); - if (ret) { - usb_power_delivery_unregister_capabilities(con->partner_sink_caps); - return ret; - } } - return 0; + return typec_partner_set_usb_power_delivery(con->partner, con->partner_pd); } static void ucsi_unregister_partner_pdos(struct ucsi_connector *con) @@ -1572,7 +1561,6 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con) } con->port_source_caps = pd_cap; - typec_port_set_usb_power_delivery(con->port, con->pd); } 
memset(&pd_caps, 0, sizeof(pd_caps)); @@ -1589,9 +1577,10 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con) } con->port_sink_caps = pd_cap; - typec_port_set_usb_power_delivery(con->port, con->pd); } + typec_port_set_usb_power_delivery(con->port, con->pd); + /* Alternate modes */ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON); if (ret) { diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c index ce08eb33e5..1d0e2d87e9 100644 --- a/drivers/usb/typec/ucsi/ucsi_glink.c +++ b/drivers/usb/typec/ucsi/ucsi_glink.c @@ -176,7 +176,8 @@ static int pmic_glink_ucsi_sync_write(struct ucsi *__ucsi, unsigned int offset, left = wait_for_completion_timeout(&ucsi->sync_ack, 5 * HZ); if (!left) { dev_err(ucsi->dev, "timeout waiting for UCSI sync write response\n"); - ret = -ETIMEDOUT; + /* return 0 here and let core UCSI code handle the CCI_BUSY */ + ret = 0; } else if (ucsi->sync_val) { dev_err(ucsi->dev, "sync write returned: %d\n", ucsi->sync_val); } @@ -243,10 +244,7 @@ static void pmic_glink_ucsi_notify(struct work_struct *work) ucsi_connector_change(ucsi->ucsi, con_num); } - if (ucsi->sync_pending && cci & UCSI_CCI_BUSY) { - ucsi->sync_val = -EBUSY; - complete(&ucsi->sync_ack); - } else if (ucsi->sync_pending && + if (ucsi->sync_pending && (cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))) { complete(&ucsi->sync_ack); } @@ -311,12 +309,14 @@ static void pmic_glink_ucsi_destroy(void *data) mutex_unlock(&ucsi->lock); } +static unsigned long quirk_sc8180x = UCSI_NO_PARTNER_PDOS; + static const struct of_device_id pmic_glink_ucsi_of_quirks[] = { - { .compatible = "qcom,qcm6490-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, }, - { .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, }, - { .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, }, - { .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, }, - { .compatible = "qcom,sm8550-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, }, + { .compatible = "qcom,qcm6490-pmic-glink", .data = &quirk_sc8180x, }, + { .compatible = "qcom,sc8180x-pmic-glink", .data = &quirk_sc8180x, }, + { .compatible = "qcom,sc8280xp-pmic-glink", .data = &quirk_sc8180x, }, + { .compatible = "qcom,sm8350-pmic-glink", .data = &quirk_sc8180x, }, + { .compatible = "qcom,sm8550-pmic-glink", .data = &quirk_sc8180x, }, {} }; @@ -354,7 +354,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev, match = of_match_device(pmic_glink_ucsi_of_quirks, dev->parent); if (match) - ucsi->ucsi->quirks = (unsigned long)match->data; + ucsi->ucsi->quirks = *(unsigned long *)match->data; ucsi_set_drvdata(ucsi->ucsi, ucsi); diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index d94d61b92c..d8c95cc16b 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -778,25 +778,26 @@ static int vfio_pci_count_devs(struct pci_dev *pdev, void *data) } struct vfio_pci_fill_info { - struct vfio_pci_dependent_device __user *devices; - struct vfio_pci_dependent_device __user *devices_end; struct vfio_device *vdev; + struct vfio_pci_dependent_device *devices; + int nr_devices; u32 count; u32 flags; }; static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data) { - struct vfio_pci_dependent_device info = { - .segment = pci_domain_nr(pdev->bus), - .bus = pdev->bus->number, - .devfn = pdev->devfn, - }; + struct vfio_pci_dependent_device *info; struct vfio_pci_fill_info *fill = data; - 
fill->count++; - if (fill->devices >= fill->devices_end) - return 0; + /* The topology changed since we counted devices */ + if (fill->count >= fill->nr_devices) + return -EAGAIN; + + info = &fill->devices[fill->count++]; + info->segment = pci_domain_nr(pdev->bus); + info->bus = pdev->bus->number; + info->devfn = pdev->devfn; if (fill->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID) { struct iommufd_ctx *iommufd = vfio_iommufd_device_ictx(fill->vdev); @@ -809,19 +810,19 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data) */ vdev = vfio_find_device_in_devset(dev_set, &pdev->dev); if (!vdev) { - info.devid = VFIO_PCI_DEVID_NOT_OWNED; + info->devid = VFIO_PCI_DEVID_NOT_OWNED; } else { int id = vfio_iommufd_get_dev_id(vdev, iommufd); if (id > 0) - info.devid = id; + info->devid = id; else if (id == -ENOENT) - info.devid = VFIO_PCI_DEVID_OWNED; + info->devid = VFIO_PCI_DEVID_OWNED; else - info.devid = VFIO_PCI_DEVID_NOT_OWNED; + info->devid = VFIO_PCI_DEVID_NOT_OWNED; } /* If devid is VFIO_PCI_DEVID_NOT_OWNED, clear owned flag. */ - if (info.devid == VFIO_PCI_DEVID_NOT_OWNED) + if (info->devid == VFIO_PCI_DEVID_NOT_OWNED) fill->flags &= ~VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED; } else { struct iommu_group *iommu_group; @@ -830,13 +831,10 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data) if (!iommu_group) return -EPERM; /* Cannot reset non-isolated devices */ - info.group_id = iommu_group_id(iommu_group); + info->group_id = iommu_group_id(iommu_group); iommu_group_put(iommu_group); } - if (copy_to_user(fill->devices, &info, sizeof(info))) - return -EFAULT; - fill->devices++; return 0; } @@ -1258,10 +1256,11 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info( { unsigned long minsz = offsetofend(struct vfio_pci_hot_reset_info, count); + struct vfio_pci_dependent_device *devices = NULL; struct vfio_pci_hot_reset_info hdr; struct vfio_pci_fill_info fill = {}; bool slot = false; - int ret = 0; + int ret, count; if (copy_from_user(&hdr, arg, minsz)) return -EFAULT; @@ -1277,9 +1276,23 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info( else if (pci_probe_reset_bus(vdev->pdev->bus)) return -ENODEV; - fill.devices = arg->devices; - fill.devices_end = arg->devices + - (hdr.argsz - sizeof(hdr)) / sizeof(arg->devices[0]); + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs, + &count, slot); + if (ret) + return ret; + + if (count > (hdr.argsz - sizeof(hdr)) / sizeof(*devices)) { + hdr.count = count; + ret = -ENOSPC; + goto header; + } + + devices = kcalloc(count, sizeof(*devices), GFP_KERNEL); + if (!devices) + return -ENOMEM; + + fill.devices = devices; + fill.nr_devices = count; fill.vdev = &vdev->vdev; if (vfio_device_cdev_opened(&vdev->vdev)) @@ -1291,16 +1304,23 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info( &fill, slot); mutex_unlock(&vdev->vdev.dev_set->lock); if (ret) - return ret; + goto out; + + if (copy_to_user(arg->devices, devices, + sizeof(*devices) * fill.count)) { + ret = -EFAULT; + goto out; + } hdr.count = fill.count; hdr.flags = fill.flags; - if (copy_to_user(arg, &hdr, minsz)) - return -EFAULT; - if (fill.count > fill.devices - arg->devices) - return -ENOSPC; - return 0; +header: + if (copy_to_user(arg, &hdr, minsz)) + ret = -EFAULT; +out: + kfree(devices); + return ret; } static int diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index fb5392b749..e80c5d75b5 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -277,8 +277,10 @@ static int vfio_intx_enable(struct 
vfio_pci_core_device *vdev, return -ENOMEM; ctx = vfio_irq_ctx_alloc(vdev, 0); - if (!ctx) + if (!ctx) { + kfree(name); return -ENOMEM; + } ctx->name = name; ctx->trigger = trigger; diff --git a/drivers/video/backlight/mp3309c.c b/drivers/video/backlight/mp3309c.c index c80a1481e7..4e98e60417 100644 --- a/drivers/video/backlight/mp3309c.c +++ b/drivers/video/backlight/mp3309c.c @@ -205,8 +205,9 @@ static int mp3309c_parse_fwnode(struct mp3309c_chip *chip, struct mp3309c_platform_data *pdata) { int ret, i; - unsigned int num_levels, tmp_value; + unsigned int tmp_value; struct device *dev = chip->dev; + int num_levels; if (!dev_fwnode(dev)) return dev_err_probe(dev, -ENODEV, "failed to get firmware node\n"); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 197b6d5268..3f46663aa5 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1648,8 +1648,8 @@ config FB_COBALT select FB_IOMEM_HELPERS config FB_SH7760 - bool "SH7760/SH7763/SH7720/SH7721 LCDC support" - depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \ + tristate "SH7760/SH7763/SH7720/SH7721 LCDC support" + depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \ || CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721) select FB_IOMEM_HELPERS help diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig index db09fe87fc..0ab8848ba2 100644 --- a/drivers/video/fbdev/core/Kconfig +++ b/drivers/video/fbdev/core/Kconfig @@ -144,6 +144,12 @@ config FB_DMAMEM_HELPERS select FB_SYS_IMAGEBLIT select FB_SYSMEM_FOPS +config FB_DMAMEM_HELPERS_DEFERRED + bool + depends on FB_CORE + select FB_DEFERRED_IO + select FB_DMAMEM_HELPERS + config FB_IOMEM_FOPS tristate depends on FB_CORE diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c index ebc9aeffdd..ac41f8f375 100644 --- a/drivers/video/fbdev/savage/savagefb_driver.c +++ b/drivers/video/fbdev/savage/savagefb_driver.c @@ -2276,7 +2276,10 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id) if (info->var.xres_virtual > 0x1000) info->var.xres_virtual = 0x1000; #endif - savagefb_check_var(&info->var, info); + err = savagefb_check_var(&info->var, info); + if (err) + goto failed; + savagefb_set_fix(info); /* diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index eb2297b375..d35d2cf999 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c @@ -1575,7 +1575,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl) */ info->fix = sh_mobile_lcdc_overlay_fix; snprintf(info->fix.id, sizeof(info->fix.id), - "SH Mobile LCDC Overlay %u", ovl->index); + "SHMobile ovl %u", ovl->index); info->fix.smem_start = ovl->dma_handle; info->fix.smem_len = ovl->fb_size; info->fix.line_length = ovl->pitch; diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c index a8fb41f1a2..0932907200 100644 --- a/drivers/video/fbdev/sis/init301.c +++ b/drivers/video/fbdev/sis/init301.c @@ -172,7 +172,7 @@ static const unsigned char SiS_HiTVGroup3_2[] = { }; /* 301C / 302ELV extended Part2 TV registers (4 tap scaler) */ - +#ifdef CONFIG_FB_SIS_315 static const unsigned char SiS_Part2CLVX_1[] = { 0x00,0x00, 0x00,0x20,0x00,0x00,0x7F,0x20,0x02,0x7F,0x7D,0x20,0x04,0x7F,0x7D,0x1F,0x06,0x7E, @@ -245,7 +245,6 @@ static const unsigned char SiS_Part2CLVX_6[] = { /* 1080i */ 0xFF,0xFF, }; -#ifdef CONFIG_FB_SIS_315 /* 661 et al LCD data structure (2.03.00) */ static 
const unsigned char SiS_LCDStruct661[] = { /* 1024x768 */ diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c index fa5d9ca6be..9c75de0656 100644 --- a/drivers/virt/acrn/mm.c +++ b/drivers/virt/acrn/mm.c @@ -155,43 +155,83 @@ int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap) int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap) { struct vm_memory_region_batch *regions_info; - int nr_pages, i = 0, order, nr_regions = 0; + int nr_pages, i, order, nr_regions = 0; struct vm_memory_mapping *region_mapping; struct vm_memory_region_op *vm_region; struct page **pages = NULL, *page; void *remap_vaddr; int ret, pinned; u64 user_vm_pa; - unsigned long pfn; struct vm_area_struct *vma; if (!vm || !memmap) return -EINVAL; + /* Get the page number of the map region */ + nr_pages = memmap->len >> PAGE_SHIFT; + if (!nr_pages) + return -EINVAL; + mmap_read_lock(current->mm); vma = vma_lookup(current->mm, memmap->vma_base); if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) { + unsigned long start_pfn, cur_pfn; + spinlock_t *ptl; + bool writable; + pte_t *ptep; + if ((memmap->vma_base + memmap->len) > vma->vm_end) { mmap_read_unlock(current->mm); return -EINVAL; } - ret = follow_pfn(vma, memmap->vma_base, &pfn); + for (i = 0; i < nr_pages; i++) { + ret = follow_pte(vma->vm_mm, + memmap->vma_base + i * PAGE_SIZE, + &ptep, &ptl); + if (ret) + break; + + cur_pfn = pte_pfn(ptep_get(ptep)); + if (i == 0) + start_pfn = cur_pfn; + writable = !!pte_write(ptep_get(ptep)); + pte_unmap_unlock(ptep, ptl); + + /* Disallow write access if the PTE is not writable. */ + if (!writable && + (memmap->attr & ACRN_MEM_ACCESS_WRITE)) { + ret = -EFAULT; + break; + } + + /* Disallow refcounted pages. */ + if (pfn_valid(cur_pfn) && + !PageReserved(pfn_to_page(cur_pfn))) { + ret = -EFAULT; + break; + } + + /* Disallow non-contiguous ranges. 
*/ + if (cur_pfn != start_pfn + i) { + ret = -EINVAL; + break; + } + } mmap_read_unlock(current->mm); - if (ret < 0) { + + if (ret) { dev_dbg(acrn_dev.this_device, "Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base); return ret; } return acrn_mm_region_add(vm, memmap->user_vm_pa, - PFN_PHYS(pfn), memmap->len, + PFN_PHYS(start_pfn), memmap->len, ACRN_MEM_TYPE_WB, memmap->attr); } mmap_read_unlock(current->mm); - /* Get the page number of the map region */ - nr_pages = memmap->len >> PAGE_SHIFT; pages = vzalloc(array_size(nr_pages, sizeof(*pages))); if (!pages) return -ENOMEM; @@ -235,12 +275,11 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap) mutex_unlock(&vm->regions_mapping_lock); /* Calculate count of vm_memory_region_op */ - while (i < nr_pages) { + for (i = 0; i < nr_pages; i += 1 << order) { page = pages[i]; VM_BUG_ON_PAGE(PageTail(page), page); order = compound_order(page); nr_regions++; - i += 1 << order; } /* Prepare the vm_memory_region_batch */ @@ -257,8 +296,7 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap) regions_info->vmid = vm->vmid; regions_info->regions_gpa = virt_to_phys(vm_region); user_vm_pa = memmap->user_vm_pa; - i = 0; - while (i < nr_pages) { + for (i = 0; i < nr_pages; i += 1 << order) { u32 region_size; page = pages[i]; @@ -274,7 +312,6 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap) vm_region++; user_vm_pa += region_size; - i += 1 << order; } /* Inform the ACRN Hypervisor to set up EPT mappings */ diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 1f5b3dd31f..89bc8da805 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -450,7 +450,7 @@ static void start_update_balloon_size(struct virtio_balloon *vb) vb->adjustment_signal_pending = true; if (!vb->adjustment_in_progress) { vb->adjustment_in_progress = true; - pm_stay_awake(vb->vdev->dev.parent); + pm_stay_awake(&vb->vdev->dev); } spin_unlock_irqrestore(&vb->adjustment_lock, flags); @@ -462,7 +462,7 @@ static void end_update_balloon_size(struct virtio_balloon *vb) spin_lock_irq(&vb->adjustment_lock); if (!vb->adjustment_signal_pending && vb->adjustment_in_progress) { vb->adjustment_in_progress = false; - pm_relax(vb->vdev->dev.parent); + pm_relax(&vb->vdev->dev); } spin_unlock_irq(&vb->adjustment_lock); } @@ -1029,6 +1029,15 @@ static int virtballoon_probe(struct virtio_device *vdev) spin_lock_init(&vb->adjustment_lock); + /* + * The virtio balloon itself can't wake up the device, but it is + * responsible for processing wakeup events passed up from the transport + * layer. Wakeup sources don't support nesting/chaining calls, so we use + * our own wakeup source to ensure wakeup events are properly handled + * without trampling on the transport layer's wakeup source. 
+ */ + device_set_wakeup_capable(&vb->vdev->dev, true); + virtio_device_ready(vdev); if (towards_target(vb)) diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index b655fccaf7..584af78165 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -348,8 +348,10 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs, vring_interrupt, 0, vp_dev->msix_names[msix_vec], vqs[i]); - if (err) + if (err) { + vp_del_vq(vqs[i]); goto error_find; + } } return 0; diff --git a/drivers/watchdog/bd9576_wdt.c b/drivers/watchdog/bd9576_wdt.c index 4a20e07fbb..f00ea1b4e4 100644 --- a/drivers/watchdog/bd9576_wdt.c +++ b/drivers/watchdog/bd9576_wdt.c @@ -29,7 +29,6 @@ struct bd9576_wdt_priv { struct gpio_desc *gpiod_en; struct device *dev; struct regmap *regmap; - bool always_running; struct watchdog_device wdd; }; @@ -62,10 +61,7 @@ static int bd9576_wdt_stop(struct watchdog_device *wdd) { struct bd9576_wdt_priv *priv = watchdog_get_drvdata(wdd); - if (!priv->always_running) - bd9576_wdt_disable(priv); - else - set_bit(WDOG_HW_RUNNING, &wdd->status); + bd9576_wdt_disable(priv); return 0; } @@ -264,9 +260,6 @@ static int bd9576_wdt_probe(struct platform_device *pdev) if (ret) return ret; - priv->always_running = device_property_read_bool(dev->parent, - "always-running"); - watchdog_set_drvdata(&priv->wdd, priv); priv->wdd.info = &bd957x_wdt_ident; @@ -281,9 +274,6 @@ static int bd9576_wdt_probe(struct platform_device *pdev) watchdog_stop_on_reboot(&priv->wdd); - if (priv->always_running) - bd9576_wdt_start(&priv->wdd); - return devm_watchdog_register_device(dev, &priv->wdd); } diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c index 688b112e71..9f279c0e13 100644 --- a/drivers/watchdog/cpu5wdt.c +++ b/drivers/watchdog/cpu5wdt.c @@ -252,7 +252,7 @@ static void cpu5wdt_exit(void) if (cpu5wdt_device.queue) { cpu5wdt_device.queue = 0; wait_for_completion(&cpu5wdt_device.stop); - del_timer(&cpu5wdt_device.timer); + timer_shutdown_sync(&cpu5wdt_device.timer); } misc_deregister(&cpu5wdt_misc); diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index 9215793a1c..4895a69015 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -59,6 +59,8 @@ #define PON_REASON_EOF_NUM 0xCCCCBBBB #define RESERVED_MEM_MIN_SIZE 12 +#define MAX_HW_ERROR 250 + static int heartbeat = DEFAULT_HEARTBEAT; /* @@ -97,7 +99,7 @@ static int rti_wdt_start(struct watchdog_device *wdd) * to be 50% or less than that; we obviouly want to configure the open * window as large as possible so we select the 50% option. */ - wdd->min_hw_heartbeat_ms = 500 * wdd->timeout; + wdd->min_hw_heartbeat_ms = 520 * wdd->timeout + MAX_HW_ERROR; /* Generate NMI when wdt expires */ writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL); @@ -131,31 +133,33 @@ static int rti_wdt_setup_hw_hb(struct watchdog_device *wdd, u32 wsize) * be petted during the open window; not too early or not too late. * The HW configuration options only allow for the open window size * to be 50% or less than that. + * To avoid any glitches, we accommodate 2% + max hardware error + * safety margin. 
*/ switch (wsize) { case RTIWWDSIZE_50P: - /* 50% open window => 50% min heartbeat */ - wdd->min_hw_heartbeat_ms = 500 * heartbeat; + /* 50% open window => 52% min heartbeat */ + wdd->min_hw_heartbeat_ms = 520 * heartbeat + MAX_HW_ERROR; break; case RTIWWDSIZE_25P: - /* 25% open window => 75% min heartbeat */ - wdd->min_hw_heartbeat_ms = 750 * heartbeat; + /* 25% open window => 77% min heartbeat */ + wdd->min_hw_heartbeat_ms = 770 * heartbeat + MAX_HW_ERROR; break; case RTIWWDSIZE_12P5: - /* 12.5% open window => 87.5% min heartbeat */ - wdd->min_hw_heartbeat_ms = 875 * heartbeat; + /* 12.5% open window => 89.5% min heartbeat */ + wdd->min_hw_heartbeat_ms = 895 * heartbeat + MAX_HW_ERROR; break; case RTIWWDSIZE_6P25: - /* 6.5% open window => 93.5% min heartbeat */ - wdd->min_hw_heartbeat_ms = 935 * heartbeat; + /* 6.5% open window => 95.5% min heartbeat */ + wdd->min_hw_heartbeat_ms = 955 * heartbeat + MAX_HW_ERROR; break; case RTIWWDSIZE_3P125: - /* 3.125% open window => 96.9% min heartbeat */ - wdd->min_hw_heartbeat_ms = 969 * heartbeat; + /* 3.125% open window => 98.9% min heartbeat */ + wdd->min_hw_heartbeat_ms = 989 * heartbeat + MAX_HW_ERROR; break; default: @@ -233,14 +237,6 @@ static int rti_wdt_probe(struct platform_device *pdev) return -EINVAL; } - /* - * If watchdog is running at 32k clock, it is not accurate. - * Adjust frequency down in this case so that we don't pet - * the watchdog too often. - */ - if (wdt->freq < 32768) - wdt->freq = wdt->freq * 9 / 10; - pm_runtime_enable(dev); ret = pm_runtime_resume_and_get(dev); if (ret < 0) { diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c index 5d2df008b9..34a917221e 100644 --- a/drivers/watchdog/sa1100_wdt.c +++ b/drivers/watchdog/sa1100_wdt.c @@ -191,9 +191,8 @@ static int sa1100dog_probe(struct platform_device *pdev) if (!res) return -ENXIO; reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - ret = PTR_ERR_OR_ZERO(reg_base); - if (ret) - return ret; + if (!reg_base) + return -ENOMEM; clk = clk_get(NULL, "OSTIMER0"); if (IS_ERR(clk)) { diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 3205e5d724..1a9ded0cdd 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -65,13 +65,17 @@ #include "xenbus.h" -static int xs_init_irq; +static int xs_init_irq = -1; int xen_store_evtchn; EXPORT_SYMBOL_GPL(xen_store_evtchn); struct xenstore_domain_interface *xen_store_interface; EXPORT_SYMBOL_GPL(xen_store_interface); +#define XS_INTERFACE_READY \ + ((xen_store_interface != NULL) && \ + (xen_store_interface->connection == XENSTORE_CONNECTED)) + enum xenstore_init xen_store_domain_type; EXPORT_SYMBOL_GPL(xen_store_domain_type); @@ -751,19 +755,19 @@ static void xenbus_probe(void) { xenstored_ready = 1; - if (!xen_store_interface) { + if (!xen_store_interface) xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB); - /* - * Now it is safe to free the IRQ used for xenstore late - * initialization. No need to unbind: it is about to be - * bound again from xb_init_comms. Note that calling - * unbind_from_irqhandler now would result in xen_evtchn_close() - * being called and the event channel not being enabled again - * afterwards, resulting in missed event notifications. - */ + /* + * Now it is safe to free the IRQ used for xenstore late + * initialization. No need to unbind: it is about to be + * bound again from xb_init_comms. 
Note that calling + * unbind_from_irqhandler now would result in xen_evtchn_close() + * being called and the event channel not being enabled again + * afterwards, resulting in missed event notifications. + */ + if (xs_init_irq >= 0) free_irq(xs_init_irq, &xb_waitq); - } /* * In the HVM case, xenbus_init() deferred its call to @@ -822,7 +826,7 @@ static int __init xenbus_probe_initcall(void) if (xen_store_domain_type == XS_PV || (xen_store_domain_type == XS_HVM && !xs_hvm_defer_init_for_callback() && - xen_store_interface != NULL)) + XS_INTERFACE_READY)) xenbus_probe(); /* @@ -831,7 +835,7 @@ static int __init xenbus_probe_initcall(void) * started, then probe. It will be triggered when communication * starts happening, by waiting on xb_waitq. */ - if (xen_store_domain_type == XS_LOCAL || xen_store_interface == NULL) { + if (xen_store_domain_type == XS_LOCAL || !XS_INTERFACE_READY) { struct task_struct *probe_task; probe_task = kthread_run(xenbus_probe_thread, NULL, @@ -1014,6 +1018,12 @@ static int __init xenbus_init(void) xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB); + if (!xen_store_interface) { + pr_err("%s: cannot map HVM_PARAM_STORE_PFN=%llx\n", + __func__, v); + err = -EINVAL; + goto out_error; + } if (xen_store_interface->connection != XENSTORE_CONNECTED) wait = true; } |
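The f_printer.c and rndis.c hunks earlier in this part of the diff belong to the tree-wide replacement of the deprecated ida_simple_get()/ida_simple_remove() wrappers with ida_alloc(), ida_alloc_max() and ida_free(). A minimal sketch of that conversion pattern follows; it is not part of the patch, and the IDA name and bounds are hypothetical. The one behavioural detail worth noting is that ida_simple_get() took an exclusive upper bound, while ida_alloc_max() takes an inclusive maximum, which is why the rndis conversion turns 1000 into 999.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);	/* hypothetical IDA, not taken from the patch */

/* ida_simple_get(&ida, 0, 0, GFP_KERNEL): an 'end' of 0 meant "no upper
 * bound", so it maps to plain ida_alloc().
 */
static int example_get_minor(void)
{
	return ida_alloc(&example_ida, GFP_KERNEL);
}

/* ida_simple_get(&ida, 0, 1000, GFP_KERNEL): the old upper bound was
 * exclusive; ida_alloc_max() takes an inclusive maximum, hence 999.
 */
static int example_get_nr(void)
{
	return ida_alloc_max(&example_ida, 999, GFP_KERNEL);
}

/* ida_simple_remove() is a one-for-one replacement with ida_free(). */
static void example_put(int id)
{
	ida_free(&example_ida, id);
}

Both allocators return the new ID (non-negative) on success or a negative errno on failure, so the error handling in the converted gadget functions is unchanged.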