author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:52 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:52 +0000
commit     3afb00d3f86d3d924f88b56fa8285d4e9db85852
tree       95a985d3019522cea546b7d8df621369bc44fc6c /include
parent     Adding debian version 6.9.12-1.
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include')
678 files changed, 14256 insertions, 5759 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index e7796f373d..1a4dfd7a1c 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -9,8 +9,13 @@ #ifndef __ACPI_BUS_H__ #define __ACPI_BUS_H__ +#include <linux/completion.h> +#include <linux/container_of.h> #include <linux/device.h> +#include <linux/kobject.h> +#include <linux/mutex.h> #include <linux/property.h> +#include <linux/types.h> struct acpi_handle_list { u32 count; @@ -124,8 +129,8 @@ static inline struct acpi_hotplug_profile *to_acpi_hotplug_profile( } struct acpi_scan_handler { - const struct acpi_device_id *ids; struct list_head list_node; + const struct acpi_device_id *ids; bool (*match)(const char *idstr, const struct acpi_device_id **matchid); int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id); void (*detach)(struct acpi_device *dev); @@ -139,11 +144,15 @@ struct acpi_scan_handler { * -------------------- */ +typedef int (*acpi_hp_notify) (struct acpi_device *, u32); +typedef void (*acpi_hp_uevent) (struct acpi_device *, u32); +typedef void (*acpi_hp_fixup) (struct acpi_device *); + struct acpi_hotplug_context { struct acpi_device *self; - int (*notify)(struct acpi_device *, u32); - void (*uevent)(struct acpi_device *, u32); - void (*fixup)(struct acpi_device *); + acpi_hp_notify notify; + acpi_hp_uevent uevent; + acpi_hp_fixup fixup; }; /* @@ -170,7 +179,6 @@ struct acpi_driver { unsigned int flags; struct acpi_device_ops ops; struct device_driver drv; - struct module *owner; }; /* @@ -269,6 +277,7 @@ struct acpi_device_power_flags { }; struct acpi_device_power_state { + struct list_head resources; /* Power resources referenced */ struct { u8 valid:1; u8 explicit_set:1; /* _PSx present? */ @@ -276,7 +285,6 @@ struct acpi_device_power_state { } flags; int power; /* % Power (compared to D0) */ int latency; /* Dx->D0 time (microseconds) */ - struct list_head resources; /* Power resources referenced */ }; struct acpi_device_power { @@ -342,16 +350,16 @@ struct acpi_device_wakeup { }; struct acpi_device_physical_node { - unsigned int node_id; struct list_head node; struct device *dev; + unsigned int node_id; bool put_online:1; }; struct acpi_device_properties { + struct list_head list; const guid_t *guid; union acpi_object *properties; - struct list_head list; void **bufs; }; @@ -488,12 +496,12 @@ struct acpi_device { /* Non-device subnode */ struct acpi_data_node { + struct list_head sibling; const char *name; acpi_handle handle; struct fwnode_handle fwnode; struct fwnode_handle *parent; struct acpi_device_data data; - struct list_head sibling; struct kobject kobj; struct completion kobj_done; }; @@ -578,8 +586,7 @@ static inline void acpi_set_hp_context(struct acpi_device *adev, void acpi_initialize_hp_context(struct acpi_device *adev, struct acpi_hotplug_context *hp, - int (*notify)(struct acpi_device *, u32), - void (*uevent)(struct acpi_device *, u32)); + acpi_hp_notify notify, acpi_hp_uevent uevent); /* acpi_device.dev.bus == &acpi_bus_type */ extern const struct bus_type acpi_bus_type; @@ -656,7 +663,12 @@ void acpi_scan_lock_release(void); void acpi_lock_hp_context(void); void acpi_unlock_hp_context(void); int acpi_scan_add_handler(struct acpi_scan_handler *handler); -int acpi_bus_register_driver(struct acpi_driver *driver); +/* + * use a macro to avoid include chaining to get THIS_MODULE + */ +#define acpi_bus_register_driver(drv) \ + __acpi_bus_register_driver(drv, THIS_MODULE) +int __acpi_bus_register_driver(struct acpi_driver *driver, struct module *owner); void 
acpi_bus_unregister_driver(struct acpi_driver *driver); int acpi_bus_scan(acpi_handle handle); void acpi_bus_trim(struct acpi_device *start); @@ -978,11 +990,16 @@ static inline void acpi_put_acpi_dev(struct acpi_device *adev) { acpi_dev_put(adev); } + +int acpi_wait_for_acpi_ipmi(void); + #else /* CONFIG_ACPI */ static inline int register_acpi_bus_type(void *bus) { return 0; } static inline int unregister_acpi_bus_type(void *bus) { return 0; } +static inline int acpi_wait_for_acpi_ipmi(void) { return 0; } + #endif /* CONFIG_ACPI */ #endif /*__ACPI_BUS_H__*/ diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index f5fbc15e56..80dc36f9d5 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20230628 +#define ACPI_CA_VERSION 0x20240322 #include <acpi/acconfig.h> #include <acpi/actypes.h> diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index a33375e055..841ef9f227 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -571,8 +571,6 @@ struct acpi_cedt_cxims { struct acpi_cedt_rdpas { struct acpi_cedt_header header; - u8 reserved1; - u16 length; u16 segment; u16 bdf; u8 protocol; @@ -1096,6 +1094,12 @@ enum acpi_einj_command_status { #define ACPI_EINJ_PLATFORM_CORRECTABLE (1<<9) #define ACPI_EINJ_PLATFORM_UNCORRECTABLE (1<<10) #define ACPI_EINJ_PLATFORM_FATAL (1<<11) +#define ACPI_EINJ_CXL_CACHE_CORRECTABLE (1<<12) +#define ACPI_EINJ_CXL_CACHE_UNCORRECTABLE (1<<13) +#define ACPI_EINJ_CXL_CACHE_FATAL (1<<14) +#define ACPI_EINJ_CXL_MEM_CORRECTABLE (1<<15) +#define ACPI_EINJ_CXL_MEM_UNCORRECTABLE (1<<16) +#define ACPI_EINJ_CXL_MEM_FATAL (1<<17) #define ACPI_EINJ_VENDOR_DEFINED (1<<31) /******************************************************************************* diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 9775384d61..ae747c89d9 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -47,6 +47,7 @@ #define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */ #define ACPI_SIG_PRMT "PRMT" /* Platform Runtime Mechanism Table */ #define ACPI_SIG_RASF "RASF" /* RAS Feature table */ +#define ACPI_SIG_RAS2 "RAS2" /* RAS2 Feature table */ #define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */ #define ACPI_SIG_RHCT "RHCT" /* RISC-V Hart Capabilities Table */ #define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ @@ -77,8 +78,8 @@ * * AEST - Arm Error Source Table * - * Conforms to: ACPI for the Armv8 RAS Extensions 1.1 Platform Design Document - * September 2020. + * Conforms to: ACPI for the Armv8 RAS Extensions 1.1(Sep 2020) and + * 2.0(May 2023) Platform Design Document. 
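/*
 * [Illustrative sketch, not part of the diff] The acpi_bus_register_driver()
 * hunk above replaces the exported function with a macro so that THIS_MODULE
 * is captured at each call site rather than through the removed
 * acpi_driver.owner field. A hypothetical "foo" driver registers unchanged;
 * only the expansion differs:
 */
static const struct acpi_device_id foo_device_ids[] = {
	{ "FOO0001", 0 },	/* hypothetical _HID */
	{ }
};

static struct acpi_driver foo_driver = {
	.name = "foo",
	.ids  = foo_device_ids,
	/* .ops.add / .ops.remove handlers omitted for brevity */
};

static int __init foo_init(void)
{
	/* expands to __acpi_bus_register_driver(&foo_driver, THIS_MODULE) */
	return acpi_bus_register_driver(&foo_driver);
}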
* ******************************************************************************/ @@ -108,7 +109,9 @@ struct acpi_aest_hdr { #define ACPI_AEST_SMMU_ERROR_NODE 2 #define ACPI_AEST_VENDOR_ERROR_NODE 3 #define ACPI_AEST_GIC_ERROR_NODE 4 -#define ACPI_AEST_NODE_TYPE_RESERVED 5 /* 5 and above are reserved */ +#define ACPI_AEST_PCIE_ERROR_NODE 5 +#define ACPI_AEST_PROXY_ERROR_NODE 6 +#define ACPI_AEST_NODE_TYPE_RESERVED 7 /* 7 and above are reserved */ /* * AEST subtables (Error nodes) @@ -187,6 +190,12 @@ typedef struct acpi_aest_vendor { } acpi_aest_vendor; +struct acpi_aest_vendor_v2 { + char acpi_hid[8]; + u32 acpi_uid; + u8 vendor_specific_data[16]; +}; + /* 4: Gic Error */ typedef struct acpi_aest_gic { @@ -203,6 +212,18 @@ typedef struct acpi_aest_gic { #define ACPI_AEST_GIC_ITS 3 #define ACPI_AEST_GIC_RESERVED 4 /* 4 and above are reserved */ +/* 5: PCIe Error */ + +struct acpi_aest_pcie { + u32 iort_node_reference; +}; + +/* 6: Proxy Error */ + +struct acpi_aest_proxy { + u64 node_address; +}; + /* Node Interface Structure */ typedef struct acpi_aest_node_interface { @@ -218,11 +239,57 @@ typedef struct acpi_aest_node_interface { } acpi_aest_node_interface; +/* Node Interface Structure V2 */ + +struct acpi_aest_node_interface_header { + u8 type; + u8 group_format; + u8 reserved[2]; + u32 flags; + u64 address; + u32 error_record_index; + u32 error_record_count; +}; + +#define ACPI_AEST_NODE_GROUP_FORMAT_4K 0 +#define ACPI_AEST_NODE_GROUP_FORMAT_16K 1 +#define ACPI_AEST_NODE_GROUP_FORMAT_64K 2 + +struct acpi_aest_node_interface_common { + u32 error_node_device; + u32 processor_affinity; + u64 error_group_register_base; + u64 fault_inject_register_base; + u64 interrupt_config_register_base; +}; + +struct acpi_aest_node_interface_4k { + u64 error_record_implemented; + u64 error_status_reporting; + u64 addressing_mode; + struct acpi_aest_node_interface_common common; +}; + +struct acpi_aest_node_interface_16k { + u64 error_record_implemented[4]; + u64 error_status_reporting[4]; + u64 addressing_mode[4]; + struct acpi_aest_node_interface_common common; +}; + +struct acpi_aest_node_interface_64k { + u64 error_record_implemented[14]; + u64 error_status_reporting[14]; + u64 addressing_mode[14]; + struct acpi_aest_node_interface_common common; +}; + /* Values for Type field above */ -#define ACPI_AEST_NODE_SYSTEM_REGISTER 0 -#define ACPI_AEST_NODE_MEMORY_MAPPED 1 -#define ACPI_AEST_XFACE_RESERVED 2 /* 2 and above are reserved */ +#define ACPI_AEST_NODE_SYSTEM_REGISTER 0 +#define ACPI_AEST_NODE_MEMORY_MAPPED 1 +#define ACPI_AEST_NODE_SINGLE_RECORD_MEMORY_MAPPED 2 +#define ACPI_AEST_XFACE_RESERVED 3 /* 2 and above are reserved */ /* Node Interrupt Structure */ @@ -236,6 +303,16 @@ typedef struct acpi_aest_node_interrupt { } acpi_aest_node_interrupt; +/* Node Interrupt Structure V2 */ + +struct acpi_aest_node_interrupt_v2 { + u8 type; + u8 reserved[2]; + u8 flags; + u32 gsiv; + u8 reserved1[4]; +}; + /* Values for Type field above */ #define ACPI_AEST_NODE_FAULT_HANDLING 0 @@ -1889,22 +1966,22 @@ struct nfit_device_handle { /******************************************************************************* * - * NHLT - Non HD Audio Link Table - * - * Conforms to: Intel Smart Sound Technology NHLT Specification - * Version 0.8.1, January 2020. 
+ * NHLT - Non HDAudio Link Table + * Version 1 * ******************************************************************************/ -/* Main table */ - struct acpi_table_nhlt { struct acpi_table_header header; /* Common ACPI table header */ - u8 endpoint_count; + u8 endpoints_count; + /* + * struct acpi_nhlt_endpoint endpoints[]; + * struct acpi_nhlt_config oed_config; + */ }; struct acpi_nhlt_endpoint { - u32 descriptor_length; + u32 length; u8 link_type; u8 instance_id; u16 vendor_id; @@ -1914,84 +1991,135 @@ struct acpi_nhlt_endpoint { u8 device_type; u8 direction; u8 virtual_bus_id; + /* + * struct acpi_nhlt_config device_config; + * struct acpi_nhlt_formats_config formats_config; + * struct acpi_nhlt_devices_info devices_info; + */ }; -/* Types for link_type field above */ - -#define ACPI_NHLT_RESERVED_HD_AUDIO 0 -#define ACPI_NHLT_RESERVED_DSP 1 -#define ACPI_NHLT_PDM 2 -#define ACPI_NHLT_SSP 3 -#define ACPI_NHLT_RESERVED_SLIMBUS 4 -#define ACPI_NHLT_RESERVED_SOUNDWIRE 5 -#define ACPI_NHLT_TYPE_RESERVED 6 /* 6 and above are reserved */ - -/* All other values above are reserved */ +/* + * Values for link_type field above + * + * Only types PDM and SSP are used + */ +#define ACPI_NHLT_LINKTYPE_HDA 0 +#define ACPI_NHLT_LINKTYPE_DSP 1 +#define ACPI_NHLT_LINKTYPE_PDM 2 +#define ACPI_NHLT_LINKTYPE_SSP 3 +#define ACPI_NHLT_LINKTYPE_SLIMBUS 4 +#define ACPI_NHLT_LINKTYPE_SDW 5 +#define ACPI_NHLT_LINKTYPE_UAOL 6 /* Values for device_id field above */ -#define ACPI_NHLT_PDM_DMIC 0xAE20 -#define ACPI_NHLT_BT_SIDEBAND 0xAE30 -#define ACPI_NHLT_I2S_TDM_CODECS 0xAE23 +#define ACPI_NHLT_DEVICEID_DMIC 0xAE20 +#define ACPI_NHLT_DEVICEID_BT 0xAE30 +#define ACPI_NHLT_DEVICEID_I2S 0xAE34 /* Values for device_type field above */ -/* SSP Link */ - -#define ACPI_NHLT_LINK_BT_SIDEBAND 0 -#define ACPI_NHLT_LINK_FM 1 -#define ACPI_NHLT_LINK_MODEM 2 -/* 3 is reserved */ -#define ACPI_NHLT_LINK_SSP_ANALOG_CODEC 4 - -/* PDM Link */ - -#define ACPI_NHLT_PDM_ON_CAVS_1P8 0 -#define ACPI_NHLT_PDM_ON_CAVS_1P5 1 +/* + * Device types unique to endpoint of link_type=PDM + * + * Type PDM used for all SKL+ platforms + */ +#define ACPI_NHLT_DEVICETYPE_PDM 0 +#define ACPI_NHLT_DEVICETYPE_PDM_SKL 1 +/* Device types unique to endpoint of link_type=SSP */ +#define ACPI_NHLT_DEVICETYPE_BT 0 +#define ACPI_NHLT_DEVICETYPE_FM 1 +#define ACPI_NHLT_DEVICETYPE_MODEM 2 +#define ACPI_NHLT_DEVICETYPE_CODEC 4 /* Values for Direction field above */ -#define ACPI_NHLT_DIR_RENDER 0 -#define ACPI_NHLT_DIR_CAPTURE 1 -#define ACPI_NHLT_DIR_RENDER_LOOPBACK 2 -#define ACPI_NHLT_DIR_RENDER_FEEDBACK 3 -#define ACPI_NHLT_DIR_RESERVED 4 /* 4 and above are reserved */ +#define ACPI_NHLT_DIR_RENDER 0 +#define ACPI_NHLT_DIR_CAPTURE 1 -struct acpi_nhlt_device_specific_config { +struct acpi_nhlt_config { u32 capabilities_size; + u8 capabilities[]; +}; + +struct acpi_nhlt_gendevice_config { u8 virtual_slot; u8 config_type; }; -struct acpi_nhlt_device_specific_config_a { - u32 capabilities_size; +/* Values for config_type field above */ + +#define ACPI_NHLT_CONFIGTYPE_GENERIC 0 +#define ACPI_NHLT_CONFIGTYPE_MICARRAY 1 + +struct acpi_nhlt_micdevice_config { u8 virtual_slot; u8 config_type; u8 array_type; }; -/* Values for Config Type above */ +/* Values for array_type field above */ -#define ACPI_NHLT_CONFIG_TYPE_GENERIC 0x00 -#define ACPI_NHLT_CONFIG_TYPE_MIC_ARRAY 0x01 -#define ACPI_NHLT_CONFIG_TYPE_RENDER_FEEDBACK 0x03 -#define ACPI_NHLT_CONFIG_TYPE_RESERVED 0x04 /* 4 and above are reserved */ +#define ACPI_NHLT_ARRAYTYPE_LINEAR2_SMALL 0xA +#define 
ACPI_NHLT_ARRAYTYPE_LINEAR2_BIG 0xB +#define ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO1 0xC +#define ACPI_NHLT_ARRAYTYPE_PLANAR4_LSHAPED 0xD +#define ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO2 0xE +#define ACPI_NHLT_ARRAYTYPE_VENDOR 0xF -struct acpi_nhlt_device_specific_config_b { - u32 capabilities_size; +struct acpi_nhlt_vendor_mic_config { + u8 type; + u8 panel; + u16 speaker_position_distance; /* mm */ + u16 horizontal_offset; /* mm */ + u16 vertical_offset; /* mm */ + u8 frequency_low_band; /* 5*Hz */ + u8 frequency_high_band; /* 500*Hz */ + u16 direction_angle; /* -180 - +180 */ + u16 elevation_angle; /* -180 - +180 */ + u16 work_vertical_angle_begin; /* -180 - +180 with 2 deg step */ + u16 work_vertical_angle_end; /* -180 - +180 with 2 deg step */ + u16 work_horizontal_angle_begin; /* -180 - +180 with 2 deg step */ + u16 work_horizontal_angle_end; /* -180 - +180 with 2 deg step */ }; -struct acpi_nhlt_device_specific_config_c { - u32 capabilities_size; +/* Values for Type field above */ + +#define ACPI_NHLT_MICTYPE_OMNIDIRECTIONAL 0 +#define ACPI_NHLT_MICTYPE_SUBCARDIOID 1 +#define ACPI_NHLT_MICTYPE_CARDIOID 2 +#define ACPI_NHLT_MICTYPE_SUPERCARDIOID 3 +#define ACPI_NHLT_MICTYPE_HYPERCARDIOID 4 +#define ACPI_NHLT_MICTYPE_8SHAPED 5 +#define ACPI_NHLT_MICTYPE_RESERVED 6 +#define ACPI_NHLT_MICTYPE_VENDORDEFINED 7 + +/* Values for Panel field above */ + +#define ACPI_NHLT_MICLOCATION_TOP 0 +#define ACPI_NHLT_MICLOCATION_BOTTOM 1 +#define ACPI_NHLT_MICLOCATION_LEFT 2 +#define ACPI_NHLT_MICLOCATION_RIGHT 3 +#define ACPI_NHLT_MICLOCATION_FRONT 4 +#define ACPI_NHLT_MICLOCATION_REAR 5 + +struct acpi_nhlt_vendor_micdevice_config { u8 virtual_slot; + u8 config_type; + u8 array_type; + u8 mics_count; + struct acpi_nhlt_vendor_mic_config mics[]; }; -struct acpi_nhlt_render_device_specific_config { - u32 capabilities_size; +union acpi_nhlt_device_config { u8 virtual_slot; + struct acpi_nhlt_gendevice_config gen; + struct acpi_nhlt_micdevice_config mic; + struct acpi_nhlt_vendor_micdevice_config vendor_mic; }; -struct acpi_nhlt_wave_extensible { +/* Inherited from Microsoft's WAVEFORMATEXTENSIBLE. 
*/ +struct acpi_nhlt_wave_formatext { u16 format_tag; u16 channel_count; u32 samples_per_sec; @@ -2001,144 +2129,28 @@ struct acpi_nhlt_wave_extensible { u16 extra_format_size; u16 valid_bits_per_sample; u32 channel_mask; - u8 sub_format_guid[16]; -}; - -/* Values for channel_mask above */ - -#define ACPI_NHLT_SPKR_FRONT_LEFT 0x1 -#define ACPI_NHLT_SPKR_FRONT_RIGHT 0x2 -#define ACPI_NHLT_SPKR_FRONT_CENTER 0x4 -#define ACPI_NHLT_SPKR_LOW_FREQ 0x8 -#define ACPI_NHLT_SPKR_BACK_LEFT 0x10 -#define ACPI_NHLT_SPKR_BACK_RIGHT 0x20 -#define ACPI_NHLT_SPKR_FRONT_LEFT_OF_CENTER 0x40 -#define ACPI_NHLT_SPKR_FRONT_RIGHT_OF_CENTER 0x80 -#define ACPI_NHLT_SPKR_BACK_CENTER 0x100 -#define ACPI_NHLT_SPKR_SIDE_LEFT 0x200 -#define ACPI_NHLT_SPKR_SIDE_RIGHT 0x400 -#define ACPI_NHLT_SPKR_TOP_CENTER 0x800 -#define ACPI_NHLT_SPKR_TOP_FRONT_LEFT 0x1000 -#define ACPI_NHLT_SPKR_TOP_FRONT_CENTER 0x2000 -#define ACPI_NHLT_SPKR_TOP_FRONT_RIGHT 0x4000 -#define ACPI_NHLT_SPKR_TOP_BACK_LEFT 0x8000 -#define ACPI_NHLT_SPKR_TOP_BACK_CENTER 0x10000 -#define ACPI_NHLT_SPKR_TOP_BACK_RIGHT 0x20000 + u8 subformat[16]; +}; struct acpi_nhlt_format_config { - struct acpi_nhlt_wave_extensible format; - u32 capability_size; - u8 capabilities[]; + struct acpi_nhlt_wave_formatext format; + struct acpi_nhlt_config config; }; struct acpi_nhlt_formats_config { u8 formats_count; + struct acpi_nhlt_format_config formats[]; }; -struct acpi_nhlt_device_specific_hdr { - u8 virtual_slot; - u8 config_type; -}; - -/* Types for config_type above */ - -#define ACPI_NHLT_GENERIC 0 -#define ACPI_NHLT_MIC 1 -#define ACPI_NHLT_RENDER 3 - -struct acpi_nhlt_mic_device_specific_config { - struct acpi_nhlt_device_specific_hdr device_config; - u8 array_type_ext; -}; - -/* Values for array_type_ext above */ - -#define ACPI_NHLT_ARRAY_TYPE_RESERVED 0x09 /* 9 and below are reserved */ -#define ACPI_NHLT_SMALL_LINEAR_2ELEMENT 0x0A -#define ACPI_NHLT_BIG_LINEAR_2ELEMENT 0x0B -#define ACPI_NHLT_FIRST_GEOMETRY_LINEAR_4ELEMENT 0x0C -#define ACPI_NHLT_PLANAR_LSHAPED_4ELEMENT 0x0D -#define ACPI_NHLT_SECOND_GEOMETRY_LINEAR_4ELEMENT 0x0E -#define ACPI_NHLT_VENDOR_DEFINED 0x0F -#define ACPI_NHLT_ARRAY_TYPE_MASK 0x0F -#define ACPI_NHLT_ARRAY_TYPE_EXT_MASK 0x10 - -#define ACPI_NHLT_NO_EXTENSION 0x0 -#define ACPI_NHLT_MIC_SNR_SENSITIVITY_EXT (1<<4) - -struct acpi_nhlt_vendor_mic_count { - u8 microphone_count; -}; - -struct acpi_nhlt_vendor_mic_config { - u8 type; - u8 panel; - u16 speaker_position_distance; /* mm */ - u16 horizontal_offset; /* mm */ - u16 vertical_offset; /* mm */ - u8 frequency_low_band; /* 5*Hz */ - u8 frequency_high_band; /* 500*Hz */ - u16 direction_angle; /* -180 - + 180 */ - u16 elevation_angle; /* -180 - + 180 */ - u16 work_vertical_angle_begin; /* -180 - + 180 with 2 deg step */ - u16 work_vertical_angle_end; /* -180 - + 180 with 2 deg step */ - u16 work_horizontal_angle_begin; /* -180 - + 180 with 2 deg step */ - u16 work_horizontal_angle_end; /* -180 - + 180 with 2 deg step */ -}; - -/* Values for Type field above */ - -#define ACPI_NHLT_MIC_OMNIDIRECTIONAL 0 -#define ACPI_NHLT_MIC_SUBCARDIOID 1 -#define ACPI_NHLT_MIC_CARDIOID 2 -#define ACPI_NHLT_MIC_SUPER_CARDIOID 3 -#define ACPI_NHLT_MIC_HYPER_CARDIOID 4 -#define ACPI_NHLT_MIC_8_SHAPED 5 -#define ACPI_NHLT_MIC_RESERVED6 6 /* 6 is reserved */ -#define ACPI_NHLT_MIC_VENDOR_DEFINED 7 -#define ACPI_NHLT_MIC_RESERVED 8 /* 8 and above are reserved */ - -/* Values for Panel field above */ - -#define ACPI_NHLT_MIC_POSITION_TOP 0 -#define ACPI_NHLT_MIC_POSITION_BOTTOM 1 -#define 
ACPI_NHLT_MIC_POSITION_LEFT 2 -#define ACPI_NHLT_MIC_POSITION_RIGHT 3 -#define ACPI_NHLT_MIC_POSITION_FRONT 4 -#define ACPI_NHLT_MIC_POSITION_BACK 5 -#define ACPI_NHLT_MIC_POSITION_RESERVED 6 /* 6 and above are reserved */ - -struct acpi_nhlt_vendor_mic_device_specific_config { - struct acpi_nhlt_mic_device_specific_config mic_array_device_config; - u8 number_of_microphones; - struct acpi_nhlt_vendor_mic_config mic_config[]; /* Indexed by number_of_microphones */ -}; - -/* Microphone SNR and Sensitivity extension */ - -struct acpi_nhlt_mic_snr_sensitivity_extension { - u32 SNR; - u32 sensitivity; -}; - -/* Render device with feedback */ - -struct acpi_nhlt_render_feedback_device_specific_config { - u8 feedback_virtual_slot; /* Render slot in case of capture */ - u16 feedback_channels; /* Informative only */ - u16 feedback_valid_bits_per_sample; -}; - -/* Non documented structures */ - -struct acpi_nhlt_device_info_count { - u8 structure_count; +struct acpi_nhlt_device_info { + u8 id[16]; + u8 instance_id; + u8 port_id; }; -struct acpi_nhlt_device_info { - u8 device_id[16]; - u8 device_instance_id; - u8 device_port_id; +struct acpi_nhlt_devices_info { + u8 devices_count; + struct acpi_nhlt_device_info devices[]; }; /******************************************************************************* @@ -2753,6 +2765,134 @@ enum acpi_rasf_status { /******************************************************************************* * + * RAS2 - RAS2 Feature Table (ACPI 6.5) + * Version 1 + * + * + ******************************************************************************/ + +struct acpi_table_ras2 { + struct acpi_table_header header; /* Common ACPI table header */ + u16 reserved; + u16 num_pcc_descs; +}; + +/* RAS2 Platform Communication Channel Descriptor */ + +struct acpi_ras2_pcc_desc { + u8 channel_id; + u16 reserved; + u8 feature_type; + u32 instance; +}; + +/* RAS2 Platform Communication Channel Shared Memory Region */ + +struct acpi_ras2_shared_memory { + u32 signature; + u16 command; + u16 status; + u16 version; + u8 features[16]; + u8 set_capabilities[16]; + u16 num_parameter_blocks; + u32 set_capabilities_status; +}; + +/* RAS2 Parameter Block Structure for PATROL_SCRUB */ + +struct acpi_ras2_parameter_block { + u16 type; + u16 version; + u16 length; +}; + +/* RAS2 Parameter Block Structure for PATROL_SCRUB */ + +struct acpi_ras2_patrol_scrub_parameter { + struct acpi_ras2_parameter_block header; + u16 patrol_scrub_command; + u64 requested_address_range[2]; + u64 actual_address_range[2]; + u32 flags; + u32 scrub_params_out; + u32 scrub_params_in; +}; + +/* Masks for Flags field above */ + +#define ACPI_RAS2_SCRUBBER_RUNNING 1 + +/* RAS2 Parameter Block Structure for LA2PA_TRANSLATION */ + +struct acpi_ras2_la2pa_translation_parameter { + struct acpi_ras2_parameter_block header; + u16 addr_translation_command; + u64 sub_inst_id; + u64 logical_address; + u64 physical_address; + u32 status; +}; + +/* Channel Commands */ + +enum acpi_ras2_commands { + ACPI_RAS2_EXECUTE_RAS2_COMMAND = 1 +}; + +/* Platform RAS2 Features */ + +enum acpi_ras2_features { + ACPI_RAS2_PATROL_SCRUB_SUPPORTED = 0, + ACPI_RAS2_LA2PA_TRANSLATION = 1 +}; + +/* RAS2 Patrol Scrub Commands */ + +enum acpi_ras2_patrol_scrub_commands { + ACPI_RAS2_GET_PATROL_PARAMETERS = 1, + ACPI_RAS2_START_PATROL_SCRUBBER = 2, + ACPI_RAS2_STOP_PATROL_SCRUBBER = 3 +}; + +/* RAS2 LA2PA Translation Commands */ + +enum acpi_ras2_la2_pa_translation_commands { + ACPI_RAS2_GET_LA2PA_TRANSLATION = 1, +}; + +/* RAS2 LA2PA Translation Status values 
*/ + +enum acpi_ras2_la2_pa_translation_status { + ACPI_RAS2_LA2PA_TRANSLATION_SUCCESS = 0, + ACPI_RAS2_LA2PA_TRANSLATION_FAIL = 1, +}; + +/* Channel Command flags */ + +#define ACPI_RAS2_GENERATE_SCI (1<<15) + +/* Status values */ + +enum acpi_ras2_status { + ACPI_RAS2_SUCCESS = 0, + ACPI_RAS2_NOT_VALID = 1, + ACPI_RAS2_NOT_SUPPORTED = 2, + ACPI_RAS2_BUSY = 3, + ACPI_RAS2_FAILED = 4, + ACPI_RAS2_ABORTED = 5, + ACPI_RAS2_INVALID_DATA = 6 +}; + +/* Status flags */ + +#define ACPI_RAS2_COMMAND_COMPLETE (1) +#define ACPI_RAS2_SCI_DOORBELL (1<<1) +#define ACPI_RAS2_ERROR (1<<2) +#define ACPI_RAS2_STATUS (0x1F<<3) + +/******************************************************************************* + * * RGRT - Regulatory Graphics Resource Table * Version 1 * diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index c080d579a5..8f775e3a08 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -192,7 +192,8 @@ enum acpi_srat_type { ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */ ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */ ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY = 6, /* ACPI 6.4 */ - ACPI_SRAT_TYPE_RESERVED = 7 /* 7 and greater are reserved */ + ACPI_SRAT_TYPE_RINTC_AFFINITY = 7, /* ACPI 6.6 */ + ACPI_SRAT_TYPE_RESERVED = 8 /* 8 and greater are reserved */ }; /* @@ -296,6 +297,21 @@ struct acpi_srat_generic_affinity { #define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */ #define ACPI_SRAT_ARCHITECTURAL_TRANSACTIONS (1<<1) /* ACPI 6.4 */ +/* 7: RINTC Affinity Structure(ACPI 6.6) */ + +struct acpi_srat_rintc_affinity { + struct acpi_subtable_header header; + u16 reserved; + u32 proximity_domain; + u32 acpi_processor_uid; + u32 flags; + u32 clock_domain; +}; + +/* Flags for struct acpi_srat_rintc_affinity */ + +#define ACPI_SRAT_RINTC_ENABLED (1) /* 00: Use affinity structure */ + /******************************************************************************* * * STAO - Status Override Table (_STA override) - ACPI 6.0 diff --git a/include/acpi/nhlt.h b/include/acpi/nhlt.h new file mode 100644 index 0000000000..2108aa6d02 --- /dev/null +++ b/include/acpi/nhlt.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2023-2024 Intel Corporation + * + * Authors: Cezary Rojewski <cezary.rojewski@intel.com> + * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com> + */ + +#ifndef __ACPI_NHLT_H__ +#define __ACPI_NHLT_H__ + +#include <linux/acpi.h> +#include <linux/kconfig.h> +#include <linux/overflow.h> +#include <linux/types.h> + +#define __acpi_nhlt_endpoint_config(ep) ((void *)((ep) + 1)) +#define __acpi_nhlt_config_caps(cfg) ((void *)((cfg) + 1)) + +/** + * acpi_nhlt_endpoint_fmtscfg - Get the formats configuration space. + * @ep: the endpoint to retrieve the space for. + * + * Return: A pointer to the formats configuration space. + */ +static inline struct acpi_nhlt_formats_config * +acpi_nhlt_endpoint_fmtscfg(const struct acpi_nhlt_endpoint *ep) +{ + struct acpi_nhlt_config *cfg = __acpi_nhlt_endpoint_config(ep); + + return (struct acpi_nhlt_formats_config *)((u8 *)(cfg + 1) + cfg->capabilities_size); +} + +#define __acpi_nhlt_first_endpoint(tb) \ + ((void *)(tb + 1)) + +#define __acpi_nhlt_next_endpoint(ep) \ + ((void *)((u8 *)(ep) + (ep)->length)) + +#define __acpi_nhlt_get_endpoint(tb, ep, i) \ + ((i) ? 
__acpi_nhlt_next_endpoint(ep) : __acpi_nhlt_first_endpoint(tb)) + +#define __acpi_nhlt_first_fmtcfg(fmts) \ + ((void *)(fmts + 1)) + +#define __acpi_nhlt_next_fmtcfg(fmt) \ + ((void *)((u8 *)((fmt) + 1) + (fmt)->config.capabilities_size)) + +#define __acpi_nhlt_get_fmtcfg(fmts, fmt, i) \ + ((i) ? __acpi_nhlt_next_fmtcfg(fmt) : __acpi_nhlt_first_fmtcfg(fmts)) + +/* + * The for_each_nhlt_*() macros rely on an iterator to deal with the + * variable length of each endpoint structure and the possible presence + * of an OED-Config used by Windows only. + */ + +/** + * for_each_nhlt_endpoint - Iterate over endpoints in a NHLT table. + * @tb: the pointer to a NHLT table. + * @ep: the pointer to endpoint to use as loop cursor. + */ +#define for_each_nhlt_endpoint(tb, ep) \ + for (unsigned int __i = 0; \ + __i < (tb)->endpoints_count && \ + (ep = __acpi_nhlt_get_endpoint(tb, ep, __i)); \ + __i++) + +/** + * for_each_nhlt_fmtcfg - Iterate over format configurations. + * @fmts: the pointer to formats configuration space. + * @fmt: the pointer to format to use as loop cursor. + */ +#define for_each_nhlt_fmtcfg(fmts, fmt) \ + for (unsigned int __i = 0; \ + __i < (fmts)->formats_count && \ + (fmt = __acpi_nhlt_get_fmtcfg(fmts, fmt, __i)); \ + __i++) + +/** + * for_each_nhlt_endpoint_fmtcfg - Iterate over format configurations in an endpoint. + * @ep: the pointer to an endpoint. + * @fmt: the pointer to format to use as loop cursor. + */ +#define for_each_nhlt_endpoint_fmtcfg(ep, fmt) \ + for_each_nhlt_fmtcfg(acpi_nhlt_endpoint_fmtscfg(ep), fmt) + +#if IS_ENABLED(CONFIG_ACPI_NHLT) + +/* + * System-wide pointer to the first NHLT table. + * + * A sound driver may utilize acpi_nhlt_get/put_gbl_table() on its + * initialization and removal respectively to avoid excessive mapping + * and unmapping of the memory occupied by the table between streaming + * operations. 
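/*
 * [Illustrative sketch, not part of the diff] A sound driver would bracket
 * its lifetime with the get/put helpers declared below and use the lookup
 * API in between, e.g. to count the DMICs on a PDM capture endpoint
 * (hypothetical probe hook, error handling trimmed):
 */
static int foo_probe(void)
{
	struct acpi_nhlt_endpoint *ep;
	int num_mics = 0;

	if (ACPI_FAILURE(acpi_nhlt_get_gbl_table()))
		return -ENODEV;

	ep = acpi_nhlt_find_endpoint(ACPI_NHLT_LINKTYPE_PDM,
				     ACPI_NHLT_DEVICETYPE_PDM,
				     ACPI_NHLT_DIR_CAPTURE, 0);
	if (ep)
		num_mics = acpi_nhlt_endpoint_mic_count(ep);

	/* the matching remove path calls acpi_nhlt_put_gbl_table() */
	return num_mics > 0 ? 0 : -ENODEV;
}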
+ */ + +acpi_status acpi_nhlt_get_gbl_table(void); +void acpi_nhlt_put_gbl_table(void); + +bool acpi_nhlt_endpoint_match(const struct acpi_nhlt_endpoint *ep, + int link_type, int dev_type, int dir, int bus_id); +struct acpi_nhlt_endpoint * +acpi_nhlt_tb_find_endpoint(const struct acpi_table_nhlt *tb, + int link_type, int dev_type, int dir, int bus_id); +struct acpi_nhlt_endpoint * +acpi_nhlt_find_endpoint(int link_type, int dev_type, int dir, int bus_id); +struct acpi_nhlt_format_config * +acpi_nhlt_endpoint_find_fmtcfg(const struct acpi_nhlt_endpoint *ep, + u16 ch, u32 rate, u16 vbps, u16 bps); +struct acpi_nhlt_format_config * +acpi_nhlt_tb_find_fmtcfg(const struct acpi_table_nhlt *tb, + int link_type, int dev_type, int dir, int bus_id, + u16 ch, u32 rate, u16 vpbs, u16 bps); +struct acpi_nhlt_format_config * +acpi_nhlt_find_fmtcfg(int link_type, int dev_type, int dir, int bus_id, + u16 ch, u32 rate, u16 vpbs, u16 bps); +int acpi_nhlt_endpoint_mic_count(const struct acpi_nhlt_endpoint *ep); + +#else /* !CONFIG_ACPI_NHLT */ + +static inline acpi_status acpi_nhlt_get_gbl_table(void) +{ + return AE_NOT_FOUND; +} + +static inline void acpi_nhlt_put_gbl_table(void) +{ +} + +static inline bool +acpi_nhlt_endpoint_match(const struct acpi_nhlt_endpoint *ep, + int link_type, int dev_type, int dir, int bus_id) +{ + return false; +} + +static inline struct acpi_nhlt_endpoint * +acpi_nhlt_tb_find_endpoint(const struct acpi_table_nhlt *tb, + int link_type, int dev_type, int dir, int bus_id) +{ + return NULL; +} + +static inline struct acpi_nhlt_format_config * +acpi_nhlt_endpoint_find_fmtcfg(const struct acpi_nhlt_endpoint *ep, + u16 ch, u32 rate, u16 vbps, u16 bps) +{ + return NULL; +} + +static inline struct acpi_nhlt_format_config * +acpi_nhlt_tb_find_fmtcfg(const struct acpi_table_nhlt *tb, + int link_type, int dev_type, int dir, int bus_id, + u16 ch, u32 rate, u16 vpbs, u16 bps) +{ + return NULL; +} + +static inline int acpi_nhlt_endpoint_mic_count(const struct acpi_nhlt_endpoint *ep) +{ + return 0; +} + +static inline struct acpi_nhlt_endpoint * +acpi_nhlt_find_endpoint(int link_type, int dev_type, int dir, int bus_id) +{ + return NULL; +} + +static inline struct acpi_nhlt_format_config * +acpi_nhlt_find_fmtcfg(int link_type, int dev_type, int dir, int bus_id, + u16 ch, u32 rate, u16 vpbs, u16 bps) +{ + return NULL; +} + +#endif /* CONFIG_ACPI_NHLT */ + +#endif /* __ACPI_NHLT_H__ */ diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h index 600d4e2641..62cac266a1 100644 --- a/include/acpi/platform/aclinuxex.h +++ b/include/acpi/platform/aclinuxex.h @@ -47,26 +47,19 @@ acpi_status acpi_os_terminate(void); * However, boot has (system_state != SYSTEM_RUNNING) * to quiet __might_sleep() in kmalloc() and resume does not. */ -static inline void *acpi_os_allocate(acpi_size size) -{ - return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); -} +#define acpi_os_allocate(_size) \ + kmalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL) -static inline void *acpi_os_allocate_zeroed(acpi_size size) -{ - return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); -} +#define acpi_os_allocate_zeroed(_size) \ + kzalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL) static inline void acpi_os_free(void *memory) { kfree(memory); } -static inline void *acpi_os_acquire_object(acpi_cache_t * cache) -{ - return kmem_cache_zalloc(cache, - irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); -} +#define acpi_os_acquire_object(_cache) \ + kmem_cache_zalloc(_cache, irqs_disabled() ? 
GFP_ATOMIC : GFP_KERNEL) static inline acpi_thread_id acpi_os_get_thread_id(void) { diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index d436bee4d1..b20fa25a7e 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -22,7 +22,6 @@ mandatory-y += dma-mapping.h mandatory-y += dma.h mandatory-y += emergency-restart.h mandatory-y += exec.h -mandatory-y += fb.h mandatory-y += ftrace.h mandatory-y += futex.h mandatory-y += hardirq.h @@ -62,5 +61,6 @@ mandatory-y += uaccess.h mandatory-y += unaligned.h mandatory-y += vermagic.h mandatory-y += vga.h +mandatory-y += video.h mandatory-y += word-at-a-time.h mandatory-y += xor.h diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h index 446fea6dda..2d08c750c8 100644 --- a/include/asm-generic/bitops/__ffs.h +++ b/include/asm-generic/bitops/__ffs.h @@ -10,9 +10,9 @@ * * Undefined if no bit exists, so code should check against 0 first. */ -static __always_inline unsigned long generic___ffs(unsigned long word) +static __always_inline unsigned int generic___ffs(unsigned long word) { - int num = 0; + unsigned int num = 0; #if BITS_PER_LONG == 64 if ((word & 0xffffffff) == 0) { diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h index 54ccccf96e..e974ec932e 100644 --- a/include/asm-generic/bitops/__fls.h +++ b/include/asm-generic/bitops/__fls.h @@ -10,9 +10,9 @@ * * Undefined if no set bit exists, so code should check against 0 first. */ -static __always_inline unsigned long generic___fls(unsigned long word) +static __always_inline unsigned int generic___fls(unsigned long word) { - int num = BITS_PER_LONG - 1; + unsigned int num = BITS_PER_LONG - 1; #if BITS_PER_LONG == 64 if (!(word & (~0ul << 32))) { diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h index 87024da44d..cf4b3d33bf 100644 --- a/include/asm-generic/bitops/builtin-__ffs.h +++ b/include/asm-generic/bitops/builtin-__ffs.h @@ -8,7 +8,7 @@ * * Undefined if no bit exists, so code should check against 0 first. */ -static __always_inline unsigned long __ffs(unsigned long word) +static __always_inline unsigned int __ffs(unsigned long word) { return __builtin_ctzl(word); } diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h index 43a5aa9afb..6d72fc8a52 100644 --- a/include/asm-generic/bitops/builtin-__fls.h +++ b/include/asm-generic/bitops/builtin-__fls.h @@ -8,7 +8,7 @@ * * Undefined if no set bit exists, so code should check against 0 first. */ -static __always_inline unsigned long __fls(unsigned long word) +static __always_inline unsigned int __fls(unsigned long word) { return (sizeof(word) * 8) - 1 - __builtin_clzl(word); } diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index b7de3a4ead..3877209339 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -72,7 +72,7 @@ struct bug_entry { #endif /* - * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report + * WARN(), WARN_ON(), WARN_ON_ONCE(), and so on can be used to report * significant kernel issues that need prompt attention if they should ever * appear at runtime. 
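/*
 * [Illustrative sketch, not part of the diff] Typical use of the WARN
 * helpers described above: flag a "should never happen" condition once,
 * with a stack dump, without halting the machine the way BUG() would.
 */
static int foo_set_rate(unsigned int hz)	/* hypothetical helper */
{
	if (WARN_ON_ONCE(hz == 0))	/* warns and taints on first hit only */
		return -EINVAL;
	return 0;
}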
* diff --git a/include/asm-generic/codetag.lds.h b/include/asm-generic/codetag.lds.h new file mode 100644 index 0000000000..64f536b803 --- /dev/null +++ b/include/asm-generic/codetag.lds.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_GENERIC_CODETAG_LDS_H +#define __ASM_GENERIC_CODETAG_LDS_H + +#define SECTION_WITH_BOUNDARIES(_name) \ + . = ALIGN(8); \ + __start_##_name = .; \ + KEEP(*(_name)) \ + __stop_##_name = .; + +#define CODETAG_SECTIONS() \ + SECTION_WITH_BOUNDARIES(alloc_tags) + +#endif /* __ASM_GENERIC_CODETAG_LDS_H */ diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index bac63e874c..80de699bf6 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -9,6 +9,7 @@ #include <asm/page.h> /* I/O is all done through memory accesses */ #include <linux/string.h> /* for memset() and memcpy() */ +#include <linux/sizes.h> #include <linux/types.h> #include <linux/instruction_pointer.h> @@ -991,7 +992,6 @@ static inline void iowrite64_rep(volatile void __iomem *addr, #ifdef __KERNEL__ -#include <linux/vmalloc.h> #define __io_virt(x) ((void __force *)(x)) /* diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h deleted file mode 100644 index 9773582fd9..0000000000 --- a/include/asm-generic/page.h +++ /dev/null @@ -1,103 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_GENERIC_PAGE_H -#define __ASM_GENERIC_PAGE_H -/* - * Generic page.h implementation, for NOMMU architectures. - * This provides the dummy definitions for the memory management. - */ - -#ifdef CONFIG_MMU -#error need to provide a real asm/page.h -#endif - - -/* PAGE_SHIFT determines the page size */ - -#define PAGE_SHIFT 12 -#ifdef __ASSEMBLY__ -#define PAGE_SIZE (1 << PAGE_SHIFT) -#else -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#endif -#define PAGE_MASK (~(PAGE_SIZE-1)) - -#include <asm/setup.h> - -#ifndef __ASSEMBLY__ - -#define clear_page(page) memset((page), 0, PAGE_SIZE) -#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) - -#define clear_user_page(page, vaddr, pg) clear_page(page) -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) - -/* - * These are used to make use of C type-checking.. 
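/*
 * [Illustrative aside, not part of the diff] The single-member structs in
 * the page.h being deleted here exist purely so the compiler can tell the
 * page-table types apart; with bare unsigned longs the mix-up below would
 * compile silently (demo types are local stand-ins, not the kernel's):
 */
typedef struct { unsigned long pte; } demo_pte_t;
typedef struct { unsigned long pgprot; } demo_pgprot_t;

static inline void demo_set_pte(demo_pte_t pte) { (void)pte; }

/* demo_set_pte((demo_pgprot_t){ 0 });  <-- rejected: incompatible type */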
- */ -typedef struct { - unsigned long pte; -} pte_t; -typedef struct { - unsigned long pmd[16]; -} pmd_t; -typedef struct { - unsigned long pgd; -} pgd_t; -typedef struct { - unsigned long pgprot; -} pgprot_t; -typedef struct page *pgtable_t; - -#define pte_val(x) ((x).pte) -#define pmd_val(x) ((&x)->pmd[0]) -#define pgd_val(x) ((x).pgd) -#define pgprot_val(x) ((x).pgprot) - -#define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) -#define __pgd(x) ((pgd_t) { (x) } ) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -extern unsigned long memory_start; -extern unsigned long memory_end; - -#endif /* !__ASSEMBLY__ */ - -#define PAGE_OFFSET (0) - -#ifndef ARCH_PFN_OFFSET -#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) -#endif - -#ifndef __ASSEMBLY__ - -#define __va(x) ((void *)((unsigned long) (x))) -#define __pa(x) ((unsigned long) (x)) - -static inline unsigned long virt_to_pfn(const void *kaddr) -{ - return __pa(kaddr) >> PAGE_SHIFT; -} -#define virt_to_pfn virt_to_pfn -static inline void *pfn_to_virt(unsigned long pfn) -{ - return __va(pfn) << PAGE_SHIFT; -} -#define pfn_to_virt pfn_to_virt - -#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr)) -#define page_to_virt(page) pfn_to_virt(page_to_pfn(page)) - -#ifndef page_to_phys -#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) -#endif - -#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ - ((void *)(kaddr) < (void *)memory_end)) - -#endif /* __ASSEMBLY__ */ - -#include <asm-generic/memory_model.h> -#include <asm-generic/getorder.h> - -#endif /* __ASM_GENERIC_PAGE_H */ diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 879e5f8aa5..7c48f5fbf8 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -16,15 +16,16 @@ * * Return: pointer to the allocated memory or %NULL on error */ -static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm) +static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm) { - struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & + struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL & ~__GFP_HIGHMEM, 0); if (!ptdesc) return NULL; return ptdesc_address(ptdesc); } +#define __pte_alloc_one_kernel(...) alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__)) #ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL /** @@ -33,10 +34,11 @@ static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm) * * Return: pointer to the allocated memory or %NULL on error */ -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm) { - return __pte_alloc_one_kernel(mm); + return __pte_alloc_one_kernel_noprof(mm); } +#define pte_alloc_one_kernel(...) alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__)) #endif /** @@ -61,11 +63,11 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) * * Return: `struct page` referencing the ptdesc or %NULL on error */ -static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) +static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp) { struct ptdesc *ptdesc; - ptdesc = pagetable_alloc(gfp, 0); + ptdesc = pagetable_alloc_noprof(gfp, 0); if (!ptdesc) return NULL; if (!pagetable_pte_ctor(ptdesc)) { @@ -75,6 +77,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) return ptdesc_page(ptdesc); } +#define __pte_alloc_one(...) 
alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__)) #ifndef __HAVE_ARCH_PTE_ALLOC_ONE /** @@ -85,10 +88,11 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) * * Return: `struct page` referencing the ptdesc or %NULL on error */ -static inline pgtable_t pte_alloc_one(struct mm_struct *mm) +static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm) { - return __pte_alloc_one(mm, GFP_PGTABLE_USER); + return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER); } +#define pte_alloc_one(...) alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__)) #endif /* @@ -124,14 +128,14 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page) * * Return: pointer to the allocated memory or %NULL on error */ -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) { struct ptdesc *ptdesc; gfp_t gfp = GFP_PGTABLE_USER; if (mm == &init_mm) gfp = GFP_PGTABLE_KERNEL; - ptdesc = pagetable_alloc(gfp, 0); + ptdesc = pagetable_alloc_noprof(gfp, 0); if (!ptdesc) return NULL; if (!pagetable_pmd_ctor(ptdesc)) { @@ -140,6 +144,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) } return ptdesc_address(ptdesc); } +#define pmd_alloc_one(...) alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__)) #endif #ifndef __HAVE_ARCH_PMD_FREE @@ -157,7 +162,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) #if CONFIG_PGTABLE_LEVELS > 3 -static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) { gfp_t gfp = GFP_PGTABLE_USER; struct ptdesc *ptdesc; @@ -166,13 +171,14 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) gfp = GFP_PGTABLE_KERNEL; gfp &= ~__GFP_HIGHMEM; - ptdesc = pagetable_alloc(gfp, 0); + ptdesc = pagetable_alloc_noprof(gfp, 0); if (!ptdesc) return NULL; pagetable_pud_ctor(ptdesc); return ptdesc_address(ptdesc); } +#define __pud_alloc_one(...) alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__)) #ifndef __HAVE_ARCH_PUD_ALLOC_ONE /** @@ -184,10 +190,11 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) * * Return: pointer to the allocated memory or %NULL on error */ -static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) { - return __pud_alloc_one(mm, addr); + return __pud_alloc_one_noprof(mm, addr); } +#define pud_alloc_one(...) 
alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__)) #endif static inline void __pud_free(struct mm_struct *mm, pud_t *pud) diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index db13bb620f..c768de6f19 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -180,6 +180,11 @@ static inline bool is_kernel_rodata(unsigned long addr) addr < (unsigned long)__end_rodata; } +static inline bool is_kernel_ro_after_init(unsigned long addr) +{ + return addr >= (unsigned long)__start_ro_after_init && + addr < (unsigned long)__end_ro_after_init; +} /** * is_kernel_inittext - checks if the pointer address is located in the * .init.text section diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h index 933ca6581a..fabcefe8a8 100644 --- a/include/asm-generic/syscalls.h +++ b/include/asm-generic/syscalls.h @@ -19,7 +19,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, #ifndef sys_mmap asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - unsigned long fd, off_t pgoff); + unsigned long fd, unsigned long off); #endif #ifndef sys_rt_sigreturn diff --git a/include/asm-generic/fb.h b/include/asm-generic/video.h index 6ccabb400a..b1da2309d9 100644 --- a/include/asm-generic/fb.h +++ b/include/asm-generic/video.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_GENERIC_FB_H_ -#define __ASM_GENERIC_FB_H_ +#ifndef __ASM_GENERIC_VIDEO_H_ +#define __ASM_GENERIC_VIDEO_H_ /* * Only include this header file from your architecture's <asm/fb.h>. @@ -10,8 +10,9 @@ #include <linux/io.h> #include <linux/mm_types.h> #include <linux/pgtable.h> +#include <linux/types.h> -struct fb_info; +struct device; #ifndef pgprot_framebuffer #define pgprot_framebuffer pgprot_framebuffer @@ -23,11 +24,11 @@ static inline pgprot_t pgprot_framebuffer(pgprot_t prot, } #endif -#ifndef fb_is_primary_device -#define fb_is_primary_device fb_is_primary_device -static inline int fb_is_primary_device(struct fb_info *info) +#ifndef video_is_primary_device +#define video_is_primary_device video_is_primary_device +static inline bool video_is_primary_device(struct device *dev) { - return 0; + return false; } #endif @@ -132,4 +133,4 @@ static inline void fb_memset_io(volatile void __iomem *addr, int c, size_t n) #define fb_memset fb_memset_io #endif -#endif /* __ASM_GENERIC_FB_H_ */ +#endif /* __ASM_GENERIC_VIDEO_H_ */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index f7749d0f25..70bf100407 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -3,7 +3,7 @@ * linker scripts. * * A minimal linker scripts has following content: - * [This is a sample, architectures may have special requiriements] + * [This is a sample, architectures may have special requirements] * * OUTPUT_FORMAT(...) * OUTPUT_ARCH(...) 
@@ -50,6 +50,8 @@ * [__nosave_begin, __nosave_end] for the nosave data */ +#include <asm-generic/codetag.lds.h> + #ifndef LOAD_OFFSET #define LOAD_OFFSET 0 #endif @@ -101,7 +103,7 @@ #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L* -#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral* +#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral* #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* #else #define TEXT_MAIN .text @@ -366,6 +368,7 @@ . = ALIGN(8); \ BOUNDED_SECTION_BY(__dyndbg_classes, ___dyndbg_classes) \ BOUNDED_SECTION_BY(__dyndbg, ___dyndbg) \ + CODETAG_SECTIONS() \ LIKELY_PROFILE() \ BRANCH_PROFILE() \ TRACE_PRINTKS() \ @@ -399,13 +402,13 @@ #define INIT_TASK_DATA(align) \ . = ALIGN(align); \ - __start_init_task = .; \ + __start_init_stack = .; \ init_thread_union = .; \ init_stack = .; \ KEEP(*(.data..init_task)) \ KEEP(*(.data..init_thread_info)) \ - . = __start_init_task + THREAD_SIZE; \ - __end_init_task = .; + . = __start_init_stack + THREAD_SIZE; \ + __end_init_stack = .; #define JUMP_TABLE_DATA \ . = ALIGN(8); \ @@ -449,10 +452,29 @@ #endif /* + * Some symbol definitions will not exist yet during the first pass of the + * link, but are guaranteed to exist in the final link. Provide preliminary + * definitions that will be superseded in the final link to avoid having to + * rely on weak external linkage, which requires a GOT when used in position + * independent code. + */ +#define PRELIMINARY_SYMBOL_DEFINITIONS \ + PROVIDE(kallsyms_addresses = .); \ + PROVIDE(kallsyms_offsets = .); \ + PROVIDE(kallsyms_names = .); \ + PROVIDE(kallsyms_num_syms = .); \ + PROVIDE(kallsyms_relative_base = .); \ + PROVIDE(kallsyms_token_table = .); \ + PROVIDE(kallsyms_token_index = .); \ + PROVIDE(kallsyms_markers = .); \ + PROVIDE(kallsyms_seqs_of_names = .); + +/* * Read only Data */ #define RO_DATA(align) \ . 
= ALIGN((align)); \ + PRELIMINARY_SYMBOL_DEFINITIONS \ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ __start_rodata = .; \ *(.rodata) *(.rodata.*) \ diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h deleted file mode 100644 index b1a49677fe..0000000000 --- a/include/asm-generic/vtime.h +++ /dev/null @@ -1 +0,0 @@ -/* no content, but patch(1) dislikes empty files */ diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 80e243611f..54937b6152 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -56,31 +56,7 @@ struct crypto_acomp { struct crypto_tfm base; }; -/* - * struct crypto_istat_compress - statistics for compress algorithm - * @compress_cnt: number of compress requests - * @compress_tlen: total data size handled by compress requests - * @decompress_cnt: number of decompress requests - * @decompress_tlen: total data size handled by decompress requests - * @err_cnt: number of error for compress requests - */ -struct crypto_istat_compress { - atomic64_t compress_cnt; - atomic64_t compress_tlen; - atomic64_t decompress_cnt; - atomic64_t decompress_tlen; - atomic64_t err_cnt; -}; - -#ifdef CONFIG_CRYPTO_STATS -#define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat; -#else -#define COMP_ALG_COMMON_STATS -#endif - #define COMP_ALG_COMMON { \ - COMP_ALG_COMMON_STATS \ - \ struct crypto_alg base; \ } struct comp_alg_common COMP_ALG_COMMON; @@ -261,27 +237,6 @@ static inline void acomp_request_set_params(struct acomp_req *req, req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; } -static inline struct crypto_istat_compress *comp_get_stat( - struct comp_alg_common *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif -} - -static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err) -{ - if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) - return err; - - if (err && err != -EINPROGRESS && err != -EBUSY) - atomic64_inc(&comp_get_stat(alg)->err_cnt); - - return err; -} - /** * crypto_acomp_compress() -- Invoke asynchronous compress operation * @@ -293,19 +248,7 @@ static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err) */ static inline int crypto_acomp_compress(struct acomp_req *req) { - struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); - struct comp_alg_common *alg; - - alg = crypto_comp_alg_common(tfm); - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_compress *istat = comp_get_stat(alg); - - atomic64_inc(&istat->compress_cnt); - atomic64_add(req->slen, &istat->compress_tlen); - } - - return crypto_comp_errstat(alg, tfm->compress(req)); + return crypto_acomp_reqtfm(req)->compress(req); } /** @@ -319,19 +262,7 @@ static inline int crypto_acomp_compress(struct acomp_req *req) */ static inline int crypto_acomp_decompress(struct acomp_req *req) { - struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); - struct comp_alg_common *alg; - - alg = crypto_comp_alg_common(tfm); - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_compress *istat = comp_get_stat(alg); - - atomic64_inc(&istat->decompress_cnt); - atomic64_add(req->slen, &istat->decompress_tlen); - } - - return crypto_comp_errstat(alg, tfm->decompress(req)); + return crypto_acomp_reqtfm(req)->decompress(req); } #endif diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 51382befbe..0e8a416386 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -101,22 +101,6 @@ struct aead_request { void *__ctx[] CRYPTO_MINALIGN_ATTR; }; -/* - * struct crypto_istat_aead - statistics for 
AEAD algorithm - * @encrypt_cnt: number of encrypt requests - * @encrypt_tlen: total data size handled by encrypt requests - * @decrypt_cnt: number of decrypt requests - * @decrypt_tlen: total data size handled by decrypt requests - * @err_cnt: number of error for AEAD requests - */ -struct crypto_istat_aead { - atomic64_t encrypt_cnt; - atomic64_t encrypt_tlen; - atomic64_t decrypt_cnt; - atomic64_t decrypt_tlen; - atomic64_t err_cnt; -}; - /** * struct aead_alg - AEAD cipher definition * @maxauthsize: Set the maximum authentication tag size supported by the @@ -135,7 +119,6 @@ struct crypto_istat_aead { * @setkey: see struct skcipher_alg * @encrypt: see struct skcipher_alg * @decrypt: see struct skcipher_alg - * @stat: statistics for AEAD algorithm * @ivsize: see struct skcipher_alg * @chunksize: see struct skcipher_alg * @init: Initialize the cryptographic transformation object. This function @@ -162,10 +145,6 @@ struct aead_alg { int (*init)(struct crypto_aead *tfm); void (*exit)(struct crypto_aead *tfm); -#ifdef CONFIG_CRYPTO_STATS - struct crypto_istat_aead stat; -#endif - unsigned int ivsize; unsigned int maxauthsize; unsigned int chunksize; diff --git a/include/crypto/aes.h b/include/crypto/aes.h index 2090729701..9339da7c20 100644 --- a/include/crypto/aes.h +++ b/include/crypto/aes.h @@ -87,4 +87,9 @@ void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); extern const u8 crypto_aes_sbox[]; extern const u8 crypto_aes_inv_sbox[]; +void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src, + int len, const u8 iv[AES_BLOCK_SIZE]); +void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src, + int len, const u8 iv[AES_BLOCK_SIZE]); + #endif diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 31c111bebb..18a10cad07 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -54,26 +54,6 @@ struct crypto_akcipher { struct crypto_tfm base; }; -/* - * struct crypto_istat_akcipher - statistics for akcipher algorithm - * @encrypt_cnt: number of encrypt requests - * @encrypt_tlen: total data size handled by encrypt requests - * @decrypt_cnt: number of decrypt requests - * @decrypt_tlen: total data size handled by decrypt requests - * @verify_cnt: number of verify operation - * @sign_cnt: number of sign requests - * @err_cnt: number of error for akcipher requests - */ -struct crypto_istat_akcipher { - atomic64_t encrypt_cnt; - atomic64_t encrypt_tlen; - atomic64_t decrypt_cnt; - atomic64_t decrypt_tlen; - atomic64_t verify_cnt; - atomic64_t sign_cnt; - atomic64_t err_cnt; -}; - /** * struct akcipher_alg - generic public key algorithm * @@ -110,7 +90,6 @@ struct crypto_istat_akcipher { * @exit: Deinitialize the cryptographic transformation object. This is a * counterpart to @init, used to remove various changes set in * @init. 
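/*
 * [Illustrative sketch, not part of the diff] With the CONFIG_CRYPTO_STATS
 * bookkeeping removed, crypto_akcipher_encrypt() in the surrounding hunks
 * collapses to a direct alg->encrypt() dispatch; a caller still drives it
 * through the usual request object (key setup via
 * crypto_akcipher_set_pub_key() and async completion handling omitted):
 */
static int demo_rsa_encrypt(struct scatterlist *src, unsigned int src_len,
			    struct scatterlist *dst, unsigned int dst_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	int err;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_akcipher(tfm);
		return -ENOMEM;
	}

	akcipher_request_set_crypt(req, src, dst, src_len, dst_len);
	err = crypto_akcipher_encrypt(req);	/* now a thin wrapper */

	akcipher_request_free(req);
	crypto_free_akcipher(tfm);
	return err;
}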
- * @stat: Statistics for akcipher algorithm * * @base: Common crypto API algorithm data structure */ @@ -127,10 +106,6 @@ struct akcipher_alg { int (*init)(struct crypto_akcipher *tfm); void (*exit)(struct crypto_akcipher *tfm); -#ifdef CONFIG_CRYPTO_STATS - struct crypto_istat_akcipher stat; -#endif - struct crypto_alg base; }; @@ -302,27 +277,6 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) return alg->max_size(tfm); } -static inline struct crypto_istat_akcipher *akcipher_get_stat( - struct akcipher_alg *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif -} - -static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err) -{ - if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) - return err; - - if (err && err != -EINPROGRESS && err != -EBUSY) - atomic64_inc(&akcipher_get_stat(alg)->err_cnt); - - return err; -} - /** * crypto_akcipher_encrypt() - Invoke public key encrypt operation * @@ -336,16 +290,8 @@ static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err) static inline int crypto_akcipher_encrypt(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct akcipher_alg *alg = crypto_akcipher_alg(tfm); - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_akcipher *istat = akcipher_get_stat(alg); - - atomic64_inc(&istat->encrypt_cnt); - atomic64_add(req->src_len, &istat->encrypt_tlen); - } - - return crypto_akcipher_errstat(alg, alg->encrypt(req)); + return crypto_akcipher_alg(tfm)->encrypt(req); } /** @@ -361,16 +307,8 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req) static inline int crypto_akcipher_decrypt(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct akcipher_alg *alg = crypto_akcipher_alg(tfm); - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_akcipher *istat = akcipher_get_stat(alg); - - atomic64_inc(&istat->decrypt_cnt); - atomic64_add(req->src_len, &istat->decrypt_tlen); - } - - return crypto_akcipher_errstat(alg, alg->decrypt(req)); + return crypto_akcipher_alg(tfm)->decrypt(req); } /** @@ -422,12 +360,8 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm, static inline int crypto_akcipher_sign(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct akcipher_alg *alg = crypto_akcipher_alg(tfm); - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_inc(&akcipher_get_stat(alg)->sign_cnt); - - return crypto_akcipher_errstat(alg, alg->sign(req)); + return crypto_akcipher_alg(tfm)->sign(req); } /** @@ -447,12 +381,8 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req) static inline int crypto_akcipher_verify(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct akcipher_alg *alg = crypto_akcipher_alg(tfm); - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_inc(&akcipher_get_stat(alg)->verify_cnt); - return crypto_akcipher_errstat(alg, alg->verify(req)); + return crypto_akcipher_alg(tfm)->verify(req); } /** diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 7a4a71af65..156de41ca7 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -61,9 +61,6 @@ struct crypto_type { void (*show)(struct seq_file *m, struct crypto_alg *alg); int (*report)(struct sk_buff *skb, struct crypto_alg *alg); void (*free)(struct crypto_instance *inst); -#ifdef CONFIG_CRYPTO_STATS - int (*report_stat)(struct sk_buff *skb, struct 
crypto_alg *alg); -#endif unsigned int type; unsigned int maskclear; diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h index 70964781eb..7d90c5e822 100644 --- a/include/crypto/ecc_curve.h +++ b/include/crypto/ecc_curve.h @@ -23,6 +23,7 @@ struct ecc_point { * struct ecc_curve - definition of elliptic curve * * @name: Short name of the curve. + * @nbits: The number of bits of a curve. * @g: Generator point of the curve. * @p: Prime number, if Barrett's reduction is used for this curve * pre-calculated value 'mu' is appended to the @p after ndigits. @@ -34,6 +35,7 @@ struct ecc_point { */ struct ecc_curve { char *name; + u32 nbits; struct ecc_point g; u64 *p; u64 *n; diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h index a9f98078d2..9784ecdd2f 100644 --- a/include/crypto/ecdh.h +++ b/include/crypto/ecdh.h @@ -26,6 +26,7 @@ #define ECC_CURVE_NIST_P192 0x0001 #define ECC_CURVE_NIST_P256 0x0002 #define ECC_CURVE_NIST_P384 0x0003 +#define ECC_CURVE_NIST_P521 0x0004 /** * struct ecdh - define an ECDH private key diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 5d61f576cf..2d5ea9f9ff 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -24,26 +24,7 @@ struct crypto_ahash; */ /* - * struct crypto_istat_hash - statistics for has algorithm - * @hash_cnt: number of hash requests - * @hash_tlen: total data size hashed - * @err_cnt: number of error for hash requests - */ -struct crypto_istat_hash { - atomic64_t hash_cnt; - atomic64_t hash_tlen; - atomic64_t err_cnt; -}; - -#ifdef CONFIG_CRYPTO_STATS -#define HASH_ALG_COMMON_STAT struct crypto_istat_hash stat; -#else -#define HASH_ALG_COMMON_STAT -#endif - -/* * struct hash_alg_common - define properties of message digest - * @stat: Statistics for hash algorithm. * @digestsize: Size of the result of the transformation. A buffer of this size * must be available to the @final and @finup calls, so they can * store the resulting hash into it. For various predefined sizes, @@ -60,8 +41,6 @@ struct crypto_istat_hash { * information. */ #define HASH_ALG_COMMON { \ - HASH_ALG_COMMON_STAT \ - \ unsigned int digestsize; \ unsigned int statesize; \ \ @@ -243,7 +222,6 @@ struct shash_alg { }; }; #undef HASH_ALG_COMMON -#undef HASH_ALG_COMMON_STAT struct crypto_ahash { bool using_shash; /* Underlying algorithm is shash, not ahash */ @@ -578,19 +556,20 @@ static inline void ahash_request_set_tfm(struct ahash_request *req, * * Return: allocated request handle in case of success, or NULL if out of memory */ -static inline struct ahash_request *ahash_request_alloc( +static inline struct ahash_request *ahash_request_alloc_noprof( struct crypto_ahash *tfm, gfp_t gfp) { struct ahash_request *req; - req = kmalloc(sizeof(struct ahash_request) + - crypto_ahash_reqsize(tfm), gfp); + req = kmalloc_noprof(sizeof(struct ahash_request) + + crypto_ahash_reqsize(tfm), gfp); if (likely(req)) ahash_request_set_tfm(req, tfm); return req; } +#define ahash_request_alloc(...) 
alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__)) /** * ahash_request_free() - zeroize and free the request data structure diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 78ecaf5db0..f7b3b93f3a 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -166,7 +166,8 @@ int af_alg_unregister_type(const struct af_alg_type *type); int af_alg_release(struct socket *sock); void af_alg_release_parent(struct sock *sk); -int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern); +int af_alg_accept(struct sock *sk, struct socket *newsock, + struct proto_accept_arg *arg); void af_alg_free_sg(struct af_alg_sgl *sgl); diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 4ac46bafba..8831edaafc 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -31,7 +31,6 @@ * @init. * * @reqsize: Context size for (de)compression requests - * @stat: Statistics for compress algorithm * @base: Common crypto API algorithm data structure * @calg: Common algorithm data structure shared with scomp */ @@ -69,15 +68,16 @@ static inline void acomp_request_complete(struct acomp_req *req, crypto_request_complete(&req->base, err); } -static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) +static inline struct acomp_req *__acomp_request_alloc_noprof(struct crypto_acomp *tfm) { struct acomp_req *req; - req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); + req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); if (likely(req)) acomp_request_set_tfm(req, tfm); return req; } +#define __acomp_request_alloc(...) alloc_hooks(__acomp_request_alloc_noprof(__VA_ARGS__)) static inline void __acomp_request_free(struct acomp_req *req) { diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h deleted file mode 100644 index fd54074332..0000000000 --- a/include/crypto/internal/cryptouser.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include <linux/cryptouser.h> -#include <net/netlink.h> - -struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact); - -#ifdef CONFIG_CRYPTO_STATS -int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs); -#else -static inline int crypto_reportstat(struct sk_buff *in_skb, - struct nlmsghdr *in_nlh, - struct nlattr **attrs) -{ - return -ENOTSUPP; -} -#endif diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h index 4f6c1a6888..f7e75e1e71 100644 --- a/include/crypto/internal/ecc.h +++ b/include/crypto/internal/ecc.h @@ -33,7 +33,8 @@ #define ECC_CURVE_NIST_P192_DIGITS 3 #define ECC_CURVE_NIST_P256_DIGITS 4 #define ECC_CURVE_NIST_P384_DIGITS 6 -#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */ +#define ECC_CURVE_NIST_P521_DIGITS 9 +#define ECC_MAX_DIGITS DIV_ROUND_UP(521, 64) /* NIST P521 */ #define ECC_DIGITS_TO_BYTES_SHIFT 3 @@ -57,6 +58,16 @@ static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigit } /** + * ecc_digits_from_bytes() - Create ndigits-sized digits array from byte array + * @in: Input byte array + * @nbytes: Size of input byte array + * @out: Output digits array + * @ndigits: Number of digits to create from byte array + */ +void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes, + u64 *out, unsigned int ndigits); + +/** * ecc_is_key_valid() - Validate a given ECDH private key * * @curve_id: id representing the curve to use @@ -81,7 +92,8 @@ int
ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, * Returns 0 if the private key was generated successfully, a negative value * if an error occurred. */ -int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey); +int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, + u64 *private_key); /** * ecc_make_pub_key() - Compute an ECC public key diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index 858fe3965a..07a10fd2d3 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -27,7 +27,6 @@ struct crypto_scomp { * @free_ctx: Function frees context allocated with alloc_ctx * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @stat: Statistics for compress algorithm * @base: Common crypto API algorithm data structure * @calg: Common algorithm data structure shared with acomp */ diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index 1988e24a0d..2d9c4de57b 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -51,20 +51,6 @@ struct crypto_kpp { struct crypto_tfm base; }; -/* - * struct crypto_istat_kpp - statistics for KPP algorithm - * @setsecret_cnt: number of setsecrey operation - * @generate_public_key_cnt: number of generate_public_key operation - * @compute_shared_secret_cnt: number of compute_shared_secret operation - * @err_cnt: number of error for KPP requests - */ -struct crypto_istat_kpp { - atomic64_t setsecret_cnt; - atomic64_t generate_public_key_cnt; - atomic64_t compute_shared_secret_cnt; - atomic64_t err_cnt; -}; - /** * struct kpp_alg - generic key-agreement protocol primitives * @@ -87,7 +73,6 @@ struct crypto_istat_kpp { * @exit: Undo everything @init did.
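With the CONFIG_CRYPTO_STATS plumbing removed, the crypto_kpp_*() wrappers in the hunks that follow reduce to direct calls into these kpp_alg callbacks. A minimal consumer-side sketch, assuming a synchronous transform; the function name is hypothetical, the kpp request helpers are the existing API:

#include <crypto/kpp.h>
#include <linux/scatterlist.h>

static int example_kpp_generate_public_key(struct crypto_kpp *tfm,
					   struct scatterlist *pub,
					   unsigned int pub_len)
{
	struct kpp_request *req;
	int err;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	kpp_request_set_input(req, NULL, 0);
	kpp_request_set_output(req, pub, pub_len);
	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	/* After this patch this is a direct call into
	 * kpp_alg->generate_public_key(), with no statistics counters
	 * on the way. Async transforms may still return -EINPROGRESS. */
	err = crypto_kpp_generate_public_key(req);

	kpp_request_free(req);
	return err;
}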
* * @base: Common crypto API algorithm data structure - * @stat: Statistics for KPP algorithm */ struct kpp_alg { int (*set_secret)(struct crypto_kpp *tfm, const void *buffer, @@ -100,10 +85,6 @@ struct kpp_alg { int (*init)(struct crypto_kpp *tfm); void (*exit)(struct crypto_kpp *tfm); -#ifdef CONFIG_CRYPTO_STATS - struct crypto_istat_kpp stat; -#endif - struct crypto_alg base; }; @@ -291,26 +272,6 @@ struct kpp_secret { unsigned short len; }; -static inline struct crypto_istat_kpp *kpp_get_stat(struct kpp_alg *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif -} - -static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err) -{ - if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) - return err; - - if (err && err != -EINPROGRESS && err != -EBUSY) - atomic64_inc(&kpp_get_stat(alg)->err_cnt); - - return err; -} - /** * crypto_kpp_set_secret() - Invoke kpp operation * @@ -329,12 +290,7 @@ static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err) static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, const void *buffer, unsigned int len) { - struct kpp_alg *alg = crypto_kpp_alg(tfm); - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_inc(&kpp_get_stat(alg)->setsecret_cnt); - - return crypto_kpp_errstat(alg, alg->set_secret(tfm, buffer, len)); + return crypto_kpp_alg(tfm)->set_secret(tfm, buffer, len); } /** @@ -353,12 +309,8 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, static inline int crypto_kpp_generate_public_key(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct kpp_alg *alg = crypto_kpp_alg(tfm); - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_inc(&kpp_get_stat(alg)->generate_public_key_cnt); - return crypto_kpp_errstat(alg, alg->generate_public_key(req)); + return crypto_kpp_alg(tfm)->generate_public_key(req); } /** @@ -374,12 +326,8 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req) static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct kpp_alg *alg = crypto_kpp_alg(tfm); - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_inc(&kpp_get_stat(alg)->compute_shared_secret_cnt); - return crypto_kpp_errstat(alg, alg->compute_shared_secret(req)); + return crypto_kpp_alg(tfm)->compute_shared_secret(req); } /** diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 6abe5102e5..5ac4388f50 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -15,20 +15,6 @@ struct crypto_rng; -/* - * struct crypto_istat_rng: statistics for RNG algorithm - * @generate_cnt: number of RNG generate requests - * @generate_tlen: total data size of generated data by the RNG - * @seed_cnt: number of times the RNG was seeded - * @err_cnt: number of error for RNG requests - */ -struct crypto_istat_rng { - atomic64_t generate_cnt; - atomic64_t generate_tlen; - atomic64_t seed_cnt; - atomic64_t err_cnt; -}; - /** * struct rng_alg - random number generator definition * @@ -46,7 +32,6 @@ struct crypto_istat_rng { * size of the seed is defined with @seedsize . * @set_ent: Set entropy that would otherwise be obtained from * entropy source. Internal use only. - * @stat: Statistics for rng algorithm * @seedsize: The seed size required for a random number generator * initialization defined with this variable. 
Some * random number generators does not require a seed @@ -63,10 +48,6 @@ struct rng_alg { void (*set_ent)(struct crypto_rng *tfm, const u8 *data, unsigned int len); -#ifdef CONFIG_CRYPTO_STATS - struct crypto_istat_rng stat; -#endif - unsigned int seedsize; struct crypto_alg base; @@ -144,26 +125,6 @@ static inline void crypto_free_rng(struct crypto_rng *tfm) crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); } -static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif -} - -static inline int crypto_rng_errstat(struct rng_alg *alg, int err) -{ - if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) - return err; - - if (err && err != -EINPROGRESS && err != -EBUSY) - atomic64_inc(&rng_get_stat(alg)->err_cnt); - - return err; -} - /** * crypto_rng_generate() - get random number * @tfm: cipher handle @@ -182,17 +143,7 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { - struct rng_alg *alg = crypto_rng_alg(tfm); - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { - struct crypto_istat_rng *istat = rng_get_stat(alg); - - atomic64_inc(&istat->generate_cnt); - atomic64_add(dlen, &istat->generate_tlen); - } - - return crypto_rng_errstat(alg, - alg->generate(tfm, src, slen, dst, dlen)); + return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); } /** diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index c8857d7bdb..18a86e0af0 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -65,28 +65,6 @@ struct crypto_lskcipher { }; /* - * struct crypto_istat_cipher - statistics for cipher algorithm - * @encrypt_cnt: number of encrypt requests - * @encrypt_tlen: total data size handled by encrypt requests - * @decrypt_cnt: number of decrypt requests - * @decrypt_tlen: total data size handled by decrypt requests - * @err_cnt: number of error for cipher requests - */ -struct crypto_istat_cipher { - atomic64_t encrypt_cnt; - atomic64_t encrypt_tlen; - atomic64_t decrypt_cnt; - atomic64_t decrypt_tlen; - atomic64_t err_cnt; -}; - -#ifdef CONFIG_CRYPTO_STATS -#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat; -#else -#define SKCIPHER_ALG_COMMON_STAT -#endif - -/* * struct skcipher_alg_common - common properties of skcipher_alg * @min_keysize: Minimum key size supported by the transformation. This is the * smallest key length supported by this transformation algorithm. @@ -103,7 +81,6 @@ struct crypto_istat_cipher { * @chunksize: Equal to the block size except for stream ciphers such as * CTR where it is set to the underlying block size. * @statesize: Size of the internal state for the algorithm. - * @stat: Statistics for cipher algorithm * @base: Definition of a generic crypto algorithm. 
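The rng conversion in the hunk above follows the same pattern: crypto_rng_generate() now invokes rng_alg->generate() directly. A short sketch of a caller using the generic "stdrng" allocation; the function name is hypothetical:

#include <crypto/rng.h>

static int example_fill_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* Reaches rng_alg->generate() directly, no istat counters. */
	err = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return err;
}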
*/ #define SKCIPHER_ALG_COMMON { \ @@ -113,8 +90,6 @@ struct crypto_istat_cipher { unsigned int chunksize; \ unsigned int statesize; \ \ - SKCIPHER_ALG_COMMON_STAT \ - \ struct crypto_alg base; \ } struct skcipher_alg_common SKCIPHER_ALG_COMMON; @@ -861,19 +836,20 @@ static inline struct skcipher_request *skcipher_request_cast( * * Return: allocated request handle in case of success, or NULL if out of memory */ -static inline struct skcipher_request *skcipher_request_alloc( +static inline struct skcipher_request *skcipher_request_alloc_noprof( struct crypto_skcipher *tfm, gfp_t gfp) { struct skcipher_request *req; - req = kmalloc(sizeof(struct skcipher_request) + - crypto_skcipher_reqsize(tfm), gfp); + req = kmalloc_noprof(sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(tfm), gfp); if (likely(req)) skcipher_request_set_tfm(req, tfm); return req; } +#define skcipher_request_alloc(...) alloc_hooks(skcipher_request_alloc_noprof(__VA_ARGS__)) /** * skcipher_request_free() - zeroize and free request data structure diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 724c45e3e9..9be85b821a 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -22,6 +22,9 @@ #ifndef __AMD_ASIC_TYPE_H__ #define __AMD_ASIC_TYPE_H__ + +#include <linux/types.h> + /* * Supported ASIC types */ diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h index e0c1050512..9764d6eb5b 100644 --- a/include/drm/bridge/samsung-dsim.h +++ b/include/drm/bridge/samsung-dsim.h @@ -11,9 +11,11 @@ #include <linux/regulator/consumer.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_of.h> +#include <drm/drm_bridge.h> #include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> +struct platform_device; struct samsung_dsim; #define DSIM_STATE_ENABLED BIT(0) diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index 4891bd916d..0b032faa8c 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -1150,6 +1150,8 @@ #define DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 0x2214 /* 2.0 E11 */ # define DP_ADAPTIVE_SYNC_SDP_SUPPORTED (1 << 0) +# define DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE GENMASK(1, 0) +# define DP_ADAPTIVE_SYNC_SDP_LENGTH GENMASK(5, 0) # define DP_AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED (1 << 1) # define DP_VSC_EXT_SDP_FRAMEWORK_VERSION_1_SUPPORTED (1 << 4) @@ -1639,10 +1641,12 @@ enum drm_dp_phy { #define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */ #define DP_SDP_ISRC 0x06 /* DP 1.2 */ #define DP_SDP_VSC 0x07 /* DP 1.2 */ +#define DP_SDP_ADAPTIVE_SYNC 0x22 /* DP 1.4 */ #define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */ #define DP_SDP_PPS 0x10 /* DP 1.4 */ #define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */ #define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */ + /* 0x80+ CEA-861 infoframe types */ #define DP_SDP_AUDIO_INFOFRAME_HB2 0x1b @@ -1798,4 +1802,11 @@ enum dp_content_type { DP_CONTENT_TYPE_GAME = 0x04, }; +enum operation_mode { + DP_AS_SDP_AVT_DYNAMIC_VTOTAL = 0x00, + DP_AS_SDP_AVT_FIXED_VTOTAL = 0x01, + DP_AS_SDP_FAVT_TRR_NOT_REACHED = 0x02, + DP_AS_SDP_FAVT_TRR_REACHED = 0x03 +}; + #endif /* _DRM_DP_H_ */ diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index a62fcd051d..8bed890eec 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -98,9 +98,39 @@ struct drm_dp_vsc_sdp { enum dp_content_type content_type; }; +/** + * struct drm_dp_as_sdp - drm DP Adaptive Sync SDP + * + * This structure 
represents a DP Adaptive Sync SDP of drm. * It is based on DP 2.1 spec [Table 2-126: Adaptive-Sync SDP Header Bytes] and + * [Table 2-127: Adaptive-Sync SDP Payload for DB0 through DB8] + * + * @sdp_type: Secondary-data packet type + * @revision: Revision Number + * @length: Number of valid data bytes + * @vtotal: Minimum Vertical Vtotal + * @target_rr: Target Refresh Rate + * @duration_incr_ms: Successive frame duration increase + * @duration_decr_ms: Successive frame duration decrease + * @mode: Adaptive Sync Operation Mode + */ +struct drm_dp_as_sdp { + unsigned char sdp_type; + unsigned char revision; + unsigned char length; + int vtotal; + int target_rr; + int duration_incr_ms; + int duration_decr_ms; + enum operation_mode mode; +}; + +void drm_dp_as_sdp_log(struct drm_printer *p, + const struct drm_dp_as_sdp *as_sdp); void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc); bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]); @@ -222,6 +252,12 @@ drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) } static inline bool +drm_dp_128b132b_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B; +} + +static inline bool drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { return dpcd[DP_EDP_CONFIGURATION_CAP] & @@ -422,7 +458,18 @@ struct drm_dp_aux { * @wait_hpd_asserted: wait for HPD to be asserted * * This is mainly useful for eDP panel drivers to wait for an eDP - * panel to finish powering on. This is an optional function. + * panel to finish powering on. It is optional for DP AUX controllers + * to implement this function. It is required for DP AUX endpoints + * (panel drivers) to call this function after powering up but before + * doing AUX transfers unless the DP AUX endpoint driver knows that + * we're not using the AUX controller's HPD. One example of the panel + * driver not needing to call this is if HPD is hooked up to a GPIO + * that the panel driver can read directly. + * + * If a DP AUX controller does not implement this function then it + * may still support eDP panels that use the AUX controller's built-in + * HPD signal by implementing a long wait for HPD in the transfer() + * callback, though this is deprecated. * * This function will efficiently wait for the HPD signal to be * asserted. The `wait_us` parameter that is passed in says that we @@ -722,7 +769,7 @@ static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel, #endif -#ifdef CONFIG_DRM_DP_CEC +#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CEC void drm_dp_cec_irq(struct drm_dp_aux *aux); void drm_dp_cec_register_connector(struct drm_dp_aux *aux, struct drm_connector *connector); diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 6c9145abc7..cfe096389d 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -83,7 +83,6 @@ struct drm_dp_mst_branch; * @passthrough_aux: parent aux to which DSC pass-through requests should be * sent, only set if DSC pass-through is possible. * @parent: branch device parent of this port - * @vcpi: Virtual Channel Payload info for this port. * @connector: DRM connector this port is connected to. Protected by * &drm_dp_mst_topology_mgr.base.lock.
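To illustrate the Adaptive Sync SDP additions above, here is a hedged sketch of how a source driver might build and dump such an SDP, gated on the new DPCD capability check. The numeric values are invented for illustration:

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_print.h>

static void example_log_as_sdp(struct drm_device *drm, struct drm_dp_aux *aux,
			       const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	struct drm_printer p = drm_info_printer(drm->dev);
	struct drm_dp_as_sdp as_sdp = {
		.sdp_type = DP_SDP_ADAPTIVE_SYNC,
		.revision = 2,			/* illustrative values only */
		.length = 9,
		.vtotal = 1125,
		.target_rr = 120,
		.mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL,
	};

	if (!drm_dp_as_sdp_supported(aux, dpcd))
		return;

	drm_dp_as_sdp_log(&p, &as_sdp);
}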
* @mgr: topology manager this port lives under. @@ -818,7 +817,28 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); -bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); +/** + * enum drm_dp_mst_mode - sink's MST mode capability + */ +enum drm_dp_mst_mode { + /** + * @DRM_DP_SST: The sink does not support MST nor single stream sideband + * messaging. + */ + DRM_DP_SST, + /** + * @DRM_DP_MST: Sink supports MST, more than one stream and single + * stream sideband messaging. + */ + DRM_DP_MST, + /** + * @DRM_DP_SST_SIDEBAND_MSG: Sink supports only one stream and single + * stream sideband messaging. + */ + DRM_DP_SST_SIDEBAND_MSG, +}; + +enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, @@ -927,6 +947,13 @@ int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *n void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); +static inline +bool drm_dp_mst_port_is_logical(struct drm_dp_mst_port *port) +{ + return port->port_num >= DP_MST_LOGICAL_PORT_0; +} + +struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port); struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port); static inline struct drm_dp_mst_topology_state * diff --git a/include/drm/display/drm_dsc.h b/include/drm/display/drm_dsc.h index bc90273d06..bbbe743847 100644 --- a/include/drm/display/drm_dsc.h +++ b/include/drm/display/drm_dsc.h @@ -40,9 +40,6 @@ #define DSC_PPS_RC_RANGE_MINQP_SHIFT 11 #define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6 #define DSC_PPS_NATIVE_420_SHIFT 1 -#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16 -#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0 -#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13 /** * struct drm_dsc_rc_range_parameters - DSC Rate Control range parameters diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h index a5b39fc010..2a74fa9d0c 100644 --- a/include/drm/drm_buddy.h +++ b/include/drm/drm_buddy.h @@ -25,6 +25,8 @@ #define DRM_BUDDY_RANGE_ALLOCATION BIT(0) #define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1) #define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) +#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3) +#define DRM_BUDDY_CLEARED BIT(4) struct drm_buddy_block { #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) @@ -32,8 +34,9 @@ struct drm_buddy_block { #define DRM_BUDDY_ALLOCATED (1 << 10) #define DRM_BUDDY_FREE (2 << 10) #define DRM_BUDDY_SPLIT (3 << 10) +#define DRM_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9) /* Free to be used, if needed in the future */ -#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6) +#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6) #define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) u64 header; @@ -53,8 +56,8 @@ struct drm_buddy_block { struct list_head tmp_link; }; -/* Order-zero must be at least PAGE_SIZE */ -#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT) +/* Order-zero must be at least SZ_4K */ +#define DRM_BUDDY_MAX_ORDER (63 - 12) /* * Binary Buddy System. 
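drm_dp_read_mst_cap() above no longer returns a bool; callers now receive the tri-state enum. A sketch of the resulting caller-side branch, with a hypothetical helper that only wants full MST:

#include <drm/display/drm_dp_mst_helper.h>

static bool example_wants_mst(struct drm_dp_aux *aux,
			      const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	switch (drm_dp_read_mst_cap(aux, dpcd)) {
	case DRM_DP_MST:
		return true;	/* full topology: start the MST manager */
	case DRM_DP_SST_SIDEBAND_MSG:
		/* single stream, but sideband messaging is available */
		fallthrough;
	case DRM_DP_SST:
	default:
		return false;
	}
}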
@@ -82,10 +85,11 @@ struct drm_buddy { unsigned int n_roots; unsigned int max_order; - /* Must be at least PAGE_SIZE */ + /* Must be at least SZ_4K */ u64 chunk_size; u64 size; u64 avail; + u64 clear_avail; }; static inline u64 @@ -113,6 +117,12 @@ drm_buddy_block_is_allocated(struct drm_buddy_block *block) } static inline bool +drm_buddy_block_is_clear(struct drm_buddy_block *block) +{ + return block->header & DRM_BUDDY_HEADER_CLEAR; +} + +static inline bool drm_buddy_block_is_free(struct drm_buddy_block *block) { return drm_buddy_block_state(block) == DRM_BUDDY_FREE; @@ -150,7 +160,9 @@ int drm_buddy_block_trim(struct drm_buddy *mm, void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block); -void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects); +void drm_buddy_free_list(struct drm_buddy *mm, + struct list_head *objects, + unsigned int flags); void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p); void drm_buddy_block_print(struct drm_buddy *mm, diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index d47458ecda..bc0e66f9c4 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -141,6 +141,13 @@ struct drm_client_buffer { /** * @gem: GEM object backing this buffer + * + * FIXME: The dependency on GEM here isn't required, we could + * convert the driver handle to a dma-buf instead and use the + * backend-agnostic dma-buf vmap support instead. This would + * require that the handle2fd prime ioctl is reworked to pull the + * fd_install step out of the driver backend hooks, to make that + * final step optional for internal users. */ struct drm_gem_object *gem; @@ -159,6 +166,9 @@ struct drm_client_buffer * drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); +int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer, + struct iosys_map *map_copy); +void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer); int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct iosys_map *map); void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h index b225eeb30d..1b4c98c2f8 100644 --- a/include/drm/drm_debugfs_crc.h +++ b/include/drm/drm_debugfs_crc.h @@ -22,13 +22,19 @@ #ifndef __DRM_DEBUGFS_CRC_H__ #define __DRM_DEBUGFS_CRC_H__ +#include <linux/spinlock_types.h> +#include <linux/types.h> +#include <linux/wait.h> + +struct drm_crtc; + #define DRM_MAX_CRC_NR 10 /** * struct drm_crtc_crc_entry - entry describing a frame's content * @has_frame_counter: whether the source was able to provide a frame number * @frame: number of the frame this CRC is about, if @has_frame_counter is true - * @crc: array of values that characterize the frame + * @crcs: array of values that characterize the frame */ struct drm_crtc_crc_entry { bool has_frame_counter; diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h deleted file mode 100644 index bc1f6b3781..0000000000 --- a/include/drm/drm_displayid.h +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright © 2014 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#ifndef DRM_DISPLAYID_H -#define DRM_DISPLAYID_H - -#include <linux/types.h> -#include <linux/bits.h> - -struct drm_edid; - -#define VESA_IEEE_OUI 0x3a0292 - -/* DisplayID Structure versions */ -#define DISPLAY_ID_STRUCTURE_VER_20 0x20 - -/* DisplayID Structure v1r2 Data Blocks */ -#define DATA_BLOCK_PRODUCT_ID 0x00 -#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01 -#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02 -#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03 -#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04 -#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05 -#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06 -#define DATA_BLOCK_VESA_TIMING 0x07 -#define DATA_BLOCK_CEA_TIMING 0x08 -#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09 -#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a -#define DATA_BLOCK_GP_ASCII_STRING 0x0b -#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c -#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d -#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e -#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f -#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10 -#define DATA_BLOCK_TILED_DISPLAY 0x12 -#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f -#define DATA_BLOCK_CTA 0x81 - -/* DisplayID Structure v2r0 Data Blocks */ -#define DATA_BLOCK_2_PRODUCT_ID 0x20 -#define DATA_BLOCK_2_DISPLAY_PARAMETERS 0x21 -#define DATA_BLOCK_2_TYPE_7_DETAILED_TIMING 0x22 -#define DATA_BLOCK_2_TYPE_8_ENUMERATED_TIMING 0x23 -#define DATA_BLOCK_2_TYPE_9_FORMULA_TIMING 0x24 -#define DATA_BLOCK_2_DYNAMIC_VIDEO_TIMING 0x25 -#define DATA_BLOCK_2_DISPLAY_INTERFACE_FEATURES 0x26 -#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27 -#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28 -#define DATA_BLOCK_2_CONTAINER_ID 0x29 -#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e -#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81 - -/* DisplayID Structure v1r2 Product Type */ -#define PRODUCT_TYPE_EXTENSION 0 -#define PRODUCT_TYPE_TEST 1 -#define PRODUCT_TYPE_PANEL 2 -#define PRODUCT_TYPE_MONITOR 3 -#define PRODUCT_TYPE_TV 4 -#define PRODUCT_TYPE_REPEATER 5 -#define PRODUCT_TYPE_DIRECT_DRIVE 6 - -/* DisplayID Structure v2r0 Display Product Primary Use Case (~Product Type) */ -#define PRIMARY_USE_EXTENSION 0 -#define PRIMARY_USE_TEST 1 -#define PRIMARY_USE_GENERIC 2 -#define PRIMARY_USE_TV 3 -#define PRIMARY_USE_DESKTOP_PRODUCTIVITY 4 -#define PRIMARY_USE_DESKTOP_GAMING 5 -#define PRIMARY_USE_PRESENTATION 6 -#define PRIMARY_USE_HEAD_MOUNTED_VR 7 -#define PRIMARY_USE_HEAD_MOUNTED_AR 8 - -struct displayid_header { 
- u8 rev; - u8 bytes; - u8 prod_id; - u8 ext_count; -} __packed; - -struct displayid_block { - u8 tag; - u8 rev; - u8 num_bytes; -} __packed; - -struct displayid_tiled_block { - struct displayid_block base; - u8 tile_cap; - u8 topo[3]; - u8 tile_size[4]; - u8 tile_pixel_bezel[5]; - u8 topology_id[8]; -} __packed; - -struct displayid_detailed_timings_1 { - u8 pixel_clock[3]; - u8 flags; - u8 hactive[2]; - u8 hblank[2]; - u8 hsync[2]; - u8 hsw[2]; - u8 vactive[2]; - u8 vblank[2]; - u8 vsync[2]; - u8 vsw[2]; -} __packed; - -struct displayid_detailed_timing_block { - struct displayid_block base; - struct displayid_detailed_timings_1 timings[]; -}; - -#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0) -#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5) - -struct displayid_vesa_vendor_specific_block { - struct displayid_block base; - u8 oui[3]; - u8 data_structure_type; - u8 mso; -} __packed; - -/* - * DisplayID iteration. - * - * Do not access directly, this is private. - */ -struct displayid_iter { - const struct drm_edid *drm_edid; - - const u8 *section; - int length; - int idx; - int ext_index; - - u8 version; - u8 primary_use; -}; - -void displayid_iter_edid_begin(const struct drm_edid *drm_edid, - struct displayid_iter *iter); -const struct displayid_block * -__displayid_iter_next(struct displayid_iter *iter); -#define displayid_iter_for_each(__block, __iter) \ - while (((__block) = __displayid_iter_next(__iter))) -void displayid_iter_end(struct displayid_iter *iter); - -u8 displayid_version(const struct displayid_iter *iter); -u8 displayid_primary_use(const struct displayid_iter *iter); - -#endif diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 7923bc00dc..b085525e53 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -30,6 +30,7 @@ struct drm_connector; struct drm_device; struct drm_display_mode; struct drm_edid; +struct drm_printer; struct hdmi_avi_infoframe; struct hdmi_vendor_infoframe; struct i2c_adapter; @@ -272,14 +273,27 @@ struct detailed_timing { #define DRM_EDID_DSC_MAX_SLICES 0xf #define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f +struct drm_edid_product_id { + __be16 manufacturer_name; + __le16 product_code; + __le32 serial_number; + u8 week_of_manufacture; + u8 year_of_manufacture; +} __packed; + struct edid { u8 header[8]; /* Vendor & product info */ - u8 mfg_id[2]; - u8 prod_code[2]; - u32 serial; /* FIXME: byte order */ - u8 mfg_week; - u8 mfg_year; + union { + struct drm_edid_product_id product_id; + struct { + u8 mfg_id[2]; + u8 prod_code[2]; + u32 serial; /* FIXME: byte order */ + u8 mfg_week; + u8 mfg_year; + } __packed; + } __packed; /* EDID version */ u8 version; u8 revision; @@ -312,6 +326,13 @@ struct edid { u8 checksum; } __packed; +/* EDID matching */ +struct drm_edid_ident { + /* ID encoded by drm_edid_encode_panel_id() */ + u32 panel_id; + const char *name; +}; + #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8)) /* Short Audio Descriptor */ @@ -327,8 +348,6 @@ int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, const struct drm_display_mode *mode); -bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2); - int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, const struct drm_connector *connector, @@ -410,7 +429,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, void *data); struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter); -u32 
drm_edid_get_panel_id(struct i2c_adapter *adapter); struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, struct i2c_adapter *adapter); struct edid *drm_edid_duplicate(const struct edid *edid); @@ -450,14 +468,19 @@ const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector, const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector, int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len), void *context); +const struct drm_edid *drm_edid_read_base_block(struct i2c_adapter *adapter); const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector, struct i2c_adapter *adapter); int drm_edid_connector_update(struct drm_connector *connector, const struct drm_edid *edid); int drm_edid_connector_add_modes(struct drm_connector *connector); bool drm_edid_is_digital(const struct drm_edid *drm_edid); - -const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid, - int ext_id, int *ext_index); +void drm_edid_get_product_id(const struct drm_edid *drm_edid, + struct drm_edid_product_id *id); +void drm_edid_print_product_id(struct drm_printer *p, + const struct drm_edid_product_id *id, bool raw); +u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid); +bool drm_edid_match(const struct drm_edid *drm_edid, + const struct drm_edid_ident *ident); #endif /* __DRM_EDID_H__ */ diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index 7214101fd7..49172166a1 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -34,12 +34,6 @@ /** * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver - * @set_config: Initialize any encoder-specific modesetting parameters. - * The meaning of the @params parameter is implementation - * dependent. It will usually be a structure with DVO port - * data format settings or timings. It's not required for - * the new parameters to take effect until the next mode - * is set. * * Most of its members are analogous to the function pointers in * &drm_encoder_helper_funcs and they can optionally be used to @@ -48,41 +42,85 @@ * if the encoder is the currently selected one for the connector. */ struct drm_encoder_slave_funcs { + /** + * @set_config: Initialize any encoder-specific modesetting parameters. + * The meaning of the @params parameter is implementation dependent. It + * will usually be a structure with DVO port data format settings or + * timings. It's not required for the new parameters to take effect + * until the next mode is set. + */ void (*set_config)(struct drm_encoder *encoder, void *params); + /** + * @destroy: Analogous to &drm_encoder_funcs @destroy callback. + */ void (*destroy)(struct drm_encoder *encoder); + + /** + * @dpms: Analogous to &drm_encoder_helper_funcs @dpms callback. Wrapped + * by drm_i2c_encoder_dpms(). + */ void (*dpms)(struct drm_encoder *encoder, int mode); + + /** + * @save: Save state. Wrapped by drm_i2c_encoder_save(). + */ void (*save)(struct drm_encoder *encoder); + + /** + * @restore: Restore state. Wrapped by drm_i2c_encoder_restore(). + */ void (*restore)(struct drm_encoder *encoder); + + /** + * @mode_fixup: Analogous to &drm_encoder_helper_funcs @mode_fixup + * callback. Wrapped by drm_i2c_encoder_mode_fixup(). + */ bool (*mode_fixup)(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + + /** + * @mode_valid: Analogous to &drm_encoder_helper_funcs @mode_valid. 
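The drm_edid_ident/drm_edid_match() additions shown above enable table-driven panel matching. A hedged sketch; the panel id and name are invented, while drm_edid_encode_panel_id() is the existing helper in this header:

#include <drm/drm_edid.h>

static const struct drm_edid_ident example_ident = {
	/* hypothetical vendor "ABC", product 0x1234 */
	.panel_id = drm_edid_encode_panel_id('A', 'B', 'C', 0x1234),
	.name = "Example Panel 13",
};

static bool example_is_known_panel(const struct drm_edid *drm_edid)
{
	/* Matches the panel id and, when @name is set, the EDID's
	 * display name as well. */
	return drm_edid_match(drm_edid, &example_ident);
}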
+ */ int (*mode_valid)(struct drm_encoder *encoder, struct drm_display_mode *mode); + /** + * @mode_set: Analogous to &drm_encoder_helper_funcs @mode_set + * callback. Wrapped by drm_i2c_encoder_mode_set(). + */ void (*mode_set)(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + /** + * @detect: Analogous to &drm_encoder_helper_funcs @detect + * callback. Wrapped by drm_i2c_encoder_detect(). + */ enum drm_connector_status (*detect)(struct drm_encoder *encoder, struct drm_connector *connector); + /** + * @get_modes: Get modes. + */ int (*get_modes)(struct drm_encoder *encoder, struct drm_connector *connector); + /** + * @create_resources: Create resources. + */ int (*create_resources)(struct drm_encoder *encoder, struct drm_connector *connector); + /** + * @set_property: Set property. + */ int (*set_property)(struct drm_encoder *encoder, struct drm_connector *connector, struct drm_property *property, uint64_t val); - }; /** * struct drm_encoder_slave - Slave encoder struct - * @base: DRM encoder object. - * @slave_funcs: Slave encoder callbacks. - * @slave_priv: Slave encoder private data. - * @bus_priv: Bus specific data. * * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the * ones in @base. The former are never actually called by the common @@ -95,10 +133,24 @@ struct drm_encoder_slave_funcs { * this. */ struct drm_encoder_slave { + /** + * @base: DRM encoder object. + */ struct drm_encoder base; + /** + * @slave_funcs: Slave encoder callbacks. + */ const struct drm_encoder_slave_funcs *slave_funcs; + + /** + * @slave_priv: Slave encoder private data. + */ void *slave_priv; + + /** + * @bus_priv: Bus specific data. + */ void *bus_priv; }; #define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base) @@ -112,16 +164,20 @@ int drm_i2c_encoder_init(struct drm_device *dev, /** * struct drm_i2c_encoder_driver * - * Describes a device driver for an encoder connected to the GPU - * through an I2C bus. In addition to the entry points in @i2c_driver - * an @encoder_init function should be provided. It will be called to - * give the driver an opportunity to allocate any per-encoder data - * structures and to initialize the @slave_funcs and (optionally) - * @slave_priv members of @encoder. + * Describes a device driver for an encoder connected to the GPU through an I2C + * bus. */ struct drm_i2c_encoder_driver { + /** + * @i2c_driver: I2C device driver description. + */ struct i2c_driver i2c_driver; + /** + * @encoder_init: Callback to allocate any per-encoder data structures + * and to initialize the @slave_funcs and (optionally) @slave_priv + * members of @encoder. 
+ */ int (*encoder_init)(struct i2c_client *client, struct drm_device *dev, struct drm_encoder_slave *encoder); @@ -133,6 +189,7 @@ struct drm_i2c_encoder_driver { /** * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder + * @encoder: The encoder */ static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder) { diff --git a/include/drm/drm_fb_dma_helper.h b/include/drm/drm_fb_dma_helper.h index d5e036c578..c950732c6d 100644 --- a/include/drm/drm_fb_dma_helper.h +++ b/include/drm/drm_fb_dma_helper.h @@ -6,7 +6,9 @@ struct drm_device; struct drm_framebuffer; +struct drm_plane; struct drm_plane_state; +struct drm_scanout_buffer; struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb, unsigned int plane); @@ -19,5 +21,8 @@ void drm_fb_dma_sync_non_coherent(struct drm_device *drm, struct drm_plane_state *old_state, struct drm_plane_state *state); +int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane, + struct drm_scanout_buffer *sb); + #endif diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index f13b34e0b7..428d81afe2 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -25,6 +25,7 @@ struct iosys_map; * All fields are considered private. */ struct drm_format_conv_state { + /* private: */ struct { void *mem; size_t size; diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 2ebec3984c..bae4865b21 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -527,6 +527,9 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj); void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty, bool accessed); +void drm_gem_lock(struct drm_gem_object *obj); +void drm_gem_unlock(struct drm_gem_object *obj); + int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index bf0c31aa8f..efbc9f2731 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -108,6 +108,9 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map); int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); +int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem); + int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) @@ -173,7 +176,7 @@ static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - return drm_gem_shmem_pin(shmem); + return drm_gem_shmem_pin_locked(shmem); } /** @@ -187,7 +190,7 @@ static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - drm_gem_shmem_unpin(shmem); + drm_gem_shmem_unpin_locked(shmem); } /** diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index 2938ba8075..9a73f786f4 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -170,7 +170,6 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb( * @vram_base: Base address of the managed video memory * @vram_size: Size of the managed video memory in bytes * @bdev: The TTM BO device. 
- * @funcs: TTM BO functions * * The fields &struct drm_vram_mm.vram_base and * &struct drm_vram_mm.vrm_size are managed by VRAM MM, but are diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index 6e99627edf..e7cc17ee49 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -75,7 +75,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test, * @_dev: The parent device object * @_type: the type of the struct which contains struct &drm_device * @_member: the name of the &drm_device within @_type. - * @_features: Mocked DRM device driver features + * @_feat: Mocked DRM device driver features * * This function creates a struct &drm_driver and will create a struct * &drm_device from @_dev and that driver. diff --git a/include/drm/drm_lease.h b/include/drm/drm_lease.h index 5c9ef6a2ae..53545b4ca9 100644 --- a/include/drm/drm_lease.h +++ b/include/drm/drm_lease.h @@ -6,6 +6,8 @@ #ifndef _DRM_LEASE_H_ #define _DRM_LEASE_H_ +#include <linux/types.h> + struct drm_file; struct drm_device; struct drm_master; diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 3011d33ecc..e0f56564bf 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -226,6 +226,12 @@ static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt) return -EINVAL; } +enum mipi_dsi_compression_algo { + MIPI_DSI_COMPRESSION_DSC = 0, + MIPI_DSI_COMPRESSION_VENDOR = 3, + /* other two values are reserved, DSI 1.3 */ +}; + struct mipi_dsi_device * mipi_dsi_device_register_full(struct mipi_dsi_host *host, const struct mipi_dsi_device_info *info); @@ -242,6 +248,9 @@ int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi); int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, u16 value); int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable); +int mipi_dsi_compression_mode_ext(struct mipi_dsi_device *dsi, bool enable, + enum mipi_dsi_compression_algo algo, + unsigned int pps_selector); int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi, const struct drm_dsc_picture_parameter_set *pps); @@ -305,17 +314,17 @@ int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi, * @dsi: DSI peripheral device * @seq: buffer containing the payload */ -#define mipi_dsi_generic_write_seq(dsi, seq...) \ - do { \ - static const u8 d[] = { seq }; \ - struct device *dev = &dsi->dev; \ - int ret; \ - ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \ - if (ret < 0) { \ - dev_err_ratelimited(dev, "transmit data failed: %d\n", \ - ret); \ - return ret; \ - } \ +#define mipi_dsi_generic_write_seq(dsi, seq...) \ + do { \ + static const u8 d[] = { seq }; \ + struct device *dev = &dsi->dev; \ + ssize_t ret; \ + ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) { \ + dev_err_ratelimited(dev, "transmit data failed: %zd\n", \ + ret); \ + return ret; \ + } \ } while (0) /** @@ -324,18 +333,18 @@ int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi, * @cmd: Command * @seq: buffer containing data to be transmitted */ -#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \ - do { \ - static const u8 d[] = { cmd, seq }; \ - struct device *dev = &dsi->dev; \ - int ret; \ - ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ - if (ret < 0) { \ - dev_err_ratelimited( \ - dev, "sending command %#02x failed: %d\n", \ - cmd, ret); \ - return ret; \ - } \ +#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) 
\ + do { \ + static const u8 d[] = { cmd, seq }; \ + struct device *dev = &dsi->dev; \ + ssize_t ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) { \ + dev_err_ratelimited( \ + dev, "sending command %#02x failed: %zd\n", \ + cmd, ret); \ + return ret; \ + } \ } while (0) /** diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 973119a917..8de3c9a5f6 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -506,6 +506,16 @@ struct drm_mode_config { struct list_head plane_list; /** + * @panic_lock: + * + * Raw spinlock used to protect critical sections of code that access + * the display hardware or modeset software state, against which the + * panic printing code must be protected. See drm_panic_trylock(), + * drm_panic_lock() and drm_panic_unlock(). + */ + struct raw_spinlock panic_lock; + + /** * @num_crtc: * * Number of CRTCs on this device linked with &drm_crtc.head. This is invariant over the lifetime @@ -942,6 +952,11 @@ struct drm_mode_config { */ struct drm_property *modifiers_property; + /** + * @size_hints_property: Plane SIZE_HINTS property. + */ + struct drm_property *size_hints_property; + /* cursor size */ uint32_t cursor_width, cursor_height; diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 9ed4246954..ec59015aec 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -48,6 +48,7 @@ * To make this clear all the helper vtables are pulled together in this location here. */ +struct drm_scanout_buffer; struct drm_writeback_connector; struct drm_writeback_job; @@ -1443,6 +1444,44 @@ struct drm_plane_helper_funcs { */ void (*atomic_async_update)(struct drm_plane *plane, struct drm_atomic_state *state); + + /** + * @get_scanout_buffer: + * + * Get the current scanout buffer, to display a message with drm_panic. + * The driver should make only the minimum changes needed to provide a + * buffer that can be used to display the panic screen. Currently only linear + * buffers are supported. Non-linear buffer support is on the TODO list. + * The device &dev.mode_config.panic_lock is taken before calling this + * function, so you can safely access the &plane.state. + * It is called from a panic callback, and must follow its restrictions. + * Please see the documentation of drm_panic_trylock() for an in-depth + * discussion of what's safe and what is not allowed. + * It's a best-effort mode, so it's expected that in some complex cases + * the panic screen won't be displayed. + * The returned &drm_scanout_buffer.map must be valid if no error code is + * returned. + * + * Return: + * %0 on success, negative errno on failure. + */ + int (*get_scanout_buffer)(struct drm_plane *plane, + struct drm_scanout_buffer *sb); + + /** + * @panic_flush: + * + * It is used by drm_panic, and is called after the panic screen is + * drawn to the scanout buffer. In this function, the driver + * can send additional commands to the hardware, to make the scanout + * buffer visible. + * It is only called if get_scanout_buffer() returned successfully, and + * the &dev.mode_config.panic_lock is held during the entire sequence. + * It is called from a panic callback, and must follow its restrictions. + * Please see the documentation of drm_panic_trylock() for an in-depth + * discussion of what's safe and what is not allowed.
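A driver-side sketch of the @get_scanout_buffer contract described above, for hardware with a linear, CPU-visible framebuffer. Everything prefixed example_ is hypothetical; a real implementation must derive the mapping from its own state:

#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panic.h>
#include <drm/drm_plane.h>

struct example_device {
	struct drm_device drm;
	void __iomem *vram;	/* hypothetical mapping of scanout memory */
};

#define to_example_device(dev) container_of(dev, struct example_device, drm)

static int example_get_scanout_buffer(struct drm_plane *plane,
				      struct drm_scanout_buffer *sb)
{
	struct example_device *edev = to_example_device(plane->dev);
	struct drm_framebuffer *fb = plane->state ? plane->state->fb : NULL;

	/* Only linear buffers are supported by drm_panic for now. */
	if (!fb || fb->modifier != DRM_FORMAT_MOD_LINEAR)
		return -ENODEV;

	sb->format = fb->format;
	sb->width = fb->width;
	sb->height = fb->height;
	sb->pitch[0] = fb->pitches[0];
	iosys_map_set_vaddr_iomem(&sb->map[0], edev->vram);

	return 0;
}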
+ */ + void (*panic_flush)(struct drm_plane *plane); }; /** diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index 082a6e980d..02d1cdd7f7 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -2,6 +2,7 @@ #ifndef __DRM_OF_H__ #define __DRM_OF_H__ +#include <linux/err.h> #include <linux/of_graph.h> #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) #include <drm/drm_bridge.h> diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h new file mode 100644 index 0000000000..822dbb1aa9 --- /dev/null +++ b/include/drm/drm_panic.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ +#ifndef __DRM_PANIC_H__ +#define __DRM_PANIC_H__ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/iosys-map.h> + +#include <drm/drm_device.h> +#include <drm/drm_fourcc.h> +/* + * Copyright (c) 2024 Intel + */ + +/** + * struct drm_scanout_buffer - DRM scanout buffer + * + * This structure holds the information necessary for drm_panic to draw the + * panic screen, and display it. + */ +struct drm_scanout_buffer { + /** + * @format: + * + * drm format of the scanout buffer. + */ + const struct drm_format_info *format; + + /** + * @map: + * + * Virtual address of the scanout buffer, either in memory or iomem. + * The scanout buffer should be in linear format, and can be directly + * sent to the display hardware. Tearing is not an issue for the panic + * screen. + */ + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + + /** + * @width: Width of the scanout buffer, in pixels. + */ + unsigned int width; + + /** + * @height: Height of the scanout buffer, in pixels. + */ + unsigned int height; + + /** + * @pitch: Length in bytes between the start of two consecutive lines. + */ + unsigned int pitch[DRM_FORMAT_MAX_PLANES]; +}; + +/** + * drm_panic_trylock - try to enter the panic printing critical section + * @dev: struct drm_device + * @flags: unsigned long irq flags you need to pass to the unlock() counterpart + * + * This function must be called by any panic printing code. The panic printing + * attempt must be aborted if the trylock fails. + * + * Panic printing code can make the following assumptions while holding the + * panic lock: + * + * - Anything protected by drm_panic_lock() and drm_panic_unlock() pairs is safe + * to access. + * + * - Furthermore the panic printing code only registers in drm_dev_register() + * and gets removed in drm_dev_unregister(). This allows the panic code to + * safely access any state which is invariant in between these two function + * calls, like the list of planes &drm_mode_config.plane_list or most of the + * struct drm_plane structure. + * + * Specifically thanks to the protection around plane updates in + * drm_atomic_helper_swap_state() the following additional guarantees hold: + * + * - It is safe to dereference the drm_plane.state pointer. + * + * - Anything in struct drm_plane_state or the driver's subclass thereof which + * stays invariant after the atomic check code has finished is safe to access. + * Specifically this includes the reference counted pointers to framebuffer + * and buffer objects. + * + * - Anything set up by &drm_plane_helper_funcs.prepare_fb and cleaned up by + * &drm_plane_helper_funcs.cleanup_fb is safe to access, as long as it stays + * invariant between these two calls.
This also means that for drivers using + * dynamic buffer management the framebuffer is pinned, and therefore all + * relevant data structures can be accessed without taking any further locks + * (which would be impossible in panic context anyway). + * + * - Importantly, software and hardware state set up by + * &drm_plane_helper_funcs.begin_fb_access and + * &drm_plane_helper_funcs.end_fb_access is not safe to access. + * + * Drivers must not make any assumptions about the actual state of the hardware, + * unless they explicitly protect these hardware accesses with drm_panic_lock() + * and drm_panic_unlock(). + * + * Return: + * %0 when failing to acquire the raw spinlock, nonzero on success. + */ +#define drm_panic_trylock(dev, flags) \ + raw_spin_trylock_irqsave(&(dev)->mode_config.panic_lock, flags) + +/** + * drm_panic_lock - protect panic printing relevant state + * @dev: struct drm_device + * @flags: unsigned long irq flags you need to pass to the unlock() counterpart + * + * This function must be called to protect software and hardware state that the + * panic printing code must be able to rely on. The protected sections must be + * as small as possible. It uses the irqsave/irqrestore variant, and can be + * called from an irq handler. Examples include: + * + * - Access to peek/poke or other similar registers, if that is the way the + * driver prints the pixels into the scanout buffer at panic time. + * + * - Updates to pointers like &drm_plane.state, allowing the panic handler to + * safely dereference these. This is done in drm_atomic_helper_swap_state(). + * + * - Any state that isn't invariant and that the driver must be able to access + * during panic printing. + */ + +#define drm_panic_lock(dev, flags) \ + raw_spin_lock_irqsave(&(dev)->mode_config.panic_lock, flags) + +/** + * drm_panic_unlock - end of the panic printing critical section + * @dev: struct drm_device + * @flags: irq flags that were returned when acquiring the lock + * + * Unlocks the raw spinlock acquired by either drm_panic_lock() or + * drm_panic_trylock(). + */ +#define drm_panic_unlock(dev, flags) \ + raw_spin_unlock_irqrestore(&(dev)->mode_config.panic_lock, flags) + +#ifdef CONFIG_DRM_PANIC + +void drm_panic_register(struct drm_device *dev); +void drm_panic_unregister(struct drm_device *dev); + +#else + +static inline void drm_panic_register(struct drm_device *dev) {} +static inline void drm_panic_unregister(struct drm_device *dev) {} + +#endif + +#endif /* __DRM_PANIC_H__ */ diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 641fe29805..9507542121 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -25,6 +25,7 @@ #include <linux/list.h> #include <linux/ctype.h> +#include <linux/kmsg_dump.h> #include <drm/drm_mode_object.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_rect.h> @@ -32,6 +33,7 @@ #include <drm/drm_util.h> struct drm_crtc; +struct drm_plane_size_hint; struct drm_printer; struct drm_modeset_acquire_ctx; @@ -779,6 +781,11 @@ struct drm_plane { * @hotspot_y_property: property to set mouse hotspot y offset.
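Per the drm_panic_lock() kerneldoc above, drivers wrap their panic-relevant register writes in the raw spinlock. A hedged sketch; the register offset and device struct are hypothetical:

#include <drm/drm_panic.h>
#include <linux/io.h>

#define EXAMPLE_FB_BASE_REG 0x100	/* hypothetical register offset */

struct example_device {
	struct drm_device drm;
	void __iomem *mmio;
};

static void example_set_fb_base(struct example_device *edev, u32 addr)
{
	unsigned long flags;

	/* Keep the protected section as small as possible. */
	drm_panic_lock(&edev->drm, flags);
	writel(addr, edev->mmio + EXAMPLE_FB_BASE_REG);
	drm_panic_unlock(&edev->drm, flags);
}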
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 641fe29805..9507542121 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -25,6 +25,7 @@ #include <linux/list.h> #include <linux/ctype.h> +#include <linux/kmsg_dump.h> #include <drm/drm_mode_object.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_rect.h> @@ -32,6 +33,7 @@ #include <drm/drm_util.h> struct drm_crtc; +struct drm_plane_size_hint; struct drm_printer; struct drm_modeset_acquire_ctx; @@ -779,6 +781,11 @@ struct drm_plane { * @hotspot_y_property: property to set mouse hotspot y offset. */ struct drm_property *hotspot_y_property; + + /** + * @kmsg_panic: Used to register a panic notifier for this plane + */ + struct kmsg_dumper kmsg_panic; }; #define obj_to_plane(x) container_of(x, struct drm_plane, base) @@ -976,5 +983,8 @@ drm_plane_get_damage_clips(const struct drm_plane_state *state); int drm_plane_create_scaling_filter_property(struct drm_plane *plane, unsigned int supported_filters); +int drm_plane_add_size_hints_property(struct drm_plane *plane, + const struct drm_plane_size_hint *hints, + int num_hints); #endif
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index 9cc473e5d3..089950ad86 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -28,14 +28,14 @@ #include <linux/compiler.h> #include <linux/printk.h> -#include <linux/seq_file.h> #include <linux/device.h> -#include <linux/debugfs.h> #include <linux/dynamic_debug.h> #include <drm/drm.h> +struct debugfs_regset32; struct drm_device; +struct seq_file; /* Do *not* use outside of drm_print.[ch]! */ extern unsigned long __drm_debug;
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h index 62741a8879..d6ce7b218b 100644 --- a/include/drm/drm_probe_helper.h +++ b/include/drm/drm_probe_helper.h @@ -16,6 +16,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector int drm_helper_probe_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force); + +int drmm_kms_helper_poll_init(struct drm_device *dev); void drm_kms_helper_poll_init(struct drm_device *dev); void drm_kms_helper_poll_fini(struct drm_device *dev); bool drm_helper_hpd_irq_event(struct drm_device *dev); @@ -37,4 +39,8 @@ int drm_connector_helper_get_modes_fixed(struct drm_connector *connector, int drm_connector_helper_get_modes(struct drm_connector *connector); int drm_connector_helper_tv_get_modes(struct drm_connector *connector); +int drm_connector_helper_detect_from_ddc(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force); + #endif
diff --git a/include/drm/drm_suballoc.h b/include/drm/drm_suballoc.h index c2188bb0b1..7ba72a81a8 100644 --- a/include/drm/drm_suballoc.h +++ b/include/drm/drm_suballoc.h @@ -37,7 +37,7 @@ struct drm_suballoc_manager { * @manager: The drm_suballoc_manager. * @soffset: Start offset. * @eoffset: End offset + 1 so that @eoffset - @soffset = size. - * @dma_fence: The fence protecting the allocation. + * @fence: The fence protecting the allocation. */ struct drm_suballoc { struct list_head olist;
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index 7f3957943d..c8f829b430 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -225,6 +225,7 @@ struct drm_vblank_crtc { wait_queue_head_t work_wait_queue; }; +struct drm_vblank_crtc *drm_crtc_vblank_crtc(struct drm_crtc *crtc); int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); bool drm_dev_has_vblank(const struct drm_device *dev); u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h deleted file mode 100644 index 228f43e8df..0000000000 --- a/include/drm/gma_drm.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/************************************************************************** - * Copyright (c) 2007-2011, Intel Corporation. - * All Rights Reserved. - * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. - * All Rights Reserved.
- * - **************************************************************************/ - -#ifndef _GMA_DRM_H_ -#define _GMA_DRM_H_ - -#endif diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h index 8390b437a1..5305b9797f 100644 --- a/include/drm/i2c/ch7006.h +++ b/include/drm/i2c/ch7006.h @@ -37,6 +37,7 @@ * meaning. */ struct ch7006_encoder_params { + /* private: FIXME: document the members */ enum { CH7006_FORMAT_RGB16 = 0, CH7006_FORMAT_YCrCb24m16, diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h index 205e27384c..ddf248693c 100644 --- a/include/drm/i2c/sil164.h +++ b/include/drm/i2c/sil164.h @@ -36,6 +36,7 @@ * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf". */ struct sil164_encoder_params { + /* private: FIXME: document the members */ enum { SIL164_INPUT_EDGE_FALLING = 0, SIL164_INPUT_EDGE_RISING diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index 56a84ee1c6..4ea3b17aa1 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -24,7 +24,7 @@ #ifndef _I915_COMPONENT_H_ #define _I915_COMPONENT_H_ -#include "drm_audio_component.h" +#include <drm/drm_audio_component.h> enum i915_component_type { I915_COMPONENT_AUDIO = 1, diff --git a/include/drm/i915_gsc_proxy_mei_interface.h b/include/drm/i915_gsc_proxy_mei_interface.h index 9462341d3a..850dfbf406 100644 --- a/include/drm/i915_gsc_proxy_mei_interface.h +++ b/include/drm/i915_gsc_proxy_mei_interface.h @@ -21,7 +21,7 @@ struct i915_gsc_proxy_component_ops { struct module *owner; /** - * send - Sends a proxy message to ME FW. + * @send: Sends a proxy message to ME FW. * @dev: device struct corresponding to the mei device * @buf: message buffer to send * @size: size of the message @@ -30,7 +30,7 @@ struct i915_gsc_proxy_component_ops { int (*send)(struct device *dev, const void *buf, size_t size); /** - * recv - Receives a proxy message from ME FW. + * @recv: Receives a proxy message from ME FW. * @dev: device struct corresponding to the mei device * @buf: message buffer to contain the received message * @size: size of the buffer diff --git a/include/drm/i915_hdcp_interface.h b/include/drm/i915_hdcp_interface.h index 4c9c8167c2..d776ed7dcd 100644 --- a/include/drm/i915_hdcp_interface.h +++ b/include/drm/i915_hdcp_interface.h @@ -54,7 +54,7 @@ enum hdcp_ddi { }; /** - * enum hdcp_tc - ME/GSC Firmware defined index for transcoders + * enum hdcp_transcoder - ME/GSC Firmware defined index for transcoders * @HDCP_INVALID_TRANSCODER: Index for Invalid transcoder * @HDCP_TRANSCODER_EDP: Index for EDP Transcoder * @HDCP_TRANSCODER_DSI0: Index for DSI0 Transcoder @@ -106,7 +106,7 @@ struct hdcp_port_data { * And Prepare AKE_Init. * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate * AKE_Send_Cert and prepare - AKE_Stored_Km/AKE_No_Stored_Km + * AKE_Stored_Km/AKE_No_Stored_Km * @verify_hprime: Verify AKE_Send_H_prime * @store_pairing_info: Store pairing info received * @initiate_locality_check: Prepare LC_Init @@ -170,14 +170,22 @@ struct i915_hdcp_ops { /** * struct i915_hdcp_arbiter - Used for communication between i915 * and hdcp drivers for the HDCP2.2 services - * @hdcp_dev: device that provide the HDCP2.2 service from MEI Bus. - * @hdcp_ops: Ops implemented by hdcp driver or intel_hdcp_gsc , used by i915 driver. */ struct i915_hdcp_arbiter { + /** + * @hdcp_dev: device that provides the HDCP2.2 service from MEI Bus. + */ struct device *hdcp_dev; + + /** + * @ops: Ops implemented by hdcp driver or intel_hdcp_gsc, used by i915 + * driver. 
+ */ const struct i915_hdcp_ops *ops; - /* To protect the above members. */ + /** + * @mutex: To protect the above members. + */ struct mutex mutex; };
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 28a96aa1e0..85ce33ad6e 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -711,7 +711,9 @@ INTEL_VGA_DEVICE(0x5692, info), \ INTEL_VGA_DEVICE(0x56A0, info), \ INTEL_VGA_DEVICE(0x56A1, info), \ - INTEL_VGA_DEVICE(0x56A2, info) + INTEL_VGA_DEVICE(0x56A2, info), \ + INTEL_VGA_DEVICE(0x56BE, info), \ + INTEL_VGA_DEVICE(0x56BF, info) #define INTEL_DG2_G11_IDS(info) \ INTEL_VGA_DEVICE(0x5693, info), \
diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h index 7d96985f2d..a532d32f58 100644 --- a/include/drm/i915_pxp_tee_interface.h +++ b/include/drm/i915_pxp_tee_interface.h @@ -12,20 +12,26 @@ struct scatterlist; /** * struct i915_pxp_component_ops - ops for PXP services. - * @owner: Module providing the ops - * @send: sends data to PXP - * @receive: receives data from PXP */ struct i915_pxp_component_ops { /** - * @owner: owner of the module provding the ops + * @owner: Module providing the ops. */ struct module *owner; + /** + * @send: Send a PXP message. + */ int (*send)(struct device *dev, const void *message, size_t size, unsigned long timeout_ms); + /** + * @recv: Receive a PXP message. + */ int (*recv)(struct device *dev, void *buffer, size_t size, unsigned long timeout_ms); + /** + * @gsc_command: Send a GSC command. + */ ssize_t (*gsc_command)(struct device *dev, u8 client_id, u32 fence_id, struct scatterlist *sg_in, size_t total_in_len, struct scatterlist *sg_out); @@ -35,14 +41,21 @@ struct i915_pxp_component_ops { /** * struct i915_pxp_component - Used for communication between i915 and TEE * drivers for the PXP services - * @tee_dev: device that provide the PXP service from TEE Bus. - * @pxp_ops: Ops implemented by TEE driver, used by i915 driver. */ struct i915_pxp_component { + /** + * @tee_dev: device that provides the PXP service from TEE Bus. + */ struct device *tee_dev; + + /** + * @ops: Ops implemented by TEE driver, used by i915 driver. + */ const struct i915_pxp_component_ops *ops; - /* To protect the above members. */ + /** + * @mutex: To protect the above members. + */ struct mutex mutex; };
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h index 0223a41a64..6ccf96c91f 100644 --- a/include/drm/ttm/ttm_bo.h +++ b/include/drm/ttm/ttm_bo.h @@ -83,6 +83,9 @@ enum ttm_bo_type { * @resource: structure describing current placement. * @ttm: TTM structure holding system pages. * @deleted: True if the object is only a zombie and already deleted. + * @bulk_move: The bulk move object. + * @priority: Priority for LRU, BOs with lower priority are evicted first. + * @pin_count: Pin count. * * Base class for TTM buffer object, that deals with data placement and CPU * mappings. GPU mappings are really up to the driver, but for simpler GPUs @@ -128,26 +131,27 @@ struct ttm_buffer_object { struct work_struct delayed_delete; /** - * Special members that are protected by the reserve lock - * and the bo::lock when written to. Can be read with - * either of these locks held. + * @sg: external source of pages and DMA addresses, protected by the + * reservation lock. */ struct sg_table *sg; }; +#define TTM_BO_MAP_IOMEM_MASK 0x80 + /** * struct ttm_bo_kmap_obj * * @virtual: The current kernel virtual address. * @page: The page when kmap'ing a single page. * @bo_kmap_type: Type of bo_kmap.
+ * @bo: The TTM BO. * * Object describing a kernel mapping. Since a TTM bo may be located * in various memory types with various caching policies, the * mapping can either be an ioremap, a vmap, a kmap or part of a * premapped region. */ -#define TTM_BO_MAP_IOMEM_MASK 0x80 struct ttm_bo_kmap_obj { void *virtual; struct page *page; @@ -171,6 +175,7 @@ struct ttm_bo_kmap_obj { * @force_alloc: Don't check the memory account during suspend or CPU page * faults. Should only be used by TTM internally. * @resv: Reservation object to allow reserved evictions with. + * @bytes_moved: Statistics on how many bytes have been moved. * * Context for TTM operations like changing buffer placement or general memory * allocation. @@ -264,7 +269,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, * ttm_bo_reserve_slowpath: * @bo: A pointer to a struct ttm_buffer_object. * @interruptible: Sleep interruptible if waiting. - * @sequence: Set (@bo)->sequence to this value after lock + * @ticket: Ticket used to acquire the ww_mutex. * * This is called after ttm_bo_reserve returns -EAGAIN and we backed off * from all our other reservations. Because there are no other reservations @@ -303,7 +308,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, } /** - * ttm_bo_move_null = assign memory for a buffer object. + * ttm_bo_move_null - assign memory for a buffer object. * @bo: The bo to assign the memory to * @new_mem: The memory to be assigned. * diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h index 235a743d90..a18f43e93a 100644 --- a/include/drm/ttm/ttm_caching.h +++ b/include/drm/ttm/ttm_caching.h @@ -25,6 +25,8 @@ #ifndef _TTM_CACHING_H_ #define _TTM_CACHING_H_ +#include <linux/pgtable.h> + #define TTM_NUM_CACHING_TYPES 3 /** diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index 03aca29d3c..fac1e3e57e 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -52,7 +52,7 @@ struct ttm_validate_buffer { }; /** - * function ttm_eu_backoff_reservation + * ttm_eu_backoff_reservation * * @ticket: ww_acquire_ctx from reserve call * @list: thread private list of ttm_validate_buffer structs. @@ -64,14 +64,13 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, struct list_head *list); /** - * function ttm_eu_reserve_buffers + * ttm_eu_reserve_buffers * * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only * non-blocking reserves should be tried. * @list: thread private list of ttm_validate_buffer structs. * @intr: should the wait be interruptible * @dups: [out] optional list of duplicates. - * @del_lru: true if BOs should be removed from the LRU. * * Tries to reserve bos pointed to by the list entries for validation. * If the function returns 0, all buffers are marked as "unfenced", @@ -102,7 +101,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, struct list_head *dups); /** - * function ttm_eu_fence_buffer_objects. + * ttm_eu_fence_buffer_objects * * @ticket: ww_acquire_ctx from reserve call * @list: thread private list of ttm_validate_buffer structs. diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h index cc5c09a211..fe72631a6e 100644 --- a/include/drm/ttm/ttm_kmap_iter.h +++ b/include/drm/ttm/ttm_kmap_iter.h @@ -20,7 +20,7 @@ struct iosys_map; */ struct ttm_kmap_iter_ops { /** - * kmap_local() - Map a PAGE_SIZE part of the resource using + * @map_local: Map a PAGE_SIZE part of the resource using * kmap_local semantics. 
* @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. @@ -31,7 +31,7 @@ struct ttm_kmap_iter_ops { void (*map_local)(struct ttm_kmap_iter *res_iter, struct iosys_map *dmap, pgoff_t i); /** - * unmap_local() - Unmap a PAGE_SIZE part of the resource previously + * @unmap_local: Unmap a PAGE_SIZE part of the resource previously * mapped using kmap_local. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h index 4490d43c63..160d954a26 100644 --- a/include/drm/ttm/ttm_pool.h +++ b/include/drm/ttm/ttm_pool.h @@ -32,9 +32,10 @@ #include <drm/ttm/ttm_caching.h> struct device; -struct ttm_tt; -struct ttm_pool; +struct seq_file; struct ttm_operation_ctx; +struct ttm_pool; +struct ttm_tt; /** * struct ttm_pool_type - Pool for a certain memory type diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 1afa13f0c2..6976935513 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -251,6 +251,9 @@ struct ttm_lru_bulk_move_pos { * * Container for the current bulk move state. Should be used with * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move(). + * All BOs in a bulk_move structure need to share the same reservation object to + * ensure that the bulk as a whole is locked for eviction even if only one BO of + * the bulk is evicted. */ struct ttm_lru_bulk_move { struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY]; @@ -366,7 +369,8 @@ bool ttm_resource_intersects(struct ttm_device *bdev, const struct ttm_place *place, size_t size); bool ttm_resource_compatible(struct ttm_resource *res, - struct ttm_placement *placement); + struct ttm_placement *placement, + bool evicting); void ttm_resource_set_bo(struct ttm_resource *res, struct ttm_buffer_object *bo); diff --git a/include/drm/xe_pciids.h b/include/drm/xe_pciids.h index de1a344737..adb37bc541 100644 --- a/include/drm/xe_pciids.h +++ b/include/drm/xe_pciids.h @@ -134,7 +134,9 @@ MACRO__(0x5692, ## __VA_ARGS__), \ MACRO__(0x56A0, ## __VA_ARGS__), \ MACRO__(0x56A1, ## __VA_ARGS__), \ - MACRO__(0x56A2, ## __VA_ARGS__) + MACRO__(0x56A2, ## __VA_ARGS__), \ + MACRO__(0x56BE, ## __VA_ARGS__), \ + MACRO__(0x56BF, ## __VA_ARGS__) #define XE_DG2_G11_IDS(MACRO__, ...) \ MACRO__(0x5693, ## __VA_ARGS__), \ @@ -176,10 +178,13 @@ /* MTL / ARL */ #define XE_MTL_IDS(MACRO__, ...) \ MACRO__(0x7D40, ## __VA_ARGS__), \ + MACRO__(0x7D41, ## __VA_ARGS__), \ MACRO__(0x7D45, ## __VA_ARGS__), \ + MACRO__(0x7D51, ## __VA_ARGS__), \ MACRO__(0x7D55, ## __VA_ARGS__), \ MACRO__(0x7D60, ## __VA_ARGS__), \ MACRO__(0x7D67, ## __VA_ARGS__), \ + MACRO__(0x7DD1, ## __VA_ARGS__), \ MACRO__(0x7DD5, ## __VA_ARGS__) #define XE_LNL_IDS(MACRO__, ...) \ diff --git a/include/dt-bindings/arm/mhuv3-dt.h b/include/dt-bindings/arm/mhuv3-dt.h new file mode 100644 index 0000000000..4575406919 --- /dev/null +++ b/include/dt-bindings/arm/mhuv3-dt.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * This header provides constants for the defined MHUv3 types. 
+ */ + +#ifndef _DT_BINDINGS_ARM_MHUV3_DT_H +#define _DT_BINDINGS_ARM_MHUV3_DT_H + +#define DBE_EXT 0 +#define FCE_EXT 1 +#define FE_EXT 2 + +#endif /* _DT_BINDINGS_ARM_MHUV3_DT_H */ diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h index 19ac7b36f6..d040033dc8 100644 --- a/include/dt-bindings/arm/qcom,ids.h +++ b/include/dt-bindings/arm/qcom,ids.h @@ -258,6 +258,7 @@ #define QCOM_ID_QRU1000 539 #define QCOM_ID_SM8475_2 540 #define QCOM_ID_QDU1000 545 +#define QCOM_ID_X1E80100 555 #define QCOM_ID_SM8650 557 #define QCOM_ID_SM4450 568 #define QCOM_ID_QDU1010 587 diff --git a/include/dt-bindings/clock/google,gs101.h b/include/dt-bindings/clock/google,gs101.h index 3dac357778..442f9e9037 100644 --- a/include/dt-bindings/clock/google,gs101.h +++ b/include/dt-bindings/clock/google,gs101.h @@ -313,6 +313,122 @@ #define CLK_APM_PLL_DIV4_APM 70 #define CLK_APM_PLL_DIV16_APM 71 +/* CMU_HSI0 */ +#define CLK_FOUT_USB_PLL 1 +#define CLK_MOUT_PLL_USB 2 +#define CLK_MOUT_HSI0_ALT_USER 3 +#define CLK_MOUT_HSI0_BUS_USER 4 +#define CLK_MOUT_HSI0_DPGTC_USER 5 +#define CLK_MOUT_HSI0_TCXO_USER 6 +#define CLK_MOUT_HSI0_USB20_USER 7 +#define CLK_MOUT_HSI0_USB31DRD_USER 8 +#define CLK_MOUT_HSI0_USBDPDBG_USER 9 +#define CLK_MOUT_HSI0_BUS 10 +#define CLK_MOUT_HSI0_USB20_REF 11 +#define CLK_MOUT_HSI0_USB31DRD 12 +#define CLK_DOUT_HSI0_USB31DRD 13 +#define CLK_GOUT_HSI0_PCLK 14 +#define CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_SUSPEND_CLK_26 15 +#define CLK_GOUT_HSI0_CLK_HSI0_ALT 16 +#define CLK_GOUT_HSI0_DP_LINK_I_DP_GTC_CLK 17 +#define CLK_GOUT_HSI0_DP_LINK_I_PCLK 18 +#define CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK 19 +#define CLK_GOUT_HSI0_ETR_MIU_I_ACLK 20 +#define CLK_GOUT_HSI0_ETR_MIU_I_PCLK 21 +#define CLK_GOUT_HSI0_GPC_HSI0_PCLK 22 +#define CLK_GOUT_HSI0_LHM_AXI_G_ETR_HSI0_I_CLK 23 +#define CLK_GOUT_HSI0_LHM_AXI_P_AOCHSI0_I_CLK 24 +#define CLK_GOUT_HSI0_LHM_AXI_P_HSI0_I_CLK 25 +#define CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_I_CLK 26 +#define CLK_GOUT_HSI0_LHS_AXI_D_HSI0AOC_I_CLK 27 +#define CLK_GOUT_HSI0_PPMU_HSI0_AOC_ACLK 28 +#define CLK_GOUT_HSI0_PPMU_HSI0_AOC_PCLK 29 +#define CLK_GOUT_HSI0_PPMU_HSI0_BUS0_ACLK 30 +#define CLK_GOUT_HSI0_PPMU_HSI0_BUS0_PCLK 31 +#define CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK 32 +#define CLK_GOUT_HSI0_SSMT_USB_ACLK 33 +#define CLK_GOUT_HSI0_SSMT_USB_PCLK 34 +#define CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2 35 +#define CLK_GOUT_HSI0_SYSREG_HSI0_PCLK 36 +#define CLK_GOUT_HSI0_UASC_HSI0_CTRL_ACLK 37 +#define CLK_GOUT_HSI0_UASC_HSI0_CTRL_PCLK 38 +#define CLK_GOUT_HSI0_UASC_HSI0_LINK_ACLK 39 +#define CLK_GOUT_HSI0_UASC_HSI0_LINK_PCLK 40 +#define CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL 41 +#define CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY 42 +#define CLK_GOUT_HSI0_USB31DRD_I_USB20_PHY_REFCLK_26 43 +#define CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_REF_CLK_40 44 +#define CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_REF_SOC_PLL 45 +#define CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_SCL_APB_PCLK 46 +#define CLK_GOUT_HSI0_USB31DRD_I_USBPCS_APB_CLK 47 +#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_I_ACLK 48 +#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_UDBG_I_APB_PCLK 49 +#define CLK_GOUT_HSI0_XIU_D0_HSI0_ACLK 50 +#define CLK_GOUT_HSI0_XIU_D1_HSI0_ACLK 51 +#define CLK_GOUT_HSI0_XIU_P_HSI0_ACLK 52 + +/* CMU_HSI2 */ +#define CLK_MOUT_HSI2_BUS_USER 1 +#define CLK_MOUT_HSI2_MMC_CARD_USER 2 +#define CLK_MOUT_HSI2_PCIE_USER 3 +#define CLK_MOUT_HSI2_UFS_EMBD_USER 4 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_PHY_REFCLK_IN 5 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_PHY_REFCLK_IN 6 +#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4A_1_ACLK 7 +#define 
CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4A_1_PCLK 8 +#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4B_1_ACLK 9 +#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4B_1_PCLK 10 +#define CLK_GOUT_HSI2_D_TZPC_HSI2_PCLK 11 +#define CLK_GOUT_HSI2_GPC_HSI2_PCLK 12 +#define CLK_GOUT_HSI2_GPIO_HSI2_PCLK 13 +#define CLK_GOUT_HSI2_HSI2_CMU_HSI2_PCLK 14 +#define CLK_GOUT_HSI2_LHM_AXI_P_HSI2_I_CLK 15 +#define CLK_GOUT_HSI2_LHS_ACEL_D_HSI2_I_CLK 16 +#define CLK_GOUT_HSI2_MMC_CARD_I_ACLK 17 +#define CLK_GOUT_HSI2_MMC_CARD_SDCLKIN 18 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_DBI_ACLK_UG 19 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_MSTR_ACLK_UG 20 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_SLV_ACLK_UG 21 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_I_DRIVER_APB_CLK 22 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_DBI_ACLK_UG 23 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_MSTR_ACLK_UG 24 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_SLV_ACLK_UG 25 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_I_DRIVER_APB_CLK 26 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PHY_UDBG_I_APB_PCLK 27 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PIPE_PAL_PCIE_I_APB_PCLK 28 +#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PCIEPHY210X2_QCH_I_APB_PCLK 29 +#define CLK_GOUT_HSI2_PCIE_IA_GEN4A_1_I_CLK 30 +#define CLK_GOUT_HSI2_PCIE_IA_GEN4B_1_I_CLK 31 +#define CLK_GOUT_HSI2_PPMU_HSI2_ACLK 32 +#define CLK_GOUT_HSI2_PPMU_HSI2_PCLK 33 +#define CLK_GOUT_HSI2_QE_MMC_CARD_HSI2_ACLK 34 +#define CLK_GOUT_HSI2_QE_MMC_CARD_HSI2_PCLK 35 +#define CLK_GOUT_HSI2_QE_PCIE_GEN4A_HSI2_ACLK 36 +#define CLK_GOUT_HSI2_QE_PCIE_GEN4A_HSI2_PCLK 37 +#define CLK_GOUT_HSI2_QE_PCIE_GEN4B_HSI2_ACLK 38 +#define CLK_GOUT_HSI2_QE_PCIE_GEN4B_HSI2_PCLK 39 +#define CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_ACLK 40 +#define CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_PCLK 41 +#define CLK_GOUT_HSI2_CLK_HSI2_BUS_CLK 42 +#define CLK_GOUT_HSI2_CLK_HSI2_OSCCLK_CLK 43 +#define CLK_GOUT_HSI2_SSMT_HSI2_ACLK 44 +#define CLK_GOUT_HSI2_SSMT_HSI2_PCLK 45 +#define CLK_GOUT_HSI2_SYSMMU_HSI2_CLK_S2 46 +#define CLK_GOUT_HSI2_SYSREG_HSI2_PCLK 47 +#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_ACLK 48 +#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_PCLK 49 +#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_SLV_1_ACLK 50 +#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_SLV_1_PCLK 51 +#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_DBI_1_ACLK 52 +#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_DBI_1_PCLK 53 +#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_SLV_1_ACLK 54 +#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_SLV_1_PCLK 55 +#define CLK_GOUT_HSI2_UFS_EMBD_I_ACLK 56 +#define CLK_GOUT_HSI2_UFS_EMBD_I_CLK_UNIPRO 57 +#define CLK_GOUT_HSI2_UFS_EMBD_I_FMP_CLK 58 +#define CLK_GOUT_HSI2_XIU_D_HSI2_ACLK 59 +#define CLK_GOUT_HSI2_XIU_P_HSI2_ACLK 60 + /* CMU_MISC */ #define CLK_MOUT_MISC_BUS_USER 1 #define CLK_MOUT_MISC_SSS_USER 2 diff --git a/include/dt-bindings/clock/loongson,ls2k-clk.h b/include/dt-bindings/clock/loongson,ls2k-clk.h index 3bc4dfc193..4279ba595f 100644 --- a/include/dt-bindings/clock/loongson,ls2k-clk.h +++ b/include/dt-bindings/clock/loongson,ls2k-clk.h @@ -7,24 +7,40 @@ #ifndef __DT_BINDINGS_CLOCK_LOONGSON2_H #define __DT_BINDINGS_CLOCK_LOONGSON2_H -#define LOONGSON2_REF_100M 0 -#define LOONGSON2_NODE_PLL 1 -#define LOONGSON2_DDR_PLL 2 -#define LOONGSON2_DC_PLL 3 -#define LOONGSON2_PIX0_PLL 4 -#define LOONGSON2_PIX1_PLL 5 -#define LOONGSON2_NODE_CLK 6 -#define LOONGSON2_HDA_CLK 7 -#define LOONGSON2_GPU_CLK 8 -#define LOONGSON2_DDR_CLK 9 -#define LOONGSON2_GMAC_CLK 10 -#define LOONGSON2_DC_CLK 11 -#define LOONGSON2_APB_CLK 12 -#define LOONGSON2_USB_CLK 13 -#define LOONGSON2_SATA_CLK 14 -#define 
LOONGSON2_PIX0_CLK 15 -#define LOONGSON2_PIX1_CLK 16 -#define LOONGSON2_BOOT_CLK 17 -#define LOONGSON2_CLK_END 18 +#define LOONGSON2_REF_100M 0 +#define LOONGSON2_NODE_PLL 1 +#define LOONGSON2_DDR_PLL 2 +#define LOONGSON2_DC_PLL 3 +#define LOONGSON2_PIX0_PLL 4 +#define LOONGSON2_PIX1_PLL 5 +#define LOONGSON2_NODE_CLK 6 +#define LOONGSON2_HDA_CLK 7 +#define LOONGSON2_GPU_CLK 8 +#define LOONGSON2_DDR_CLK 9 +#define LOONGSON2_GMAC_CLK 10 +#define LOONGSON2_DC_CLK 11 +#define LOONGSON2_APB_CLK 12 +#define LOONGSON2_USB_CLK 13 +#define LOONGSON2_SATA_CLK 14 +#define LOONGSON2_PIX0_CLK 15 +#define LOONGSON2_PIX1_CLK 16 +#define LOONGSON2_BOOT_CLK 17 +#define LOONGSON2_OUT0_GATE 18 +#define LOONGSON2_GMAC_GATE 19 +#define LOONGSON2_RIO_GATE 20 +#define LOONGSON2_DC_GATE 21 +#define LOONGSON2_GPU_GATE 22 +#define LOONGSON2_DDR_GATE 23 +#define LOONGSON2_HDA_GATE 24 +#define LOONGSON2_NODE_GATE 25 +#define LOONGSON2_EMMC_GATE 26 +#define LOONGSON2_PIX0_GATE 27 +#define LOONGSON2_PIX1_GATE 28 +#define LOONGSON2_OUT0_CLK 29 +#define LOONGSON2_RIO_CLK 30 +#define LOONGSON2_EMMC_CLK 31 +#define LOONGSON2_DES_CLK 32 +#define LOONGSON2_I2S_CLK 33 +#define LOONGSON2_MISC_CLK 34 #endif diff --git a/include/dt-bindings/clock/nxp,imx95-clock.h b/include/dt-bindings/clock/nxp,imx95-clock.h new file mode 100644 index 0000000000..782662c3e7 --- /dev/null +++ b/include/dt-bindings/clock/nxp,imx95-clock.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* + * Copyright 2024 NXP + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX95_H +#define __DT_BINDINGS_CLOCK_IMX95_H + +#define IMX95_CLK_VPUBLK_WAVE 0 +#define IMX95_CLK_VPUBLK_JPEG_ENC 1 +#define IMX95_CLK_VPUBLK_JPEG_DEC 2 + +#define IMX95_CLK_CAMBLK_CSI2_FOR0 0 +#define IMX95_CLK_CAMBLK_CSI2_FOR1 1 +#define IMX95_CLK_CAMBLK_ISP_AXI 2 +#define IMX95_CLK_CAMBLK_ISP_PIXEL 3 +#define IMX95_CLK_CAMBLK_ISP 4 + +#define IMX95_CLK_DISPMIX_LVDS_PHY_DIV 0 +#define IMX95_CLK_DISPMIX_LVDS_CH0_GATE 1 +#define IMX95_CLK_DISPMIX_LVDS_CH1_GATE 2 +#define IMX95_CLK_DISPMIX_PIX_DI0_GATE 3 +#define IMX95_CLK_DISPMIX_PIX_DI1_GATE 4 + +#define IMX95_CLK_DISPMIX_ENG0_SEL 0 +#define IMX95_CLK_DISPMIX_ENG1_SEL 1 + +#endif /* __DT_BINDINGS_CLOCK_IMX95_H */ diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h index 1ec4827b80..655440a3e7 100644 --- a/include/dt-bindings/clock/r8a73a4-clock.h +++ b/include/dt-bindings/clock/r8a73a4-clock.h @@ -24,6 +24,10 @@ #define R8A73A4_CLK_ZS 14 #define R8A73A4_CLK_HP 15 +/* MSTP1 */ +#define R8A73A4_CLK_TMU0 25 +#define R8A73A4_CLK_TMU3 21 + /* MSTP2 */ #define R8A73A4_CLK_DMAC 18 #define R8A73A4_CLK_SCIFB3 17 diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h index 77cde8effd..1319933437 100644 --- a/include/dt-bindings/clock/r9a07g043-cpg.h +++ b/include/dt-bindings/clock/r9a07g043-cpg.h @@ -16,15 +16,15 @@ #define R9A07G043_CLK_SD0 5 #define R9A07G043_CLK_SD1 6 #define R9A07G043_CLK_M0 7 -#define R9A07G043_CLK_M2 8 -#define R9A07G043_CLK_M3 9 +#define R9A07G043_CLK_M2 8 /* RZ/G2UL Only */ +#define R9A07G043_CLK_M3 9 /* RZ/G2UL Only */ #define R9A07G043_CLK_HP 10 #define R9A07G043_CLK_TSU 11 #define R9A07G043_CLK_ZT 12 #define R9A07G043_CLK_P0 13 #define R9A07G043_CLK_P1 14 #define R9A07G043_CLK_P2 15 -#define R9A07G043_CLK_AT 16 +#define R9A07G043_CLK_AT 16 /* RZ/G2UL Only */ #define R9A07G043_OSCCLK 17 #define R9A07G043_CLK_P0_DIV2 18 @@ -200,5 +200,57 @@ #define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */ #define 
R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */ +/* Power domain IDs. */ +#define R9A07G043_PD_ALWAYS_ON 0 +#define R9A07G043_PD_GIC 1 /* RZ/G2UL Only */ +#define R9A07G043_PD_IA55 2 /* RZ/G2UL Only */ +#define R9A07G043_PD_MHU 3 /* RZ/G2UL Only */ +#define R9A07G043_PD_CORESIGHT 4 /* RZ/G2UL Only */ +#define R9A07G043_PD_SYC 5 /* RZ/G2UL Only */ +#define R9A07G043_PD_DMAC 6 +#define R9A07G043_PD_GTM0 7 +#define R9A07G043_PD_GTM1 8 +#define R9A07G043_PD_GTM2 9 +#define R9A07G043_PD_MTU 10 +#define R9A07G043_PD_POE3 11 +#define R9A07G043_PD_WDT0 12 +#define R9A07G043_PD_SPI 13 +#define R9A07G043_PD_SDHI0 14 +#define R9A07G043_PD_SDHI1 15 +#define R9A07G043_PD_ISU 16 /* RZ/G2UL Only */ +#define R9A07G043_PD_CRU 17 /* RZ/G2UL Only */ +#define R9A07G043_PD_LCDC 18 /* RZ/G2UL Only */ +#define R9A07G043_PD_SSI0 19 +#define R9A07G043_PD_SSI1 20 +#define R9A07G043_PD_SSI2 21 +#define R9A07G043_PD_SSI3 22 +#define R9A07G043_PD_SRC 23 +#define R9A07G043_PD_USB0 24 +#define R9A07G043_PD_USB1 25 +#define R9A07G043_PD_USB_PHY 26 +#define R9A07G043_PD_ETHER0 27 +#define R9A07G043_PD_ETHER1 28 +#define R9A07G043_PD_I2C0 29 +#define R9A07G043_PD_I2C1 30 +#define R9A07G043_PD_I2C2 31 +#define R9A07G043_PD_I2C3 32 +#define R9A07G043_PD_SCIF0 33 +#define R9A07G043_PD_SCIF1 34 +#define R9A07G043_PD_SCIF2 35 +#define R9A07G043_PD_SCIF3 36 +#define R9A07G043_PD_SCIF4 37 +#define R9A07G043_PD_SCI0 38 +#define R9A07G043_PD_SCI1 39 +#define R9A07G043_PD_IRDA 40 +#define R9A07G043_PD_RSPI0 41 +#define R9A07G043_PD_RSPI1 42 +#define R9A07G043_PD_RSPI2 43 +#define R9A07G043_PD_CANFD 44 +#define R9A07G043_PD_ADC 45 +#define R9A07G043_PD_TSU 46 +#define R9A07G043_PD_PLIC 47 /* RZ/Five Only */ +#define R9A07G043_PD_IAX45 48 /* RZ/Five Only */ +#define R9A07G043_PD_NCEPLDM 49 /* RZ/Five Only */ +#define R9A07G043_PD_NCEPLMT 50 /* RZ/Five Only */ #endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */ diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h index 0bb17ff1a0..e209f96f92 100644 --- a/include/dt-bindings/clock/r9a07g044-cpg.h +++ b/include/dt-bindings/clock/r9a07g044-cpg.h @@ -217,4 +217,62 @@ #define R9A07G044_ADC_ADRST_N 82 #define R9A07G044_TSU_PRESETN 83 +/* Power domain IDs. 
*/ +#define R9A07G044_PD_ALWAYS_ON 0 +#define R9A07G044_PD_GIC 1 +#define R9A07G044_PD_IA55 2 +#define R9A07G044_PD_MHU 3 +#define R9A07G044_PD_CORESIGHT 4 +#define R9A07G044_PD_SYC 5 +#define R9A07G044_PD_DMAC 6 +#define R9A07G044_PD_GTM0 7 +#define R9A07G044_PD_GTM1 8 +#define R9A07G044_PD_GTM2 9 +#define R9A07G044_PD_MTU 10 +#define R9A07G044_PD_POE3 11 +#define R9A07G044_PD_GPT 12 +#define R9A07G044_PD_POEGA 13 +#define R9A07G044_PD_POEGB 14 +#define R9A07G044_PD_POEGC 15 +#define R9A07G044_PD_POEGD 16 +#define R9A07G044_PD_WDT0 17 +#define R9A07G044_PD_WDT1 18 +#define R9A07G044_PD_SPI 19 +#define R9A07G044_PD_SDHI0 20 +#define R9A07G044_PD_SDHI1 21 +#define R9A07G044_PD_3DGE 22 +#define R9A07G044_PD_ISU 23 +#define R9A07G044_PD_VCPL4 24 +#define R9A07G044_PD_CRU 25 +#define R9A07G044_PD_MIPI_DSI 26 +#define R9A07G044_PD_LCDC 27 +#define R9A07G044_PD_SSI0 28 +#define R9A07G044_PD_SSI1 29 +#define R9A07G044_PD_SSI2 30 +#define R9A07G044_PD_SSI3 31 +#define R9A07G044_PD_SRC 32 +#define R9A07G044_PD_USB0 33 +#define R9A07G044_PD_USB1 34 +#define R9A07G044_PD_USB_PHY 35 +#define R9A07G044_PD_ETHER0 36 +#define R9A07G044_PD_ETHER1 37 +#define R9A07G044_PD_I2C0 38 +#define R9A07G044_PD_I2C1 39 +#define R9A07G044_PD_I2C2 40 +#define R9A07G044_PD_I2C3 41 +#define R9A07G044_PD_SCIF0 42 +#define R9A07G044_PD_SCIF1 43 +#define R9A07G044_PD_SCIF2 44 +#define R9A07G044_PD_SCIF3 45 +#define R9A07G044_PD_SCIF4 46 +#define R9A07G044_PD_SCI0 47 +#define R9A07G044_PD_SCI1 48 +#define R9A07G044_PD_IRDA 49 +#define R9A07G044_PD_RSPI0 50 +#define R9A07G044_PD_RSPI1 51 +#define R9A07G044_PD_RSPI2 52 +#define R9A07G044_PD_CANFD 53 +#define R9A07G044_PD_ADC 54 +#define R9A07G044_PD_TSU 55 + #endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */ diff --git a/include/dt-bindings/clock/r9a07g054-cpg.h b/include/dt-bindings/clock/r9a07g054-cpg.h index 43f4dbda87..2c99f89397 100644 --- a/include/dt-bindings/clock/r9a07g054-cpg.h +++ b/include/dt-bindings/clock/r9a07g054-cpg.h @@ -226,4 +226,62 @@ #define R9A07G054_TSU_PRESETN 83 #define R9A07G054_STPAI_ARESETN 84 +/* Power domain IDs. 
*/ +#define R9A07G054_PD_ALWAYS_ON 0 +#define R9A07G054_PD_GIC 1 +#define R9A07G054_PD_IA55 2 +#define R9A07G054_PD_MHU 3 +#define R9A07G054_PD_CORESIGHT 4 +#define R9A07G054_PD_SYC 5 +#define R9A07G054_PD_DMAC 6 +#define R9A07G054_PD_GTM0 7 +#define R9A07G054_PD_GTM1 8 +#define R9A07G054_PD_GTM2 9 +#define R9A07G054_PD_MTU 10 +#define R9A07G054_PD_POE3 11 +#define R9A07G054_PD_GPT 12 +#define R9A07G054_PD_POEGA 13 +#define R9A07G054_PD_POEGB 14 +#define R9A07G054_PD_POEGC 15 +#define R9A07G054_PD_POEGD 16 +#define R9A07G054_PD_WDT0 17 +#define R9A07G054_PD_WDT1 18 +#define R9A07G054_PD_SPI 19 +#define R9A07G054_PD_SDHI0 20 +#define R9A07G054_PD_SDHI1 21 +#define R9A07G054_PD_3DGE 22 +#define R9A07G054_PD_ISU 23 +#define R9A07G054_PD_VCPL4 24 +#define R9A07G054_PD_CRU 25 +#define R9A07G054_PD_MIPI_DSI 26 +#define R9A07G054_PD_LCDC 27 +#define R9A07G054_PD_SSI0 28 +#define R9A07G054_PD_SSI1 29 +#define R9A07G054_PD_SSI2 30 +#define R9A07G054_PD_SSI3 31 +#define R9A07G054_PD_SRC 32 +#define R9A07G054_PD_USB0 33 +#define R9A07G054_PD_USB1 34 +#define R9A07G054_PD_USB_PHY 35 +#define R9A07G054_PD_ETHER0 36 +#define R9A07G054_PD_ETHER1 37 +#define R9A07G054_PD_I2C0 38 +#define R9A07G054_PD_I2C1 39 +#define R9A07G054_PD_I2C2 40 +#define R9A07G054_PD_I2C3 41 +#define R9A07G054_PD_SCIF0 42 +#define R9A07G054_PD_SCIF1 43 +#define R9A07G054_PD_SCIF2 44 +#define R9A07G054_PD_SCIF3 45 +#define R9A07G054_PD_SCIF4 46 +#define R9A07G054_PD_SCI0 47 +#define R9A07G054_PD_SCI1 48 +#define R9A07G054_PD_IRDA 49 +#define R9A07G054_PD_RSPI0 50 +#define R9A07G054_PD_RSPI1 51 +#define R9A07G054_PD_RSPI2 52 +#define R9A07G054_PD_CANFD 53 +#define R9A07G054_PD_ADC 54 +#define R9A07G054_PD_TSU 55 + #endif /* __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ */ diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h index 410725b778..8281e9caf3 100644 --- a/include/dt-bindings/clock/r9a08g045-cpg.h +++ b/include/dt-bindings/clock/r9a08g045-cpg.h @@ -239,4 +239,74 @@ #define R9A08G045_I3C_PRESETN 92 #define R9A08G045_VBAT_BRESETN 93 +/* Power domain IDs. 
*/ +#define R9A08G045_PD_ALWAYS_ON 0 +#define R9A08G045_PD_GIC 1 +#define R9A08G045_PD_IA55 2 +#define R9A08G045_PD_MHU 3 +#define R9A08G045_PD_CORESIGHT 4 +#define R9A08G045_PD_SYC 5 +#define R9A08G045_PD_DMAC 6 +#define R9A08G045_PD_GTM0 7 +#define R9A08G045_PD_GTM1 8 +#define R9A08G045_PD_GTM2 9 +#define R9A08G045_PD_GTM3 10 +#define R9A08G045_PD_GTM4 11 +#define R9A08G045_PD_GTM5 12 +#define R9A08G045_PD_GTM6 13 +#define R9A08G045_PD_GTM7 14 +#define R9A08G045_PD_MTU 15 +#define R9A08G045_PD_POE3 16 +#define R9A08G045_PD_GPT 17 +#define R9A08G045_PD_POEGA 18 +#define R9A08G045_PD_POEGB 19 +#define R9A08G045_PD_POEGC 20 +#define R9A08G045_PD_POEGD 21 +#define R9A08G045_PD_WDT0 22 +#define R9A08G045_PD_XSPI 23 +#define R9A08G045_PD_SDHI0 24 +#define R9A08G045_PD_SDHI1 25 +#define R9A08G045_PD_SDHI2 26 +#define R9A08G045_PD_SSI0 27 +#define R9A08G045_PD_SSI1 28 +#define R9A08G045_PD_SSI2 29 +#define R9A08G045_PD_SSI3 30 +#define R9A08G045_PD_SRC 31 +#define R9A08G045_PD_USB0 32 +#define R9A08G045_PD_USB1 33 +#define R9A08G045_PD_USB_PHY 34 +#define R9A08G045_PD_ETHER0 35 +#define R9A08G045_PD_ETHER1 36 +#define R9A08G045_PD_I2C0 37 +#define R9A08G045_PD_I2C1 38 +#define R9A08G045_PD_I2C2 39 +#define R9A08G045_PD_I2C3 40 +#define R9A08G045_PD_SCIF0 41 +#define R9A08G045_PD_SCIF1 42 +#define R9A08G045_PD_SCIF2 43 +#define R9A08G045_PD_SCIF3 44 +#define R9A08G045_PD_SCIF4 45 +#define R9A08G045_PD_SCIF5 46 +#define R9A08G045_PD_SCI0 47 +#define R9A08G045_PD_SCI1 48 +#define R9A08G045_PD_IRDA 49 +#define R9A08G045_PD_RSPI0 50 +#define R9A08G045_PD_RSPI1 51 +#define R9A08G045_PD_RSPI2 52 +#define R9A08G045_PD_RSPI3 53 +#define R9A08G045_PD_RSPI4 54 +#define R9A08G045_PD_CANFD 55 +#define R9A08G045_PD_ADC 56 +#define R9A08G045_PD_TSU 57 +#define R9A08G045_PD_OCTA 58 +#define R9A08G045_PD_PDM 59 +#define R9A08G045_PD_PCI 60 +#define R9A08G045_PD_SPDIF 61 +#define R9A08G045_PD_I3C 62 +#define R9A08G045_PD_VBAT 63 + +#define R9A08G045_PD_DDR 64 +#define R9A08G045_PD_TZCDDR 65 +#define R9A08G045_PD_OTFDE_DDR 66 + #endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */ diff --git a/include/dt-bindings/clock/rk3568-cru.h b/include/dt-bindings/clock/rk3568-cru.h index d298908651..5263085c5b 100644 --- a/include/dt-bindings/clock/rk3568-cru.h +++ b/include/dt-bindings/clock/rk3568-cru.h @@ -78,6 +78,7 @@ #define CPLL_333M 9 #define ARMCLK 10 #define USB480M 11 +#define USB480M_PHY 12 #define ACLK_CORE_NIU2BUS 18 #define CLK_CORE_PVTM 19 #define CLK_CORE_PVTM_CORE 20 diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h index ecea167930..4f017bea01 100644 --- a/include/dt-bindings/leds/common.h +++ b/include/dt-bindings/leds/common.h @@ -46,6 +46,7 @@ #define LED_FUNCTION_CAPSLOCK "capslock" #define LED_FUNCTION_SCROLLLOCK "scrolllock" #define LED_FUNCTION_NUMLOCK "numlock" +#define LED_FUNCTION_FNLOCK "fnlock" /* Obsolete equivalents: "tpacpi::thinklight" (IBM/Lenovo Thinkpads), "lp5523:kb{1,2,3,4,5,6}" (Nokia N900) */ #define LED_FUNCTION_KBD_BACKLIGHT "kbd_backlight" @@ -90,11 +91,14 @@ #define LED_FUNCTION_INDICATOR "indicator" #define LED_FUNCTION_LAN "lan" #define LED_FUNCTION_MAIL "mail" +#define LED_FUNCTION_MOBILE "mobile" #define LED_FUNCTION_MTD "mtd" #define LED_FUNCTION_PANIC "panic" #define LED_FUNCTION_PROGRAMMING "programming" #define LED_FUNCTION_RX "rx" #define LED_FUNCTION_SD "sd" +#define LED_FUNCTION_SPEED_LAN "speed-lan" +#define LED_FUNCTION_SPEED_WAN "speed-wan" #define LED_FUNCTION_STANDBY "standby" #define LED_FUNCTION_TORCH "torch" #define 
LED_FUNCTION_TX "tx" diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h index 6fc4b445d3..b8a4f3ff4a 100644 --- a/include/dt-bindings/net/ti-dp83867.h +++ b/include/dt-bindings/net/ti-dp83867.h @@ -1,10 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ /* * Device Tree constants for the Texas Instruments DP83867 PHY * * Author: Dan Murphy <dmurphy@ti.com> * - * Copyright: (C) 2015 Texas Instruments, Inc. + * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _DT_BINDINGS_TI_DP83867_H diff --git a/include/dt-bindings/net/ti-dp83869.h b/include/dt-bindings/net/ti-dp83869.h index 218b1a64e9..917114aad7 100644 --- a/include/dt-bindings/net/ti-dp83869.h +++ b/include/dt-bindings/net/ti-dp83869.h @@ -1,10 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ /* * Device Tree constants for the Texas Instruments DP83869 PHY * * Author: Dan Murphy <dmurphy@ti.com> * - * Copyright: (C) 2019 Texas Instruments, Inc. + * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _DT_BINDINGS_TI_DP83869_H diff --git a/include/dt-bindings/phy/phy-qcom-qmp.h b/include/dt-bindings/phy/phy-qcom-qmp.h index 4edec4c5b2..6b43ea9e00 100644 --- a/include/dt-bindings/phy/phy-qcom-qmp.h +++ b/include/dt-bindings/phy/phy-qcom-qmp.h @@ -17,4 +17,8 @@ #define QMP_USB43DP_USB3_PHY 0 #define QMP_USB43DP_DP_PHY 1 +/* QMP PCIE PHYs */ +#define QMP_PCIE_PIPE_CLK 0 +#define QMP_PCIE_PHY_AUX_CLK 1 + #endif /* _DT_BINDINGS_PHY_QMP */ diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h deleted file mode 100644 index d1da5ff68d..0000000000 --- a/include/dt-bindings/pinctrl/samsung.h +++ /dev/null @@ -1,95 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Samsung's Exynos pinctrl bindings - * - * Copyright (c) 2016 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * Author: Krzysztof Kozlowski <krzk@kernel.org> - */ - -#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__ -#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__ - -/* - * These bindings are deprecated, because they do not match the actual - * concept of bindings but rather contain pure register values. - * Instead include the header in the DTS source directory. - */ -#warning "These bindings are deprecated. Instead use the header in the DTS source directory." 
- -#define EXYNOS_PIN_PULL_NONE 0 -#define EXYNOS_PIN_PULL_DOWN 1 -#define EXYNOS_PIN_PULL_UP 3 - -#define S3C64XX_PIN_PULL_NONE 0 -#define S3C64XX_PIN_PULL_DOWN 1 -#define S3C64XX_PIN_PULL_UP 2 - -/* Pin function in power down mode */ -#define EXYNOS_PIN_PDN_OUT0 0 -#define EXYNOS_PIN_PDN_OUT1 1 -#define EXYNOS_PIN_PDN_INPUT 2 -#define EXYNOS_PIN_PDN_PREV 3 - -/* Drive strengths for Exynos3250, Exynos4 (all) and Exynos5250 */ -#define EXYNOS4_PIN_DRV_LV1 0 -#define EXYNOS4_PIN_DRV_LV2 2 -#define EXYNOS4_PIN_DRV_LV3 1 -#define EXYNOS4_PIN_DRV_LV4 3 - -/* Drive strengths for Exynos5260 */ -#define EXYNOS5260_PIN_DRV_LV1 0 -#define EXYNOS5260_PIN_DRV_LV2 1 -#define EXYNOS5260_PIN_DRV_LV4 2 -#define EXYNOS5260_PIN_DRV_LV6 3 - -/* - * Drive strengths for Exynos5410, Exynos542x, Exynos5800 and Exynos850 (except - * GPIO_HSI block) - */ -#define EXYNOS5420_PIN_DRV_LV1 0 -#define EXYNOS5420_PIN_DRV_LV2 1 -#define EXYNOS5420_PIN_DRV_LV3 2 -#define EXYNOS5420_PIN_DRV_LV4 3 - -/* Drive strengths for Exynos5433 */ -#define EXYNOS5433_PIN_DRV_FAST_SR1 0 -#define EXYNOS5433_PIN_DRV_FAST_SR2 1 -#define EXYNOS5433_PIN_DRV_FAST_SR3 2 -#define EXYNOS5433_PIN_DRV_FAST_SR4 3 -#define EXYNOS5433_PIN_DRV_FAST_SR5 4 -#define EXYNOS5433_PIN_DRV_FAST_SR6 5 -#define EXYNOS5433_PIN_DRV_SLOW_SR1 8 -#define EXYNOS5433_PIN_DRV_SLOW_SR2 9 -#define EXYNOS5433_PIN_DRV_SLOW_SR3 0xa -#define EXYNOS5433_PIN_DRV_SLOW_SR4 0xb -#define EXYNOS5433_PIN_DRV_SLOW_SR5 0xc -#define EXYNOS5433_PIN_DRV_SLOW_SR6 0xf - -/* Drive strengths for Exynos850 GPIO_HSI block */ -#define EXYNOS850_HSI_PIN_DRV_LV1 0 /* 1x */ -#define EXYNOS850_HSI_PIN_DRV_LV1_5 1 /* 1.5x */ -#define EXYNOS850_HSI_PIN_DRV_LV2 2 /* 2x */ -#define EXYNOS850_HSI_PIN_DRV_LV2_5 3 /* 2.5x */ -#define EXYNOS850_HSI_PIN_DRV_LV3 4 /* 3x */ -#define EXYNOS850_HSI_PIN_DRV_LV4 5 /* 4x */ - -#define EXYNOS_PIN_FUNC_INPUT 0 -#define EXYNOS_PIN_FUNC_OUTPUT 1 -#define EXYNOS_PIN_FUNC_2 2 -#define EXYNOS_PIN_FUNC_3 3 -#define EXYNOS_PIN_FUNC_4 4 -#define EXYNOS_PIN_FUNC_5 5 -#define EXYNOS_PIN_FUNC_6 6 -#define EXYNOS_PIN_FUNC_EINT 0xf -#define EXYNOS_PIN_FUNC_F EXYNOS_PIN_FUNC_EINT - -/* Drive strengths for Exynos7 FSYS1 block */ -#define EXYNOS7_FSYS1_PIN_DRV_LV1 0 -#define EXYNOS7_FSYS1_PIN_DRV_LV2 4 -#define EXYNOS7_FSYS1_PIN_DRV_LV3 2 -#define EXYNOS7_FSYS1_PIN_DRV_LV4 6 -#define EXYNOS7_FSYS1_PIN_DRV_LV5 1 -#define EXYNOS7_FSYS1_PIN_DRV_LV6 5 - -#endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */ diff --git a/include/dt-bindings/reset/rockchip,rk3588-cru.h b/include/dt-bindings/reset/rockchip,rk3588-cru.h index d4264db2a0..e2fe4bd5f7 100644 --- a/include/dt-bindings/reset/rockchip,rk3588-cru.h +++ b/include/dt-bindings/reset/rockchip,rk3588-cru.h @@ -751,4 +751,6 @@ #define SRST_P_TRNG_CHK 658 #define SRST_TRNG_S 659 +#define SRST_A_HDMIRX_BIU 660 + #endif diff --git a/include/dt-bindings/reset/st,stm32mp25-rcc.h b/include/dt-bindings/reset/st,stm32mp25-rcc.h index d5615930bc..748e78ae20 100644 --- a/include/dt-bindings/reset/st,stm32mp25-rcc.h +++ b/include/dt-bindings/reset/st,stm32mp25-rcc.h @@ -69,7 +69,7 @@ #define ADC3_R 59 #define ETH1_R 60 #define ETH2_R 61 -#define USB2_R 62 +#define USBH_R 62 #define USB2PHY1_R 63 #define USB2PHY2_R 64 #define USB3DR_R 65 diff --git a/include/dt-bindings/thermal/mediatek,lvts-thermal.h b/include/dt-bindings/thermal/mediatek,lvts-thermal.h index 997e2f5512..bf95309d25 100644 --- a/include/dt-bindings/thermal/mediatek,lvts-thermal.h +++ b/include/dt-bindings/thermal/mediatek,lvts-thermal.h @@ -16,6 +16,32 @@ #define MT7988_ETHWARP_0 6 
#define MT7988_ETHWARP_1 7 +#define MT8186_LITTLE_CPU0 0 +#define MT8186_LITTLE_CPU1 1 +#define MT8186_LITTLE_CPU2 2 +#define MT8186_CAM 3 +#define MT8186_BIG_CPU0 4 +#define MT8186_BIG_CPU1 5 +#define MT8186_NNA 6 +#define MT8186_ADSP 7 +#define MT8186_MFG 8 + +#define MT8188_MCU_LITTLE_CPU0 0 +#define MT8188_MCU_LITTLE_CPU1 1 +#define MT8188_MCU_LITTLE_CPU2 2 +#define MT8188_MCU_LITTLE_CPU3 3 +#define MT8188_MCU_BIG_CPU0 4 +#define MT8188_MCU_BIG_CPU1 5 + +#define MT8188_AP_APU 0 +#define MT8188_AP_GPU1 1 +#define MT8188_AP_GPU2 2 +#define MT8188_AP_SOC1 3 +#define MT8188_AP_SOC2 4 +#define MT8188_AP_SOC3 5 +#define MT8188_AP_CAM1 6 +#define MT8188_AP_CAM2 7 + #define MT8195_MCU_BIG_CPU0 0 #define MT8195_MCU_BIG_CPU1 1 #define MT8195_MCU_BIG_CPU2 2 diff --git a/include/keys/trusted_dcp.h b/include/keys/trusted_dcp.h new file mode 100644 index 0000000000..9aaa42075b --- /dev/null +++ b/include/keys/trusted_dcp.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 sigma star gmbh + */ + +#ifndef TRUSTED_DCP_H +#define TRUSTED_DCP_H + +extern struct trusted_key_ops dcp_trusted_key_ops; + +#endif diff --git a/include/keys/trusted_tpm.h b/include/keys/trusted_tpm.h index 7769b72686..a088b33fd0 100644 --- a/include/keys/trusted_tpm.h +++ b/include/keys/trusted_tpm.h @@ -6,8 +6,6 @@ #include <linux/tpm_command.h> /* implementation specific TPM constants */ -#define MAX_BUF_SIZE 1024 -#define TPM_GETRANDOM_SIZE 14 #define TPM_SIZE_OFFSET 2 #define TPM_RETURN_OFFSET 6 #define TPM_DATA_OFFSET 10 diff --git a/include/kunit/test.h b/include/kunit/test.h index 61637ef323..e32b4cb7af 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -301,6 +301,8 @@ struct kunit { struct list_head resources; /* Protected by lock. */ char status_comment[KUNIT_STATUS_COMMENT_SIZE]; + /* Saves the last seen test. Useful to help with faults. */ + struct kunit_loc last_seen; }; static inline void kunit_set_failure(struct kunit *test) @@ -567,6 +569,15 @@ void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt, #define kunit_err(test, fmt, ...) \ kunit_printk(KERN_ERR, test, fmt, ##__VA_ARGS__) +/* + * Must be called at the beginning of each KUNIT_*_ASSERTION(). + * Cf. KUNIT_CURRENT_LOC. + */ +#define _KUNIT_SAVE_LOC(test) do { \ + WRITE_ONCE(test->last_seen.file, __FILE__); \ + WRITE_ONCE(test->last_seen.line, __LINE__); \ +} while (0) + /** * KUNIT_SUCCEED() - A no-op expectation. Only exists for code clarity. * @test: The test context object. @@ -575,7 +586,7 @@ void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt, * words, it does nothing and only exists for code clarity. See * KUNIT_EXPECT_TRUE() for more information. */ -#define KUNIT_SUCCEED(test) do {} while (0) +#define KUNIT_SUCCEED(test) _KUNIT_SAVE_LOC(test) void __noreturn __kunit_abort(struct kunit *test); @@ -601,14 +612,16 @@ void __printf(6, 7) __kunit_do_failed_assertion(struct kunit *test, } while (0) -#define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) \ +#define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) do { \ + _KUNIT_SAVE_LOC(test); \ _KUNIT_FAILED(test, \ assert_type, \ kunit_fail_assert, \ kunit_fail_assert_format, \ {}, \ fmt, \ - ##__VA_ARGS__) + ##__VA_ARGS__); \ +} while (0) /** * KUNIT_FAIL() - Always causes a test to fail when evaluated. @@ -637,6 +650,7 @@ void __printf(6, 7) __kunit_do_failed_assertion(struct kunit *test, fmt, \ ...) 
\ do { \ + _KUNIT_SAVE_LOC(test); \ if (likely(!!(condition_) == !!expected_true_)) \ break; \ \ @@ -698,6 +712,7 @@ do { \ .right_text = #right, \ }; \ \ + _KUNIT_SAVE_LOC(test); \ if (likely(__left op __right)) \ break; \ \ @@ -758,6 +773,7 @@ do { \ .right_text = #right, \ }; \ \ + _KUNIT_SAVE_LOC(test); \ if (likely((__left) && (__right) && (strcmp(__left, __right) op 0))) \ break; \ \ @@ -791,6 +807,7 @@ do { \ .right_text = #right, \ }; \ \ + _KUNIT_SAVE_LOC(test); \ if (likely(__left && __right)) \ if (likely(memcmp(__left, __right, __size) op 0)) \ break; \ @@ -815,6 +832,7 @@ do { \ do { \ const typeof(ptr) __ptr = (ptr); \ \ + _KUNIT_SAVE_LOC(test); \ if (!IS_ERR_OR_NULL(__ptr)) \ break; \ \ diff --git a/include/kunit/try-catch.h b/include/kunit/try-catch.h index c507dd4311..7c966a1adb 100644 --- a/include/kunit/try-catch.h +++ b/include/kunit/try-catch.h @@ -14,13 +14,11 @@ typedef void (*kunit_try_catch_func_t)(void *); -struct completion; struct kunit; /** * struct kunit_try_catch - provides a generic way to run code which might fail. * @test: The test case that is currently being executed. - * @try_completion: Completion that the control thread waits on while test runs. * @try_result: Contains any errno obtained while running test case. * @try: The function, the test case, to attempt to run. * @catch: The function called if @try bails out. @@ -46,7 +44,6 @@ struct kunit; struct kunit_try_catch { /* private: internal use only. */ struct kunit *test; - struct completion *try_completion; int try_result; kunit_try_catch_func_t try; kunit_try_catch_func_t catch; diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 4703594664..f5172549f9 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -210,6 +210,12 @@ struct vgic_its { struct mutex its_lock; struct list_head device_list; struct list_head collection_list; + + /* + * Caches the (device_id, event_id) -> vgic_irq translation for + * LPIs that are mapped and enabled. + */ + struct xarray translation_cache; }; struct vgic_state_iter; @@ -274,13 +280,8 @@ struct vgic_dist { */ u64 propbaser; - /* Protects the lpi_list. 
*/ - raw_spinlock_t lpi_list_lock; +#define LPI_XA_MARK_DEBUG_ITER XA_MARK_0 struct xarray lpi_xa; - atomic_t lpi_count; - - /* LPI translation cache */ - struct list_head lpi_translation_cache; /* used by vgic-debug */ struct vgic_state_iter *iter; @@ -330,7 +331,7 @@ struct vgic_cpu { struct vgic_v3_cpu_if vgic_v3; }; - struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; + struct vgic_irq *private_irqs; raw_spinlock_t ap_list_lock; /* Protects the ap_list */ @@ -388,7 +389,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); void kvm_vgic_load(struct kvm_vcpu *vcpu); void kvm_vgic_put(struct kvm_vcpu *vcpu); -void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu); #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) #define vgic_initialized(k) ((k)->arch.vgic.initialized) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 168201e4c7..28c3fb2bef 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -242,9 +242,6 @@ static inline bool acpi_gicc_is_usable(struct acpi_madt_generic_interrupt *gicc) return gicc->flags & ACPI_MADT_ENABLED; } -/* the following numa functions are architecture-dependent */ -void acpi_numa_slit_init (struct acpi_table_slit *slit); - #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else @@ -267,8 +264,6 @@ static inline void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } #endif -int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); - #ifndef PHYS_CPUID_INVALID typedef u32 phys_cpuid_t; #define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) @@ -421,7 +416,7 @@ extern char *wmi_get_acpi_device_uid(const char *guid); extern char acpi_video_backlight_string[]; extern long acpi_is_video_device(acpi_handle handle); -extern int acpi_blacklisted(void); + extern void acpi_osi_setup(char *str); extern bool acpi_osi_is_win8(void); @@ -1237,7 +1232,7 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); bool acpi_gpio_get_io_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); -int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index, +int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable); #else static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, @@ -1250,7 +1245,7 @@ static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares, { return false; } -static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, +static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable) { return -ENXIO; @@ -1263,10 +1258,10 @@ static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable); } -static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, +static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *con_id, int index) { - return acpi_dev_gpio_irq_wake_get_by(adev, name, index, NULL); + return acpi_dev_gpio_irq_wake_get_by(adev, con_id, index, NULL); } static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 1cb65592c9..d4ed5622cf 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -39,7 +39,7 @@ void iort_get_rmr_sids(struct 
fwnode_handle *iommu_fwnode, void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head); /* IOMMU interface */ -int iort_dma_get_ranges(struct device *dev, u64 *size); +int iort_dma_get_ranges(struct device *dev, u64 *limit); int iort_iommu_configure_id(struct device *dev, const u32 *id_in); void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head); phys_addr_t acpi_iort_dma_get_max_cpu_address(void); @@ -55,7 +55,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *hea static inline void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { } /* IOMMU interface */ -static inline int iort_dma_get_ranges(struct device *dev, u64 *size) +static inline int iort_dma_get_ranges(struct device *dev, u64 *limit) { return -ENODEV; } static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in) { return -ENODEV; }
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h new file mode 100644 index 0000000000..8c61ccd161 --- /dev/null +++ b/include/linux/alloc_tag.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * allocation tagging + */ +#ifndef _LINUX_ALLOC_TAG_H +#define _LINUX_ALLOC_TAG_H + +#include <linux/bug.h> +#include <linux/codetag.h> +#include <linux/container_of.h> +#include <linux/preempt.h> +#include <asm/percpu.h> +#include <linux/cpumask.h> +#include <linux/smp.h> +#include <linux/static_key.h> +#include <linux/irqflags.h> + +struct alloc_tag_counters { + u64 bytes; + u64 calls; +}; + +/* + * An instance of this structure is created in a special ELF section at every + * allocation callsite. At runtime, the special section is treated as + * an array of these. The embedded codetag uses the codetag framework. + */ +struct alloc_tag { + struct codetag ct; + struct alloc_tag_counters __percpu *counters; +} __aligned(8); + +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG + +#define CODETAG_EMPTY ((void *)1) + +static inline bool is_codetag_empty(union codetag_ref *ref) +{ + return ref->ct == CODETAG_EMPTY; +} + +static inline void set_codetag_empty(union codetag_ref *ref) +{ + if (ref) + ref->ct = CODETAG_EMPTY; +} + +#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ + +static inline bool is_codetag_empty(union codetag_ref *ref) { return false; } +static inline void set_codetag_empty(union codetag_ref *ref) {} + +#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ + +#ifdef CONFIG_MEM_ALLOC_PROFILING + +struct codetag_bytes { + struct codetag *ct; + s64 bytes; +}; + +size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep); + +static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct) +{ + return container_of(ct, struct alloc_tag, ct); +} + +#ifdef ARCH_NEEDS_WEAK_PER_CPU +/* + * When percpu variables are required to be defined as weak, static percpu + * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION). + * Instead we will account all module allocations to a single counter. + */ +DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); + +#define DEFINE_ALLOC_TAG(_alloc_tag) \ + static struct alloc_tag _alloc_tag __used __aligned(8) \ + __section("alloc_tags") = { \ + .ct = CODE_TAG_INIT, \ + .counters = &_shared_alloc_tag }; + +#else /* ARCH_NEEDS_WEAK_PER_CPU */ + +#define DEFINE_ALLOC_TAG(_alloc_tag) \ + static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \ + static struct alloc_tag _alloc_tag __used __aligned(8) \ + __section("alloc_tags") = { \ + .ct = CODE_TAG_INIT, \ + .counters = &_alloc_tag_cntr }; + +#endif /* ARCH_NEEDS_WEAK_PER_CPU */ + +DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, + mem_alloc_profiling_key); + +static inline bool mem_alloc_profiling_enabled(void) +{ + return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, + &mem_alloc_profiling_key); +} + +static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag) +{ + struct alloc_tag_counters v = { 0, 0 }; + struct alloc_tag_counters *counter; + int cpu; + + for_each_possible_cpu(cpu) { + counter = per_cpu_ptr(tag->counters, cpu); + v.bytes += counter->bytes; + v.calls += counter->calls; + } + + return v; +} + +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG +static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) +{ + WARN_ONCE(ref && ref->ct, + "alloc_tag was not cleared (got tag for %s:%u)\n", + ref->ct->filename, ref->ct->lineno); + + WARN_ONCE(!tag, "current->alloc_tag not set\n"); +} + +static inline void alloc_tag_sub_check(union codetag_ref *ref) +{ + WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n"); +} +#else +static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {} +static inline void alloc_tag_sub_check(union codetag_ref *ref) {} +#endif + +/* Caller should verify both ref and tag to be valid */ +static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) +{ + ref->ct = &tag->ct; + /* + * We need to increment the call counter every time we have a new + * allocation or when we split a large allocation into smaller ones. + * Each new reference for every sub-allocation needs to increment the + * call counter because when we free each part the counter will be decremented. + */ + this_cpu_inc(tag->counters->calls); +} + +static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) +{ + alloc_tag_add_check(ref, tag); + if (!ref || !tag) + return; + + __alloc_tag_ref_set(ref, tag); +} + +static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes) +{ + alloc_tag_add_check(ref, tag); + if (!ref || !tag) + return; + + __alloc_tag_ref_set(ref, tag); + this_cpu_add(tag->counters->bytes, bytes); +} + +static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) +{ + struct alloc_tag *tag; + + alloc_tag_sub_check(ref); + if (!ref || !ref->ct) + return; + + if (is_codetag_empty(ref)) { + ref->ct = NULL; + return; + } + + tag = ct_to_alloc_tag(ref->ct); + + this_cpu_sub(tag->counters->bytes, bytes); + this_cpu_dec(tag->counters->calls); + + ref->ct = NULL; +} + +#define alloc_tag_record(p) ((p) = current->alloc_tag) + +#else /* CONFIG_MEM_ALLOC_PROFILING */ + +#define DEFINE_ALLOC_TAG(_alloc_tag) +static inline bool mem_alloc_profiling_enabled(void) { return false; } +static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, + size_t bytes) {} +static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {} +#define alloc_tag_record(p) do {} while (0) + +#endif /* CONFIG_MEM_ALLOC_PROFILING */ + +#define alloc_hooks_tag(_tag, _do_alloc) \ +({ \ + struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag); \ + typeof(_do_alloc) _res = _do_alloc; \ + alloc_tag_restore(_tag, _old); \ + _res; \ +}) + +#define alloc_hooks(_do_alloc) \ +({ \ + DEFINE_ALLOC_TAG(_alloc_tag); \ + alloc_hooks_tag(&_alloc_tag, _do_alloc); \ +}) + +#endif /* _LINUX_ALLOC_TAG_H */
struct cpumask *cpus); void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus); -DECLARE_PER_CPU(unsigned long, thermal_pressure); +DECLARE_PER_CPU(unsigned long, hw_pressure); -static inline unsigned long topology_get_thermal_pressure(int cpu) +static inline unsigned long topology_get_hw_pressure(int cpu) { - return per_cpu(thermal_pressure, cpu); + return per_cpu(hw_pressure, cpu); } -void topology_update_thermal_pressure(const struct cpumask *cpus, +void topology_update_hw_pressure(const struct cpumask *cpus, unsigned long capped_freq); struct cpu_topology { diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index c906f666ff..c82d567681 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -126,6 +126,7 @@ /* FFA Bus/Device/Driver related */ struct ffa_device { u32 id; + u32 properties; int vm_id; bool mode_32bit; uuid_t uuid; @@ -221,12 +222,29 @@ struct ffa_partition_info { #define FFA_PARTITION_DIRECT_SEND BIT(1) /* partition can send and receive indirect messages. */ #define FFA_PARTITION_INDIRECT_MSG BIT(2) +/* partition can receive notifications */ +#define FFA_PARTITION_NOTIFICATION_RECV BIT(3) /* partition runs in the AArch64 execution state. */ #define FFA_PARTITION_AARCH64_EXEC BIT(8) u32 properties; u32 uuid[4]; }; +static inline +bool ffa_partition_check_property(struct ffa_device *dev, u32 property) +{ + return dev->properties & property; +} + +#define ffa_partition_supports_notify_recv(dev) \ + ffa_partition_check_property(dev, FFA_PARTITION_NOTIFICATION_RECV) + +#define ffa_partition_supports_indirect_msg(dev) \ + ffa_partition_check_property(dev, FFA_PARTITION_INDIRECT_MSG) + +#define ffa_partition_supports_direct_recv(dev) \ + ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_RECV) + /* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */ struct ffa_send_direct_data { unsigned long data0; /* w3/x3 */ @@ -236,6 +254,14 @@ struct ffa_send_direct_data { unsigned long data4; /* w7/x7 */ }; +struct ffa_indirect_msg_hdr { + u32 flags; + u32 res0; + u32 offset; + u32 send_recv_id; + u32 size; +}; + struct ffa_mem_region_addr_range { /* The base IPA of the constituent memory region, aligned to 4 kiB */ u64 address; @@ -396,6 +422,7 @@ struct ffa_msg_ops { void (*mode_32bit_set)(struct ffa_device *dev); int (*sync_send_receive)(struct ffa_device *dev, struct ffa_send_direct_data *data); + int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz); }; struct ffa_mem_ops { diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h index 3f1fe1774f..4b61b0e577 100644 --- a/include/linux/backing-file.h +++ b/include/linux/backing-file.h @@ -22,6 +22,9 @@ struct backing_file_ctx { struct file *backing_file_open(const struct path *user_path, int flags, const struct path *real_path, const struct cred *cred); +struct file *backing_tmpfile_open(const struct path *user_path, int flags, + const struct path *real_parentpath, + umode_t mode, const struct cred *cred); ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter, struct kiocb *iocb, int flags, struct backing_file_ctx *ctx); diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 614653e07e..19a1c0e226 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -13,6 +13,7 @@ #include <linux/fb.h> #include <linux/mutex.h> #include <linux/notifier.h> +#include <linux/types.h> /** * enum backlight_update_reason - what method was used to update backlight @@ -110,7 +111,6 
@@ enum backlight_scale { }; struct backlight_device; -struct fb_info; /** * struct backlight_ops - backlight operations @@ -160,18 +160,18 @@ struct backlight_ops { int (*get_brightness)(struct backlight_device *); /** - * @check_fb: Check the framebuffer device. + * @controls_device: Check against the display device * - * Check if given framebuffer device is the one bound to this backlight. - * This operation is optional and if not implemented it is assumed that the - * fbdev is always the one bound to the backlight. + * Check if the backlight controls the given display device. This + * operation is optional and if not implemented it is assumed that + * the display is always the one controlled by the backlight. * * RETURNS: * - * If info is NULL or the info matches the fbdev bound to the backlight return true. - * If info does not match the fbdev bound to the backlight return false. + * If display_dev is NULL or display_dev matches the device controlled by + * the backlight, return true. Otherwise return false. */ - int (*check_fb)(struct backlight_device *bd, struct fb_info *info); + bool (*controls_device)(struct backlight_device *bd, struct device *display_dev); }; /** @@ -219,25 +219,6 @@ struct backlight_properties { int power; /** - * @fb_blank: The power state from the FBIOBLANK ioctl. - * - * When the FBIOBLANK ioctl is called @fb_blank is set to the - * blank parameter and the update_status() operation is called. - * - * When the backlight device is enabled @fb_blank is set - * to FB_BLANK_UNBLANK. When the backlight device is disabled - * @fb_blank is set to FB_BLANK_POWERDOWN. - * - * Backlight drivers should avoid using this property. It has been - * replaced by state & BL_CORE_FBLANK (although most drivers should - * use backlight_is_blank() as the preferred means to get the blank - * state). - * - * fb_blank is deprecated and will be removed. - */ - int fb_blank; - - /** * @type: The type of backlight supported. * * The backlight type allows userspace to make appropriate @@ -366,7 +347,6 @@ static inline int backlight_enable(struct backlight_device *bd) return 0; bd->props.power = FB_BLANK_UNBLANK; - bd->props.fb_blank = FB_BLANK_UNBLANK; bd->props.state &= ~BL_CORE_FBBLANK; return backlight_update_status(bd); @@ -382,7 +362,6 @@ static inline int backlight_disable(struct backlight_device *bd) return 0; bd->props.power = FB_BLANK_POWERDOWN; - bd->props.fb_blank = FB_BLANK_POWERDOWN; bd->props.state |= BL_CORE_FBBLANK; return backlight_update_status(bd); @@ -395,15 +374,13 @@ static inline int backlight_disable(struct backlight_device *bd) * Display is expected to be blank if any of these is true:: * * 1) if power is not UNBLANK - * 2) if fb_blank is not UNBLANK - * 3) if state indicate BLANK or SUSPENDED + * 2) if state indicates BLANK or SUSPENDED * * Returns true if display is expected to be blank, false otherwise.
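For context, this is roughly how a driver's update_status() hook consumes that predicate; my_hw_set_brightness() stands in for the actual register write and is hypothetical:

static int my_backlight_update_status(struct backlight_device *bd)
{
	/* Blank per power state or the BL_CORE_{SUSPENDED,FBBLANK} bits. */
	int level = backlight_is_blank(bd) ? 0 : bd->props.brightness;

	return my_hw_set_brightness(bd, level);	/* hypothetical */
}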
*/ static inline bool backlight_is_blank(const struct backlight_device *bd) { return bd->props.power != FB_BLANK_UNBLANK || - bd->props.fb_blank != FB_BLANK_UNBLANK || bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); } diff --git a/include/linux/bio.h b/include/linux/bio.h index 875d792bff..818e936129 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -615,6 +615,13 @@ static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) bl->tail = bl2->tail; } +static inline void bio_list_merge_init(struct bio_list *bl, + struct bio_list *bl2) +{ + bio_list_merge(bl, bl2); + bio_list_init(bl2); +} + static inline void bio_list_merge_head(struct bio_list *bl, struct bio_list *bl2) { @@ -724,6 +731,7 @@ static inline bool bioset_initialized(struct bio_set *bs) bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed); +void bio_integrity_unmap_free_user(struct bio *bio); extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); extern bool bio_integrity_prep(struct bio *); @@ -800,6 +808,9 @@ static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf, { return -EINVAL; } +static inline void bio_integrity_unmap_free_user(struct bio *bio) +{ +} #endif /* CONFIG_BLK_DEV_INTEGRITY */ @@ -824,5 +835,9 @@ static inline void bio_clear_polled(struct bio *bio) struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, unsigned int nr_pages, blk_opf_t opf, gfp_t gfp); +struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new); + +struct bio *blk_alloc_discard_bio(struct block_device *bdev, + sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask); #endif /* __LINUX_BIO_H */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index aa40961265..8c4768c44a 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -83,6 +83,10 @@ struct device; * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst * bitmap_get_value8(map, start) Get 8bit value from map at start * bitmap_set_value8(map, value, start) Set 8bit value to map at start + * bitmap_read(map, start, nbits) Read an nbits-sized value from + * map at start + * bitmap_write(map, value, start, nbits) Write an nbits-sized value to + * map at start * * Note, bitmap_zero() and bitmap_fill() operate over the region of * unsigned longs, that is, bits behind bitmap till the unsigned long @@ -222,9 +226,11 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig, #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) +#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE) + static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) { - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + unsigned int len = bitmap_size(nbits); if (small_const_nbits(nbits)) *dst = 0; @@ -234,7 +240,7 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) { - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + unsigned int len = bitmap_size(nbits); if (small_const_nbits(nbits)) *dst = ~0UL; @@ -245,7 +251,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) static inline void bitmap_copy(unsigned long 
*dst, const unsigned long *src, unsigned int nbits) { - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + unsigned int len = bitmap_size(nbits); if (small_const_nbits(nbits)) *dst = *src; @@ -722,38 +728,83 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask) } /** - * bitmap_get_value8 - get an 8-bit value within a memory region + * bitmap_read - read a value of n-bits from the memory region * @map: address to the bitmap memory region - * @start: bit offset of the 8-bit value; must be a multiple of 8 + * @start: bit offset of the n-bit value + * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG * - * Returns the 8-bit value located at the @start bit offset within the @src - * memory region. + * Returns: value of @nbits bits located at the @start bit offset within the + * @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return + * value is undefined. */ -static inline unsigned long bitmap_get_value8(const unsigned long *map, - unsigned long start) +static inline unsigned long bitmap_read(const unsigned long *map, + unsigned long start, + unsigned long nbits) { - const size_t index = BIT_WORD(start); - const unsigned long offset = start % BITS_PER_LONG; + size_t index = BIT_WORD(start); + unsigned long offset = start % BITS_PER_LONG; + unsigned long space = BITS_PER_LONG - offset; + unsigned long value_low, value_high; + + if (unlikely(!nbits || nbits > BITS_PER_LONG)) + return 0; + + if (space >= nbits) + return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits); - return (map[index] >> offset) & 0xFF; + value_low = map[index] & BITMAP_FIRST_WORD_MASK(start); + value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits); + return (value_low >> offset) | (value_high << space); } /** - * bitmap_set_value8 - set an 8-bit value within a memory region + * bitmap_write - write n-bit value within a memory region * @map: address to the bitmap memory region - * @value: the 8-bit value; values wider than 8 bits may clobber bitmap - * @start: bit offset of the 8-bit value; must be a multiple of 8 + * @value: value to write, clamped to nbits + * @start: bit offset of the n-bit value + * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG. + * + * bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(), + * i.e. bits beyond @nbits are ignored: + * + * for (bit = 0; bit < nbits; bit++) + * __assign_bit(start + bit, bitmap, val & BIT(bit)); + * + * For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed. */ -static inline void bitmap_set_value8(unsigned long *map, unsigned long value, - unsigned long start) -{ - const size_t index = BIT_WORD(start); - const unsigned long offset = start % BITS_PER_LONG; - - map[index] &= ~(0xFFUL << offset); +static inline void bitmap_write(unsigned long *map, unsigned long value, + unsigned long start, unsigned long nbits) +{ + size_t index; + unsigned long offset; + unsigned long space; + unsigned long mask; + bool fit; + + if (unlikely(!nbits || nbits > BITS_PER_LONG)) + return; + + mask = BITMAP_LAST_WORD_MASK(nbits); + value &= mask; + offset = start % BITS_PER_LONG; + space = BITS_PER_LONG - offset; + fit = space >= nbits; + index = BIT_WORD(start); + + map[index] &= (fit ? 
(~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start)); map[index] |= value << offset; + if (fit) + return; + + map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits); + map[index + 1] |= (value >> space); } +#define bitmap_get_value8(map, start) \ + bitmap_read(map, start, BITS_PER_BYTE) +#define bitmap_set_value8(map, value, start) \ + bitmap_write(map, value, start, BITS_PER_BYTE) + #endif /* __ASSEMBLY__ */ #endif /* __LINUX_BITMAP_H */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h index f7f5a783da..46d4bdc634 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -8,19 +8,14 @@ #include <uapi/linux/kernel.h> -/* Set bits in the first 'n' bytes when loaded from memory */ -#ifdef __LITTLE_ENDIAN -# define aligned_byte_mask(n) ((1UL << 8*(n))-1) -#else -# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n))) -#endif - #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) #define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) #define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64)) #define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) #define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) +#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE) + extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); extern unsigned int __sw_hweight32(unsigned int w); @@ -201,7 +196,7 @@ static __always_inline __s64 sign_extend64(__u64 value, int index) return (__s64)(value << shift) >> shift; } -static inline unsigned fls_long(unsigned long l) +static inline unsigned int fls_long(unsigned long l) { if (sizeof(l) == 4) return fls(l); @@ -237,7 +232,7 @@ static inline int get_count_order_long(unsigned long l) * The result is not defined if no bits are set, so check that @word * is non-zero before calling this. */ -static inline unsigned long __ffs64(u64 word) +static inline unsigned int __ffs64(u64 word) { #if BITS_PER_LONG == 32 if (((u32)word) == 0UL) @@ -253,18 +248,12 @@ static inline unsigned long __ffs64(u64 word) * @word: The word to search * @n: Bit to find */ -static inline unsigned long fns(unsigned long word, unsigned int n) +static inline unsigned int fns(unsigned long word, unsigned int n) { - unsigned int bit; - - while (word) { - bit = __ffs(word); - if (n-- == 0) - return bit; - __clear_bit(bit, &word); - } + while (word && n--) + word &= word - 1; - return BITS_PER_LONG; + return word ? __ffs(word) : BITS_PER_LONG; } /** @@ -273,23 +262,11 @@ static inline unsigned long fns(unsigned long word, unsigned int n) * @addr: the address to start counting from * @value: the value to assign */ -static __always_inline void assign_bit(long nr, volatile unsigned long *addr, - bool value) -{ - if (value) - set_bit(nr, addr); - else - clear_bit(nr, addr); -} +#define assign_bit(nr, addr, value) \ + ((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr))) -static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, - bool value) -{ - if (value) - __set_bit(nr, addr); - else - __clear_bit(nr, addr); -} +#define __assign_bit(nr, addr, value) \ + ((value) ? 
__set_bit((nr), (addr)) : __clear_bit((nr), (addr))) /** * __ptr_set_bit - Set bit in a pointer's value diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index e253e7bd0d..7428cb4395 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -66,12 +66,6 @@ blk_integrity_queue_supports_integrity(struct request_queue *q) return q->integrity.profile; } -static inline void blk_queue_max_integrity_segments(struct request_queue *q, - unsigned int segs) -{ - q->limits.max_integrity_segments = segs; -} - static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) { @@ -151,10 +145,6 @@ static inline void blk_integrity_register(struct gendisk *d, static inline void blk_integrity_unregister(struct gendisk *d) { } -static inline void blk_queue_max_integrity_segments(struct request_queue *q, - unsigned int segs) -{ -} static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index d3d8fd8e22..89ba6b16fe 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -54,8 +54,8 @@ typedef __u32 __bitwise req_flags_t; /* Look at ->special_vec for the actual data payload instead of the bio chain. */ #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) -/* The per-zone write lock is held for this request */ -#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) +/* The request completion needs to be signaled to zone write plugging. */ +#define RQF_ZONE_WRITE_PLUGGING ((__force req_flags_t)(1 << 20)) /* ->timeout has been called, don't expire again */ #define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) #define RQF_RESV ((__force req_flags_t)(1 << 23)) @@ -1150,85 +1150,4 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, } void blk_dump_rq_flags(struct request *, char *); -#ifdef CONFIG_BLK_DEV_ZONED -static inline unsigned int blk_rq_zone_no(struct request *rq) -{ - return disk_zone_no(rq->q->disk, blk_rq_pos(rq)); -} - -static inline unsigned int blk_rq_zone_is_seq(struct request *rq) -{ - return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq)); -} - -/** - * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization. - * @rq: Request to examine. - * - * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
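Stepping back to the bitmap_read()/bitmap_write() pair introduced a few hunks above: a value that straddles a word boundary is split across map[index] and map[index + 1] on write and reassembled on read. A minimal round-trip sketch, assuming a 64-bit BITS_PER_LONG; bitmap_rw_demo() is purely illustrative:

static void bitmap_rw_demo(void)
{
	DECLARE_BITMAP(map, 128);	/* two words on a 64-bit build */

	bitmap_zero(map, 128);
	/* A 12-bit value at offset 60 spans map[0] (4 bits) and map[1] (8 bits). */
	bitmap_write(map, 0xabc, 60, 12);
	WARN_ON(bitmap_read(map, 60, 12) != 0xabc);	/* round-trips intact */
}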
- */ -static inline bool blk_rq_is_seq_zoned_write(struct request *rq) -{ - return op_needs_zoned_write_locking(req_op(rq)) && - blk_rq_zone_is_seq(rq); -} - -bool blk_req_needs_zone_write_lock(struct request *rq); -bool blk_req_zone_write_trylock(struct request *rq); -void __blk_req_zone_write_lock(struct request *rq); -void __blk_req_zone_write_unlock(struct request *rq); - -static inline void blk_req_zone_write_lock(struct request *rq) -{ - if (blk_req_needs_zone_write_lock(rq)) - __blk_req_zone_write_lock(rq); -} - -static inline void blk_req_zone_write_unlock(struct request *rq) -{ - if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) - __blk_req_zone_write_unlock(rq); -} - -static inline bool blk_req_zone_is_write_locked(struct request *rq) -{ - return rq->q->disk->seq_zones_wlock && - test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock); -} - -static inline bool blk_req_can_dispatch_to_zone(struct request *rq) -{ - if (!blk_req_needs_zone_write_lock(rq)) - return true; - return !blk_req_zone_is_write_locked(rq); -} -#else /* CONFIG_BLK_DEV_ZONED */ -static inline bool blk_rq_is_seq_zoned_write(struct request *rq) -{ - return false; -} - -static inline bool blk_req_needs_zone_write_lock(struct request *rq) -{ - return false; -} - -static inline void blk_req_zone_write_lock(struct request *rq) -{ -} - -static inline void blk_req_zone_write_unlock(struct request *rq) -{ -} -static inline bool blk_req_zone_is_write_locked(struct request *rq) -{ - return false; -} - -static inline bool blk_req_can_dispatch_to_zone(struct request *rq) -{ - return true; -} -#endif /* CONFIG_BLK_DEV_ZONED */ - #endif /* BLK_MQ_H */ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index cb1526ec44..781c450049 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -45,12 +45,17 @@ struct block_device { struct request_queue * bd_queue; struct disk_stats __percpu *bd_stats; unsigned long bd_stamp; - bool bd_read_only; /* read-only policy */ - u8 bd_partno; - bool bd_write_holder; - bool bd_has_submit_bio; + atomic_t __bd_flags; // partition number + flags +#define BD_PARTNO 255 // lower 8 bits; assign-once +#define BD_READ_ONLY (1u<<8) // read-only policy +#define BD_WRITE_HOLDER (1u<<9) +#define BD_HAS_SUBMIT_BIO (1u<<10) +#define BD_RO_WARNED (1u<<11) +#ifdef CONFIG_FAIL_MAKE_REQUEST +#define BD_MAKE_IT_FAIL (1u<<12) +#endif dev_t bd_dev; - struct inode *bd_inode; /* will die */ + struct address_space *bd_mapping; /* page cache */ atomic_t bd_openers; spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ @@ -65,10 +70,6 @@ struct block_device { struct mutex bd_fsfreeze_mutex; /* serialize freeze/thaw */ struct partition_meta_info *bd_meta_info; -#ifdef CONFIG_FAIL_MAKE_REQUEST - bool bd_make_it_fail; -#endif - bool bd_ro_warned; int bd_writers; /* * keep this out-of-line as it's both big and not needed in the fast @@ -88,15 +89,9 @@ struct block_device { /* * Block error status values. See block/blk-core:blk_errors for the details. - * Alpha cannot write a byte atomically, so we need to use 32-bit value. 
*/ -#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) -typedef u32 __bitwise blk_status_t; -typedef u32 blk_short_t; -#else typedef u8 __bitwise blk_status_t; typedef u16 blk_short_t; -#endif #define BLK_STS_OK 0 #define BLK_STS_NOTSUPP ((__force blk_status_t)1) #define BLK_STS_TIMEOUT ((__force blk_status_t)2) @@ -137,25 +132,13 @@ typedef u16 blk_short_t; #define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) /* - * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone - * related resources are unavailable, but the driver can guarantee the queue - * will be rerun in the future once the resources become available again. - * - * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references - * a zone specific resource and IO to a different zone on the same device could - * still be served. Examples of that are zones that are write-locked, but a read - * to the same zone could be served. - */ -#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14) - -/* * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion * path if the device returns a status indicating that too many zone resources * are currently open. The same command should be successful if resubmitted * after the number of open zones decreases below the device's limits, which is * reported in the request_queue's max_open_zones. */ -#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)15) +#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)14) /* * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion @@ -164,20 +147,20 @@ typedef u16 blk_short_t; * after the number of active zones decreases below the device's limits, which * is reported in the request_queue's max_active_zones. */ -#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)16) +#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)15) /* * BLK_STS_OFFLINE is returned from the driver when the target device is offline * or is being taken offline. This could help differentiate the case where a * device is intentionally being shut down from a real I/O error. */ -#define BLK_STS_OFFLINE ((__force blk_status_t)17) +#define BLK_STS_OFFLINE ((__force blk_status_t)16) /* * BLK_STS_DURATION_LIMIT is returned from the driver when the target device * aborted the command because it exceeded one of its Command Duration Limits. */ -#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)18) +#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)17) /** * blk_path_error - returns true if error may be path related @@ -234,7 +217,12 @@ struct bio { struct bvec_iter bi_iter; - blk_qc_t bi_cookie; + union { + /* for polled bios: */ + blk_qc_t bi_cookie; + /* for plugged zoned writes only: */ + unsigned int __bi_nr_segments; + }; bio_end_io_t *bi_end_io; void *bi_private; #ifdef CONFIG_BLK_CGROUP @@ -304,7 +292,8 @@ enum { BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */ BIO_QOS_MERGED, /* but went through rq_qos merge path */ BIO_REMAPPED, - BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ + BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */ + BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */ BIO_FLAG_LAST }; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 00e62b81a7..24c3692992 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -179,22 +179,22 @@ struct gendisk { #ifdef CONFIG_BLK_DEV_ZONED /* - * Zoned block device information for request dispatch control. 
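Editorially, the seq_zones_wlock bitmap whose description is being deleted here is superseded by zone write plugging: writes to a sequential zone queue up on a per-zone plug instead of write-locking the zone. A rough sketch of how a submission path consults blk_zone_plug_bio() (declared further down in this hunk); the my_submit_bio() hook itself is hypothetical:

static void my_submit_bio(struct bio *bio)	/* hypothetical */
{
	/*
	 * blk_zone_plug_bio() returns true when the zone write plug took
	 * ownership of the bio because another write to the same zone is
	 * in flight; the bio is resubmitted once that write completes.
	 */
	if (bdev_is_zoned(bio->bi_bdev) && blk_zone_plug_bio(bio, 0))
		return;

	/* ... issue the bio to the device ... */
}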
- * nr_zones is the total number of zones of the device. This is always - * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones - * bits which indicates if a zone is conventional (bit set) or - * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones - * bits which indicates if a zone is write locked, that is, if a write - * request targeting the zone was dispatched. - * - * Reads of this information must be protected with blk_queue_enter() / - * blk_queue_exit(). Modifying this information is only allowed while - * no requests are being processed. See also blk_mq_freeze_queue() and - * blk_mq_unfreeze_queue(). + * Zoned block device information. Reads of this information must be + * protected with blk_queue_enter() / blk_queue_exit(). Modifying this + * information is only allowed while no requests are being processed. + * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue(). */ unsigned int nr_zones; + unsigned int zone_capacity; + unsigned int last_zone_capacity; unsigned long *conv_zones_bitmap; - unsigned long *seq_zones_wlock; + unsigned int zone_wplugs_hash_bits; + spinlock_t zone_wplugs_lock; + struct mempool_s *zone_wplugs_pool; + struct hlist_head *zone_wplugs_hash; + struct list_head zone_wplugs_err_list; + struct work_struct zone_wplugs_work; + struct workqueue_struct *zone_wplugs_wq; #endif /* CONFIG_BLK_DEV_ZONED */ #if IS_ENABLED(CONFIG_CDROM) @@ -213,11 +213,6 @@ struct gendisk { struct blk_independent_access_ranges *ia_ranges; }; -static inline bool disk_live(struct gendisk *disk) -{ - return !inode_unhashed(disk->part0->bd_inode); -} - /** * disk_openers - returns how many openers are there for a disk * @disk: disk to check @@ -344,8 +339,7 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, sector_t sectors, sector_t nr_sectors); -int blk_revalidate_disk_zones(struct gendisk *disk, - void (*update_driver_data)(struct gendisk *disk)); +int blk_revalidate_disk_zones(struct gendisk *disk); /* * Independent access ranges: struct blk_independent_access_range describes @@ -462,8 +456,6 @@ struct request_queue { atomic_t nr_active_requests_shared_tags; - unsigned int required_elevator_features; - struct blk_mq_tags *sched_shared_tags; struct list_head icq_list; @@ -646,15 +638,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) return sector >> ilog2(disk->queue->limits.chunk_sectors); } -static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector) -{ - if (!blk_queue_is_zoned(disk->queue)) - return false; - if (!disk->conv_zones_bitmap) - return true; - return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap); -} - static inline void disk_set_max_open_zones(struct gendisk *disk, unsigned int max_open_zones) { @@ -677,6 +660,7 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev) return bdev->bd_disk->queue->limits.max_active_zones; } +bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs); #else /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int bdev_nr_zones(struct block_device *bdev) { @@ -687,10 +671,6 @@ static inline unsigned int disk_nr_zones(struct gendisk *disk) { return 0; } -static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector) -{ - return false; -} static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) { return 0; @@ -704,6 +684,10 @@ static inline unsigned 
int bdev_max_active_zones(struct block_device *bdev) { return 0; } +static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) +{ + return false; +} #endif /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int blk_queue_depth(struct request_queue *q) @@ -735,15 +719,35 @@ void invalidate_disk(struct gendisk *disk); void set_disk_ro(struct gendisk *disk, bool read_only); void disk_uevent(struct gendisk *disk, enum kobject_action action); +static inline u8 bdev_partno(const struct block_device *bdev) +{ + return atomic_read(&bdev->__bd_flags) & BD_PARTNO; +} + +static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag) +{ + return atomic_read(&bdev->__bd_flags) & flag; +} + +static inline void bdev_set_flag(struct block_device *bdev, unsigned flag) +{ + atomic_or(flag, &bdev->__bd_flags); +} + +static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag) +{ + atomic_andnot(flag, &bdev->__bd_flags); +} + static inline int get_disk_ro(struct gendisk *disk) { - return disk->part0->bd_read_only || + return bdev_test_flag(disk->part0, BD_READ_ONLY) || test_bit(GD_READ_ONLY, &disk->state); } static inline int bdev_read_only(struct block_device *bdev) { - return bdev->bd_read_only || get_disk_ro(bdev->bd_disk); + return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk); } bool set_capacity_and_notify(struct gendisk *disk, sector_t size); @@ -868,9 +872,11 @@ static inline unsigned int bio_zone_no(struct bio *bio) return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector); } -static inline unsigned int bio_zone_is_seq(struct bio *bio) +static inline bool bio_straddles_zones(struct bio *bio) { - return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector); + return bio_sectors(bio) && + bio_zone_no(bio) != + disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1); } /* @@ -907,18 +913,25 @@ int queue_limits_commit_update(struct request_queue *q, struct queue_limits *lim); int queue_limits_set(struct request_queue *q, struct queue_limits *lim); +/** + * queue_limits_cancel_update - cancel an atomic update of queue limits + * @q: queue to update + * + * This function cancels an atomic update of the queue limits started by + * queue_limits_start_update() and should be used when an error occurs after + * starting the update.
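Putting the three limits calls together, the intended pattern is start, modify, then either commit or cancel, since queue_limits_start_update() returns with q->limits_lock held. A minimal sketch, with the !max test standing in for whatever validation a real caller does:

static int my_set_max_sectors(struct request_queue *q, unsigned int max)
{
	struct queue_limits lim = queue_limits_start_update(q);

	if (!max) {
		/* Error after starting the update: must cancel. */
		queue_limits_cancel_update(q);	/* drops limits_lock */
		return -EINVAL;
	}

	lim.max_hw_sectors = max;
	return queue_limits_commit_update(q, &lim);	/* also drops it */
}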
+ */ +static inline void queue_limits_cancel_update(struct request_queue *q) +{ + mutex_unlock(&q->limits_lock); +} + /* * Access functions for manipulating queue properties */ -void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit); -extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); -extern void blk_queue_max_segments(struct request_queue *, unsigned short); -extern void blk_queue_max_discard_segments(struct request_queue *, - unsigned short); void blk_queue_max_secure_erase_sectors(struct request_queue *q, unsigned int max_sectors); -extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, @@ -935,7 +948,6 @@ void disk_update_readahead(struct gendisk *disk); extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); -extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, @@ -943,10 +955,6 @@ extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev, sector_t offset, const char *pfx); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); -extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); -extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); -extern void blk_queue_dma_alignment(struct request_queue *, int); -extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); @@ -955,17 +963,6 @@ disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges); void disk_set_independent_access_ranges(struct gendisk *disk, struct blk_independent_access_ranges *iars); -/* - * Elevator features for blk_queue_required_elevator_features: - */ -/* Supports zoned block devices sequential write constraint */ -#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) - -extern void blk_queue_required_elevator_features(struct request_queue *q, - unsigned int features); -extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, - struct device *dev); - bool __must_check blk_get_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *); @@ -1110,7 +1107,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, static inline bool bdev_is_partition(struct block_device *bdev) { - return bdev->bd_partno; + return bdev_partno(bdev) != 0; } enum blk_default_limits { @@ -1169,12 +1166,29 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } -static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q) +static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l) { + unsigned int max_sectors = 
min(l->chunk_sectors, l->max_hw_sectors); + + return min_not_zero(l->max_zone_append_sectors, max_sectors); +} + +static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q) +{ + if (!blk_queue_is_zoned(q)) + return 0; + + return queue_limits_max_zone_append_sectors(&q->limits); +} - const struct queue_limits *l = &q->limits; +static inline bool queue_emulates_zone_append(struct request_queue *q) +{ + return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors; +} - return min(l->max_zone_append_sectors, l->max_sectors); +static inline bool bdev_emulates_zone_append(struct block_device *bdev) +{ + return queue_emulates_zone_append(bdev_get_queue(bdev)); } static inline unsigned int @@ -1316,18 +1330,6 @@ static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec) return disk_zone_no(bdev->bd_disk, sec); } -/* Whether write serialization is required for @op on zoned devices. */ -static inline bool op_needs_zoned_write_locking(enum req_op op) -{ - return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES; -} - -static inline bool bdev_op_is_zoned_write(struct block_device *bdev, - enum req_op op) -{ - return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op); -} - static inline sector_t bdev_zone_sectors(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); @@ -1343,6 +1345,12 @@ static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev, return sector & (bdev_zone_sectors(bdev) - 1); } +static inline sector_t bio_offset_from_zone_start(struct bio *bio) +{ + return bdev_offset_from_zone_start(bio->bi_bdev, + bio->bi_iter.bi_sector); +} + static inline bool bdev_is_zone_start(struct block_device *bdev, sector_t sector) { @@ -1379,11 +1387,6 @@ static inline unsigned int blksize_bits(unsigned int size) return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT; } -static inline unsigned int block_size(struct block_device *bdev) -{ - return 1 << bdev->bd_inode->i_blkbits; -} - int kblockd_schedule_work(struct work_struct *work); int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); @@ -1489,7 +1492,7 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) } int bdev_read_only(struct block_device *bdev); -int set_blocksize(struct block_device *bdev, int size); +int set_blocksize(struct file *file, int size); int lookup_bdev(const char *pathname, dev_t *dev); @@ -1551,6 +1554,8 @@ void blkdev_put_no_open(struct block_device *bdev); struct block_device *I_BDEV(struct inode *inode); struct block_device *file_bdev(struct file *bdev_file); +bool disk_live(struct gendisk *disk); +unsigned int block_size(struct block_device *bdev); #ifdef CONFIG_BLOCK void invalidate_bdev(struct block_device *bdev); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 890e152d55..5e694a3080 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -184,8 +184,8 @@ struct bpf_map_ops { }; enum { - /* Support at most 10 fields in a BTF type */ - BTF_FIELDS_MAX = 10, + /* Support at most 11 fields in a BTF type */ + BTF_FIELDS_MAX = 11, }; enum btf_field_type { @@ -202,6 +202,7 @@ enum btf_field_type { BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE, BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD, BPF_REFCOUNT = (1 << 9), + BPF_WORKQUEUE = (1 << 10), }; typedef void (*btf_dtor_kfunc_t)(void *); @@ -238,6 +239,7 @@ struct btf_record { u32 field_mask; int spin_lock_off; int timer_off; + int wq_off; int refcount_off; struct btf_field fields[]; }; @@ -312,6 +314,8 @@ 
static inline const char *btf_field_type_name(enum btf_field_type type) return "bpf_spin_lock"; case BPF_TIMER: return "bpf_timer"; + case BPF_WORKQUEUE: + return "bpf_wq"; case BPF_KPTR_UNREF: case BPF_KPTR_REF: return "kptr"; @@ -340,6 +344,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type) return sizeof(struct bpf_spin_lock); case BPF_TIMER: return sizeof(struct bpf_timer); + case BPF_WORKQUEUE: + return sizeof(struct bpf_wq); case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: @@ -367,6 +373,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type) return __alignof__(struct bpf_spin_lock); case BPF_TIMER: return __alignof__(struct bpf_timer); + case BPF_WORKQUEUE: + return __alignof__(struct bpf_wq); case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: @@ -406,6 +414,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr) /* RB_ROOT_CACHED 0-inits, no need to do anything after memset */ case BPF_SPIN_LOCK: case BPF_TIMER: + case BPF_WORKQUEUE: case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: @@ -525,6 +534,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst) void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src); void bpf_timer_cancel_and_free(void *timer); +void bpf_wq_cancel_and_free(void *timer); void bpf_list_head_free(const struct btf_field *field, void *list_head, struct bpf_spin_lock *spin_lock); void bpf_rb_root_free(const struct btf_field *field, void *rb_root, @@ -1116,8 +1126,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i void *func_addr); void *arch_alloc_bpf_trampoline(unsigned int size); void arch_free_bpf_trampoline(void *image, unsigned int size); -void arch_protect_bpf_trampoline(void *image, unsigned int size); -void arch_unprotect_bpf_trampoline(void *image, unsigned int size); +int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size); int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr); @@ -1266,6 +1275,7 @@ int bpf_dynptr_check_size(u32 size); u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr); const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len); void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); +bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr); #ifdef CONFIG_BPF_JIT int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); @@ -1622,6 +1632,12 @@ struct bpf_tracing_link { struct bpf_prog *tgt_prog; }; +struct bpf_raw_tp_link { + struct bpf_link link; + struct bpf_raw_event_map *btp; + u64 cookie; +}; + struct bpf_link_primer { struct bpf_link *link; struct file *file; @@ -2204,6 +2220,7 @@ void bpf_map_free_record(struct bpf_map *map); struct btf_record *btf_record_dup(const struct btf_record *rec); bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); void bpf_obj_free_timer(const struct btf_record *rec, void *obj); +void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj); void bpf_obj_free_fields(const struct btf_record *rec, void *obj); void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); @@ -2244,31 +2261,14 @@ void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, gfp_t flags); #else -static inline void * -bpf_map_kmalloc_node(const 
struct bpf_map *map, size_t size, gfp_t flags, - int node) -{ - return kmalloc_node(size, flags, node); -} - -static inline void * -bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) -{ - return kzalloc(size, flags); -} - -static inline void * -bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags) -{ - return kvcalloc(n, size, flags); -} - -static inline void __percpu * -bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, - gfp_t flags) -{ - return __alloc_percpu_gfp(size, align, flags); -} +#define bpf_map_kmalloc_node(_map, _size, _flags, _node) \ + kmalloc_node(_size, _flags, _node) +#define bpf_map_kzalloc(_map, _size, _flags) \ + kzalloc(_size, _flags) +#define bpf_map_kvcalloc(_map, _n, _size, _flags) \ + kvcalloc(_n, _size, _flags) +#define bpf_map_alloc_percpu(_map, _size, _align, _flags) \ + __alloc_percpu_gfp(_size, _align, _flags) #endif static inline int @@ -3005,6 +3005,7 @@ int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); int sock_map_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); +int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog); void sock_map_unhash(struct sock *sk); void sock_map_destroy(struct sock *sk); @@ -3103,6 +3104,11 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, { return -EINVAL; } + +static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_BPF_SYSCALL */ #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ diff --git a/include/linux/bpf_crypto.h b/include/linux/bpf_crypto.h new file mode 100644 index 0000000000..a41e71d4e2 --- /dev/null +++ b/include/linux/bpf_crypto.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#ifndef _BPF_CRYPTO_H +#define _BPF_CRYPTO_H + +struct bpf_crypto_type { + void *(*alloc_tfm)(const char *algo); + void (*free_tfm)(void *tfm); + int (*has_algo)(const char *algo); + int (*setkey)(void *tfm, const u8 *key, unsigned int keylen); + int (*setauthsize)(void *tfm, unsigned int authsize); + int (*encrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv); + int (*decrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv); + unsigned int (*ivsize)(void *tfm); + unsigned int (*statesize)(void *tfm); + u32 (*get_flags)(void *tfm); + struct module *owner; + char name[14]; +}; + +int bpf_crypto_register_type(const struct bpf_crypto_type *type); +int bpf_crypto_unregister_type(const struct bpf_crypto_type *type); + +#endif /* _BPF_CRYPTO_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e742db470a..ff2a6cdb1f 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -421,11 +421,13 @@ struct bpf_verifier_state { struct bpf_active_lock active_lock; bool speculative; bool active_rcu_lock; + u32 active_preempt_lock; /* If this state was ever pointed-to by other state's loop_entry field * this flag would be set to true. Used to avoid freeing such states * while they are still in use. 
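One pattern recurring in these hunks deserves a note before the verifier changes below: with memory allocation profiling, helpers are split into a _noprof body plus a macro wrapped in alloc_hooks(), so the accounting tag lands at the outermost callsite rather than inside the helper (the bpf_map_* wrappers above become plain macros for the same reason, and kvmemdup_bpfptr() in the bpfptr.h hunk below follows suit). A hypothetical helper showing the shape, assuming <linux/mm.h> and <linux/string.h>:

/* The _noprof body allocates without accounting... */
static inline void *my_dup_noprof(const void *src, size_t len)
{
	void *p = kvmalloc_noprof(len, GFP_USER | __GFP_NOWARN);

	if (p)
		memcpy(p, src, len);
	return p;
}

/* ...and the macro re-attaches accounting at each callsite. */
#define my_dup(...)	alloc_hooks(my_dup_noprof(__VA_ARGS__))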
*/ bool used_as_loop_entry; + bool in_sleepable; /* first and last insn idx of this verifier state */ u32 first_insn_idx; @@ -502,6 +504,13 @@ struct bpf_loop_inline_state { u32 callback_subprogno; /* valid when fit_for_inline is true */ }; +/* pointer and state for maps */ +struct bpf_map_ptr_state { + struct bpf_map *map_ptr; + bool poison; + bool unpriv; +}; + /* Possible states for alu_state member. */ #define BPF_ALU_SANITIZE_SRC (1U << 0) #define BPF_ALU_SANITIZE_DST (1U << 1) @@ -514,7 +523,7 @@ struct bpf_loop_inline_state { struct bpf_insn_aux_data { union { enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ - unsigned long map_ptr_state; /* pointer/poison value for maps */ + struct bpf_map_ptr_state map_ptr_state; s32 call_imm; /* saved imm field of call insn */ u32 alu_limit; /* limit for add/sub register with pointer */ struct { @@ -837,7 +846,7 @@ static inline u32 type_flag(u32 type) /* only use after check_attach_btf_id() */ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog) { - return prog->type == BPF_PROG_TYPE_EXT ? + return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ? prog->aux->dst_prog->type : prog->type; } diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h index 79b2f78eec..1af241525a 100644 --- a/include/linux/bpfptr.h +++ b/include/linux/bpfptr.h @@ -65,9 +65,9 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset, return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size); } -static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len) +static inline void *kvmemdup_bpfptr_noprof(bpfptr_t src, size_t len) { - void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN); + void *p = kvmalloc_noprof(len, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); @@ -77,6 +77,7 @@ static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len) } return p; } +#define kvmemdup_bpfptr(...) alloc_hooks(kvmemdup_bpfptr_noprof(__VA_ARGS__)) static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count) { diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 9e97ced289..14fa932686 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -65,7 +65,8 @@ struct bsg_job { void bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len); struct request_queue *bsg_setup_queue(struct device *dev, const char *name, - bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size); + struct queue_limits *lim, bsg_job_fn *job_fn, + bsg_timeout_fn *timeout, int dd_job_size); void bsg_remove_queue(struct request_queue *q); void bsg_job_put(struct bsg_job *job); int __must_check bsg_job_get(struct bsg_job *job); diff --git a/include/linux/btf.h b/include/linux/btf.h index f9e56fd12a..7c3e40c329 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -82,7 +82,7 @@ * as to avoid issues such as the compiler inlining or eliding either a static * kfunc, or a global kfunc in an LTO build. 
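For reference, __bpf_kfunc (now also __retain, so LTO cannot drop the symbol) is applied at kfunc definition sites between the defs markers; bpf_example_double() is purely illustrative:

__bpf_kfunc_start_defs();

/* Hypothetical kfunc kept visible to the BPF core even under LTO. */
__bpf_kfunc u32 bpf_example_double(u32 x)
{
	return x * 2;
}

__bpf_kfunc_end_defs();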
*/ -#define __bpf_kfunc __used noinline +#define __bpf_kfunc __used __retain noinline #define __bpf_kfunc_start_defs() \ __diag_push(); \ diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index e24aabfe8e..c0e3e1426a 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -3,6 +3,8 @@ #ifndef _LINUX_BTF_IDS_H #define _LINUX_BTF_IDS_H +#include <linux/types.h> /* for u32 */ + struct btf_id_set { u32 cnt; u32 ids[]; diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index d78454a4dd..e022e40b09 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -303,12 +303,38 @@ static inline void put_bh(struct buffer_head *bh) atomic_dec(&bh->b_count); } +/** + * brelse - Release a buffer. + * @bh: The buffer to release. + * + * Decrement a buffer_head's reference count. If @bh is NULL, this + * function is a no-op. + * + * If all buffers on a folio have zero reference count, are clean + * and unlocked, and if the folio is unlocked and not under writeback + * then try_to_free_buffers() may strip the buffers from the folio in + * preparation for freeing it (sometimes, rarely, buffers are removed + * from a folio but it ends up not being freed, and buffers may later + * be reattached). + * + * Context: Any context. + */ static inline void brelse(struct buffer_head *bh) { if (bh) __brelse(bh); } +/** + * bforget - Discard any dirty data in a buffer. + * @bh: The buffer to forget. + * + * Call this function instead of brelse() if the data written to a buffer + * no longer needs to be written back. It will clear the buffer's dirty + * flag so writeback of this buffer will be skipped. + * + * Context: Any context. + */ static inline void bforget(struct buffer_head *bh) { if (bh) @@ -338,7 +364,7 @@ static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, { gfp_t gfp; - gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); + gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS); gfp |= __GFP_NOFAIL; return bdev_getblk(bdev, block, size, gfp); @@ -349,7 +375,7 @@ static inline struct buffer_head *__getblk(struct block_device *bdev, { gfp_t gfp; - gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); + gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS); gfp |= __GFP_MOVABLE | __GFP_NOFAIL; return bdev_getblk(bdev, block, size, gfp); @@ -437,17 +463,21 @@ static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[], } /** - * __bread() - reads a specified block and returns the bh - * @bdev: the block_device to read from - * @block: number of block - * @size: size (in bytes) to read + * __bread() - Read a block. + * @bdev: The block device to read from. + * @block: Block number in units of block size. + * @size: The block size of this device in bytes. * - * Reads a specified block, and returns buffer head that contains it. - * The page cache is allocated from movable area so that it can be migrated. - * It returns NULL if the block was unreadable. + * Read a specified block, and return the buffer head that refers + * to it. The memory is allocated from the movable area so that it can + * be migrated. The returned buffer head has its refcount increased. + * The caller should call brelse() when it has finished with the buffer. + * + * Context: May sleep waiting for I/O. + * Return: NULL if the block was unreadable. 
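In practice the refcount contract spelled out above reads like this; read_one_block() is a hypothetical caller:

static int read_one_block(struct block_device *bdev, sector_t block,
			  unsigned int size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;	/* block was unreadable */

	/* ... consume bh->b_data ... */

	brelse(bh);	/* drop the reference __bread() returned */
	return 0;
}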
*/ -static inline struct buffer_head * -__bread(struct block_device *bdev, sector_t block, unsigned size) +static inline struct buffer_head *__bread(struct block_device *bdev, + sector_t block, unsigned size) { return __bread_gfp(bdev, block, size, __GFP_MOVABLE); } diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h new file mode 100644 index 0000000000..18e0a2fc38 --- /dev/null +++ b/include/linux/bus/stm32_firewall_device.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#ifndef STM32_FIREWALL_DEVICE_H +#define STM32_FIREWALL_DEVICE_H + +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +#define STM32_FIREWALL_MAX_EXTRA_ARGS 5 + +/* Opaque reference to stm32_firewall_controller */ +struct stm32_firewall_controller; + +/** + * struct stm32_firewall - Information on a device's firewall. Each device can have more than one + * firewall. + * + * @firewall_ctrl: Pointer referencing a firewall controller of the device. It is + * opaque so a device cannot manipulate the controller's ops or access + * the controller's data + * @extra_args: Extra arguments that are implementation dependent + * @entry: Name of the firewall entry + * @extra_args_size: Number of extra arguments + * @firewall_id: Firewall ID associated with the device for this firewall controller + */ +struct stm32_firewall { + struct stm32_firewall_controller *firewall_ctrl; + u32 extra_args[STM32_FIREWALL_MAX_EXTRA_ARGS]; + const char *entry; + size_t extra_args_size; + u32 firewall_id; +}; + +#if IS_ENABLED(CONFIG_STM32_FIREWALL) +/** + * stm32_firewall_get_firewall - Get the firewall(s) associated with a given device. + * The firewall controller reference is always the first argument + * of each of the access-controller property entries. + * The firewall ID is always the second argument of each of the + * access-controller property entries. + * If there's no argument linked to the phandle, then the firewall ID + * field is set to U32_MAX, which is an invalid ID. + * + * @np: Device node to parse + * @firewall: Array of firewall references + * @nb_firewall: Number of firewall references to get. Must be at least 1. + * + * Returns 0 on success, -ENODEV if there's no match with a firewall controller, or an appropriate + * errno code if an error occurred. + */ +int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall, + unsigned int nb_firewall); + +/** + * stm32_firewall_grant_access - Request firewall access rights and grant access. + * + * @firewall: Firewall reference containing the ID to check against its firewall + * controller + * + * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if firewall is null, or an + * appropriate errno code if an error occurred + */ +int stm32_firewall_grant_access(struct stm32_firewall *firewall); + +/** + * stm32_firewall_release_access - Release access granted from a call to + * stm32_firewall_grant_access().
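Taken together, a consumer fetches its firewall reference(s) from its device node and then brackets hardware access with grant/release; a minimal probe-time sketch, where my_probe() is hypothetical and error unwinding is trimmed:

static int my_probe(struct platform_device *pdev)
{
	struct stm32_firewall fw;
	int err;

	err = stm32_firewall_get_firewall(pdev->dev.of_node, &fw, 1);
	if (err)
		return err;

	err = stm32_firewall_grant_access(&fw);
	if (err)
		return err;	/* e.g. -EACCES: access denied */

	/* ... configure the protected peripheral ... */

	stm32_firewall_release_access(&fw);
	return 0;
}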
+ *
+ * @firewall:		Firewall reference containing the ID to check against its firewall
+ *			controller
+ */
+void stm32_firewall_release_access(struct stm32_firewall *firewall);
+
+/**
+ * stm32_firewall_grant_access_by_id - Request firewall access rights of a given device
+ *				       based on a specific firewall ID
+ *
+ * Warnings:
+ * There is no way to ensure that the given ID will correspond to the firewall referenced in the
+ * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this
+ * function must be used with caution.
+ * This function should be used for subsystem resources that do not have the same firewall ID
+ * as their parent.
+ * U32_MAX is an invalid ID.
+ *
+ * @firewall:		Firewall reference containing the firewall controller
+ * @subsystem_id:	Firewall ID of the subsystem resource
+ *
+ * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if firewall is null or
+ * appropriate errno code if error occurred
+ */
+int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+
+/**
+ * stm32_firewall_release_access_by_id - Release access granted from a call to
+ *					 stm32_firewall_grant_access_by_id().
+ *
+ * Warnings:
+ * There is no way to ensure that the given ID will correspond to the firewall referenced in the
+ * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this
+ * function must be used with caution.
+ * This function should be used for subsystem resources that do not have the same firewall ID
+ * as their parent.
+ * U32_MAX is an invalid ID.
+ *
+ * @firewall:		Firewall reference containing the firewall controller
+ * @subsystem_id:	Firewall ID of the subsystem resource
+ */
+void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+
+#else /* CONFIG_STM32_FIREWALL */
+
+static inline int stm32_firewall_get_firewall(struct device_node *np,
+					      struct stm32_firewall *firewall,
+					      unsigned int nb_firewall)
+{
+	return -ENODEV;
+}
+
+static inline int stm32_firewall_grant_access(struct stm32_firewall *firewall)
+{
+	return -ENODEV;
+}
+
+static inline void stm32_firewall_release_access(struct stm32_firewall *firewall)
+{
+}
+
+static inline int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+	return -ENODEV;
+}
+
+static inline void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+}
+
+#endif /* CONFIG_STM32_FIREWALL */
+#endif /* STM32_FIREWALL_DEVICE_H */
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index 11a92a9460..5f904591fa 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -27,17 +27,13 @@
 	##__VA_ARGS__)
 # else
 /* faux printk call just to see any compiler warnings. */
-# define dout(fmt, ...)	do {				\
-		if (0)					\
-			printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
-	} while (0)
-# define doutc(client, fmt, ...)	do {			\
-		if (0)						\
-			printk(KERN_DEBUG "[%pU %llu] " fmt,	\
-			       &client->fsid,			\
-			       client->monc.auth->global_id,	\
-			       ##__VA_ARGS__);			\
-	} while (0)
+# define dout(fmt, ...) \
+	no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+# define doutc(client, fmt, ...)
\
+	no_printk(KERN_DEBUG "[%pU %llu] " fmt, \
+		  &client->fsid, \
+		  client->monc.auth->global_id, \
+		  ##__VA_ARGS__)
 # endif
 #else
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 34aaf0e87d..2150ca6039 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -690,7 +690,7 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
 void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
 void cgroup_rstat_flush(struct cgroup *cgrp);
 void cgroup_rstat_flush_hold(struct cgroup *cgrp);
-void cgroup_rstat_flush_release(void);
+void cgroup_rstat_flush_release(struct cgroup *cgrp);
 
 /*
  * Basic resource stats.
diff --git a/include/linux/closure.h b/include/linux/closure.h
index c554c6a087..2af4442710 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -159,6 +159,7 @@ struct closure {
 #ifdef CONFIG_DEBUG_CLOSURES
 #define CLOSURE_MAGIC_DEAD		0xc054dead
 #define CLOSURE_MAGIC_ALIVE		0xc054a11e
+#define CLOSURE_MAGIC_STACK		0xc05451cc
 
 	unsigned int		magic;
 	struct list_head	all;
@@ -194,6 +195,18 @@ static inline void closure_sync(struct closure *cl)
 		__closure_sync(cl);
 }
 
+int __closure_sync_timeout(struct closure *cl, unsigned long timeout);
+
+static inline int closure_sync_timeout(struct closure *cl, unsigned long timeout)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+	BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
+#endif
+	return cl->closure_get_happened
+		? __closure_sync_timeout(cl, timeout)
+		: 0;
+}
+
 #ifdef CONFIG_DEBUG_CLOSURES
 
 void closure_debug_create(struct closure *cl);
@@ -273,6 +286,21 @@ static inline void closure_get(struct closure *cl)
 }
 
 /**
+ * closure_get_not_zero - take a reference on a closure unless its refcount
+ * has already dropped to zero
+ * @cl: closure to get a reference on
+ *
+ * Return: true if the reference was taken, false if the refcount was zero.
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+	unsigned old = atomic_read(&cl->remaining);
+	do {
+		if (!(old & CLOSURE_REMAINING_MASK))
+			return false;
+
+	} while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+
+	return true;
+}
+
+/**
 * closure_init - Initialize a closure, setting the refcount to 1
 * @cl: closure to initialize
 * @parent: parent of the new closure. cl will take a refcount on it for its
@@ -296,6 +324,18 @@ static inline void closure_init_stack(struct closure *cl)
 {
 	memset(cl, 0, sizeof(struct closure));
 	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+	cl->magic = CLOSURE_MAGIC_STACK;
+#endif
+}
+
+static inline void closure_init_stack_release(struct closure *cl)
+{
+	memset(cl, 0, sizeof(struct closure));
+	atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+	cl->magic = CLOSURE_MAGIC_STACK;
+#endif
 }
 
 /**
@@ -343,6 +383,8 @@ do {						\
 */
 #define closure_return(_cl) continue_at((_cl), NULL, NULL)
 
+void closure_return_sync(struct closure *cl);
+
 /**
 * continue_at_nobarrier - jump to another function without barrier
 *
diff --git a/include/linux/cmpxchg-emu.h b/include/linux/cmpxchg-emu.h
new file mode 100644
index 0000000000..998deec677
--- /dev/null
+++ b/include/linux/cmpxchg-emu.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Emulated 1-byte cmpxchg operation for architectures lacking direct
+ * support for this size, implemented in terms of 4-byte cmpxchg
+ * operations.
+ *
+ * Copyright (C) 2024 Paul E. McKenney.
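+ *
+ * A hedged calling sketch (the flag variable is hypothetical); as with the
+ * native cmpxchg() family, the old value is returned:
+ *
+ *	u8 flag = 0;
+ *
+ *	if (cmpxchg_emu_u8(&flag, 0, 1) == 0)
+ *		... we won the race and set the flag ...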
+ */ + +#ifndef __LINUX_CMPXCHG_EMU_H +#define __LINUX_CMPXCHG_EMU_H + +uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new); + +#endif /* __LINUX_CMPXCHG_EMU_H */ diff --git a/include/linux/codetag.h b/include/linux/codetag.h new file mode 100644 index 0000000000..c2a579ccd4 --- /dev/null +++ b/include/linux/codetag.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * code tagging framework + */ +#ifndef _LINUX_CODETAG_H +#define _LINUX_CODETAG_H + +#include <linux/types.h> + +struct codetag_iterator; +struct codetag_type; +struct codetag_module; +struct seq_buf; +struct module; + +/* + * An instance of this structure is created in a special ELF section at every + * code location being tagged. At runtime, the special section is treated as + * an array of these. + */ +struct codetag { + unsigned int flags; /* used in later patches */ + unsigned int lineno; + const char *modname; + const char *function; + const char *filename; +} __aligned(8); + +union codetag_ref { + struct codetag *ct; +}; + +struct codetag_type_desc { + const char *section; + size_t tag_size; + void (*module_load)(struct codetag_type *cttype, + struct codetag_module *cmod); + bool (*module_unload)(struct codetag_type *cttype, + struct codetag_module *cmod); +}; + +struct codetag_iterator { + struct codetag_type *cttype; + struct codetag_module *cmod; + unsigned long mod_id; + struct codetag *ct; +}; + +#ifdef MODULE +#define CT_MODULE_NAME KBUILD_MODNAME +#else +#define CT_MODULE_NAME NULL +#endif + +#define CODE_TAG_INIT { \ + .modname = CT_MODULE_NAME, \ + .function = __func__, \ + .filename = __FILE__, \ + .lineno = __LINE__, \ + .flags = 0, \ +} + +void codetag_lock_module_list(struct codetag_type *cttype, bool lock); +bool codetag_trylock_module_list(struct codetag_type *cttype); +struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype); +struct codetag *codetag_next_ct(struct codetag_iterator *iter); + +void codetag_to_text(struct seq_buf *out, struct codetag *ct); + +struct codetag_type * +codetag_register_type(const struct codetag_type_desc *desc); + +#if defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) +void codetag_load_module(struct module *mod); +bool codetag_unload_module(struct module *mod); +#else +static inline void codetag_load_module(struct module *mod) {} +static inline bool codetag_unload_module(struct module *mod) { return true; } +#endif + +#endif /* _LINUX_CODETAG_H */ diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 49feac0162..4c1a39dcb6 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -118,3 +118,13 @@ #define __diag_ignore_all(option, comment) \ __diag_clang(13, ignore, option) + +/* + * clang has horrible behavior with "g" or "rm" constraints for asm + * inputs, turning them into something worse than "m". Avoid using + * constraints with multiple possible uses (but "ir" seems to be ok): + * + * https://github.com/llvm/llvm-project/issues/20571 + */ +#define ASM_INPUT_G "ir" +#define ASM_INPUT_RM "r" diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 8bdf6e0918..32284cd26d 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -362,6 +362,19 @@ #define __used __attribute__((__used__)) /* + * The __used attribute guarantees that the attributed variable will be + * always emitted by a compiler. 
It doesn't prevent the compiler from
+ * throwing 'unused' warnings when it can't detect how the variable is
+ * actually used. Whether the warning is emitted in that case is a
+ * compiler implementation detail.
+ *
+ * The combination of both 'used' and 'unused' attributes ensures that
+ * the variable will be emitted and will not trigger 'unused' warnings.
+ * The attribute is applicable to functions and to static and global variables.
+ */
+#define __always_used __used __maybe_unused
+
 /*
  * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
  * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
  */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 8f8236317d..f14c275950 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -143,6 +143,29 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
 # define __preserve_most
 #endif
 
+/*
+ * Annotating a function/variable with __retain tells the compiler to place
+ * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
+ * instructs the linker to retain the object during garbage-cleanup or LTO
+ * phases.
+ *
+ * Note that the __used macro is also used to prevent functions or data
+ * being optimized out, but operates at the compiler/IR-level and may still
+ * allow unintended removal of objects during linking.
+ *
+ * Optional: only supported since gcc >= 11, clang >= 13
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
+ */
+#if __has_attribute(__retain__) && \
+	(defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
+	 defined(CONFIG_LTO_CLANG))
+# define __retain __attribute__((__retain__))
+#else
+# define __retain
+#endif
+
 /* Compiler specific macros. */
 #ifdef __clang__
 #include <linux/compiler-clang.h>
@@ -273,9 +296,16 @@ struct ftrace_likely_data {
 * disable all instrumentation. See Kconfig.kcsan where this is mandatory.
 */
 # define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
+/*
+ * Type qualifier to mark variables where all data-racy accesses should be
+ * ignored by KCSAN. Note, the implementation simply marks these variables as
+ * volatile, since KCSAN will treat such accesses as "marked".
+ */
+# define __data_racy volatile
 # define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
 #else
 # define __no_kcsan
+# define __data_racy
 #endif
 
 #ifdef __SANITIZE_MEMORY__
@@ -293,6 +323,17 @@ struct ftrace_likely_data {
 #define __no_sanitize_or_inline __always_inline
 #endif
 
+/*
+ * Apply __counted_by() when the endianness matches to increase test coverage.
+ */
+#ifdef __LITTLE_ENDIAN
+#define __counted_by_le(member) __counted_by(member)
+#define __counted_by_be(member)
+#else
+#define __counted_by_le(member)
+#define __counted_by_be(member) __counted_by(member)
+#endif
+
 /* Do not trap wrapping arithmetic within an annotated function. */
 #ifdef CONFIG_UBSAN_SIGNED_WRAP
 # define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
@@ -391,6 +432,15 @@ struct ftrace_likely_data {
 #define asm_goto_output(x...) asm volatile goto(x)
 #endif
 
+/*
+ * Clang has trouble with constraints with multiple
+ * alternative behaviors (mainly "g" and "rm").
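+ *
+ * A hedged example of the intended use (the asm body and operands are
+ * hypothetical):
+ *
+ *	asm("add %1, %0" : "+r" (sum) : ASM_INPUT_RM (addend));
+ *
+ * which leaves gcc the full freedom of "rm" while clang only sees "r".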
+ */
+#ifndef ASM_INPUT_G
+  #define ASM_INPUT_G "g"
+  #define ASM_INPUT_RM "rm"
+#endif
+
 #ifdef CONFIG_CC_HAS_ASM_INLINE
 #define asm_inline asm __inline
 #else
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index d3eba43601..0904ba0103 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -30,6 +30,8 @@ struct coredump_params {
 	struct core_vma_metadata *vma_meta;
 };
 
+extern unsigned int core_file_note_size_limit;
+
 /*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 5f288d4754..f09ace9217 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/perf_event.h>
 #include <linux/sched.h>
+#include <linux/platform_device.h>
 
 /* Peripheral id registers (0xFD0-0xFEC) */
 #define CORESIGHT_PERIPHIDR4		0xfd0
@@ -658,4 +659,9 @@ coresight_find_output_type(struct coresight_platform_data *pdata,
 			   enum coresight_dev_type type,
 			   union coresight_dev_subtype subtype);
 
+int coresight_init_driver(const char *drv, struct amba_driver *amba_drv,
+			  struct platform_driver *pdev_drv);
+
+void coresight_remove_driver(struct amba_driver *amba_drv,
+			     struct platform_driver *pdev_drv);
 #endif		/* _LINUX_CORESIGHT_H */
diff --git a/include/linux/counter.h b/include/linux/counter.h
index b767b5c821..426b7d58a4 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -6,14 +6,15 @@
 #ifndef _COUNTER_H_
 #define _COUNTER_H_
 
+#include <linux/array_size.h>
 #include <linux/cdev.h>
 #include <linux/device.h>
-#include <linux/kernel.h>
 #include <linux/kfifo.h>
 #include <linux/mutex.h>
 #include <linux/spinlock_types.h>
 #include <linux/types.h>
 #include <linux/wait.h>
+
 #include <uapi/linux/counter.h>
 
 struct counter_device;
@@ -601,6 +602,9 @@ struct counter_array {
 #define COUNTER_COMP_FLOOR(_read, _write) \
 	COUNTER_COMP_COUNT_U64("floor", _read, _write)
 
+#define COUNTER_COMP_FREQUENCY(_read) \
+	COUNTER_COMP_SIGNAL_U64("frequency", _read, NULL)
+
 #define COUNTER_COMP_POLARITY(_read, _write, _available) \
 { \
 	.type = COUNTER_COMP_SIGNAL_POLARITY, \
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 9956afb9ac..20f7e98ee8 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -241,6 +241,12 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
 bool has_target_index(void);
+
+DECLARE_PER_CPU(unsigned long, cpufreq_pressure);
+static inline unsigned long cpufreq_get_pressure(int cpu)
+{
+	return READ_ONCE(per_cpu(cpufreq_pressure, cpu));
+}
 #else
 static inline unsigned int cpufreq_get(unsigned int cpu)
 {
@@ -264,6 +270,10 @@ static inline bool cpufreq_supports_freq_invariance(void)
 }
 static inline void disable_cpufreq(void) { }
 static inline void cpufreq_update_limits(unsigned int cpu) { }
+static inline unsigned long cpufreq_get_pressure(int cpu)
+{
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_CPU_FREQ_STAT
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 35e78ddb2b..7a5785f405 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -146,6 +146,7 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_MIPS_GIC_STARTING,
 	CPUHP_AP_IRQ_LOONGARCH_STARTING,
 	CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+	CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
 	CPUHP_AP_ARM_MVEBU_COHERENCY,
 	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1c29947db8..23686bed44 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -188,6 +188,23 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
 }
 
 /**
+ * cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
+ * @srcp1: the first input
+ * @srcp2: the second input
+ * @srcp3: the third input
+ *
+ * Return: >= nr_cpu_ids if no cpus set in all.
+ */
+static inline
+unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
+				   const struct cpumask *srcp2,
+				   const struct cpumask *srcp3)
+{
+	return find_first_and_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+				      cpumask_bits(srcp3), small_cpumask_bits);
+}
+
+/**
 * cpumask_last - get the last CPU in a cpumask
 * @srcp:	- the cpumask pointer
 *
@@ -369,6 +386,16 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
 	for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
 
 /**
+ * for_each_cpu_from - iterate over CPUs present in @mask, from @cpu to the end of @mask.
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_from(cpu, mask)				\
+	for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
+
+/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
@@ -389,6 +416,29 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 }
 
 /**
+ * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one.
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ * @cpu: the cpu to ignore
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
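+ *
+ * A hedged example: pick a CPU other than the current one from the
+ * intersection of two masks (mask1 and mask2 are placeholders):
+ *
+ *	unsigned int target = cpumask_any_and_but(mask1, mask2, smp_processor_id());
+ *
+ *	if (target >= nr_cpu_ids)
+ *		... no other candidate CPU ...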
+ */
+static inline
+unsigned int cpumask_any_and_but(const struct cpumask *mask1,
+				 const struct cpumask *mask2,
+				 unsigned int cpu)
+{
+	unsigned int i;
+
+	cpumask_check(cpu);
+	i = cpumask_first_and(mask1, mask2);
+	if (i != cpu)
+		return i;
+
+	return cpumask_next_and(cpu, mask1, mask2);
+}
+
+/**
 * cpumask_nth - get the Nth cpu in a cpumask
 * @srcp: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
@@ -494,6 +544,22 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
 }
 
 /**
+ * cpumask_assign_cpu - assign a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ * @value: the value to assign
+ */
+static __always_inline void cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
+{
+	assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
+}
+
+static __always_inline void __cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
+{
+	__assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
+}
+
+/**
 * cpumask_test_cpu - test for a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
@@ -853,7 +919,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
 */
 static inline unsigned int cpumask_size(void)
 {
-	return BITS_TO_LONGS(large_cpumask_bits) * sizeof(long);
+	return bitmap_size(large_cpumask_bits);
 }
 
 /*
@@ -1017,11 +1083,6 @@ void init_cpu_present(const struct cpumask *src);
 void init_cpu_possible(const struct cpumask *src);
 void init_cpu_online(const struct cpumask *src);
 
-static inline void reset_cpu_possible_mask(void)
-{
-	bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
-}
-
 static inline void
 set_cpu_possible(unsigned int cpu, bool possible)
 {
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index d33352c2e3..4430533631 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -37,17 +37,16 @@ static inline void arch_kexec_unprotect_crashkres(void) { }
 
 #ifndef arch_crash_handle_hotplug_event
-static inline void arch_crash_handle_hotplug_event(struct kimage *image) { }
+static inline void arch_crash_handle_hotplug_event(struct kimage *image, void *arg) { }
 #endif
 
-int crash_check_update_elfcorehdr(void);
+int crash_check_hotplug_support(void);
 
-#ifndef crash_hotplug_cpu_support
-static inline int crash_hotplug_cpu_support(void) { return 0; }
-#endif
-
-#ifndef crash_hotplug_memory_support
-static inline int crash_hotplug_memory_support(void) { return 0; }
+#ifndef arch_crash_hotplug_support
+static inline int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags)
+{
+	return 0;
+}
 #endif
 
 #ifndef crash_get_elfcorehdr_size
diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h
index 03fa6d50d4..60b2502028 100644
--- a/include/linux/cxl-event.h
+++ b/include/linux/cxl-event.h
@@ -3,6 +3,10 @@
 #ifndef _LINUX_CXL_EVENT_H
 #define _LINUX_CXL_EVENT_H
 
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/workqueue_types.h>
+
 /*
 * Common Event Record Format
 * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
 */
@@ -91,11 +95,21 @@ struct cxl_event_mem_module {
 	u8 reserved[0x3d];
 } __packed;
 
+/*
+ * General Media or DRAM Event Common Fields
+ * - provides common access to phys_addr
+ */
+struct cxl_event_common {
+	struct cxl_event_record_hdr hdr;
+	__le64 phys_addr;
+} __packed;
+
 union cxl_event {
	struct cxl_event_generic generic;
	struct cxl_event_gen_media gen_media;
	struct cxl_event_dram dram;
	struct cxl_event_mem_module mem_module;
+	struct cxl_event_common
common; } __packed; /* @@ -140,4 +154,29 @@ struct cxl_cper_event_rec { union cxl_event event; } __packed; +struct cxl_cper_work_data { + enum cxl_event_type event_type; + struct cxl_cper_event_rec rec; +}; + +#ifdef CONFIG_ACPI_APEI_GHES +int cxl_cper_register_work(struct work_struct *work); +int cxl_cper_unregister_work(struct work_struct *work); +int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd); +#else +static inline int cxl_cper_register_work(struct work_struct *work) +{ + return 0; +} + +static inline int cxl_cper_unregister_work(struct work_struct *work) +{ + return 0; +} +static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd) +{ + return 0; +} +#endif + #endif /* _LINUX_CXL_EVENT_H */ diff --git a/include/linux/damon.h b/include/linux/damon.h index 886d07294f..f7da65e1ac 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -297,6 +297,7 @@ struct damos_stat { * enum damos_filter_type - Type of memory for &struct damos_filter * @DAMOS_FILTER_TYPE_ANON: Anonymous pages. * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages. + * @DAMOS_FILTER_TYPE_YOUNG: Recently accessed pages. * @DAMOS_FILTER_TYPE_ADDR: Address range. * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target. * @NR_DAMOS_FILTER_TYPES: Number of filter types. @@ -315,6 +316,7 @@ struct damos_stat { enum damos_filter_type { DAMOS_FILTER_TYPE_ANON, DAMOS_FILTER_TYPE_MEMCG, + DAMOS_FILTER_TYPE_YOUNG, DAMOS_FILTER_TYPE_ADDR, DAMOS_FILTER_TYPE_TARGET, NR_DAMOS_FILTER_TYPES, diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h index c008169ed2..c8f7eb6cc1 100644 --- a/include/linux/devcoredump.h +++ b/include/linux/devcoredump.h @@ -63,6 +63,8 @@ void dev_coredumpm(struct device *dev, struct module *owner, void dev_coredumpsg(struct device *dev, struct scatterlist *table, size_t datalen, gfp_t gfp); + +void dev_coredump_put(struct device *dev); #else static inline void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp) @@ -85,6 +87,9 @@ static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table, { _devcd_free_sgtable(table); } +static inline void dev_coredump_put(struct device *dev) +{ +} #endif /* CONFIG_DEV_COREDUMP */ #endif /* __DEVCOREDUMP_H */ diff --git a/include/linux/device.h b/include/linux/device.h index b9f5464f44..ace039151c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -132,6 +132,8 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, char *buf); ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); +ssize_t device_show_string(struct device *dev, struct device_attribute *attr, + char *buf); /** * DEVICE_ATTR - Define a device attribute. @@ -251,6 +253,19 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, struct dev_ext_attribute dev_attr_##_name = \ { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) } +/** + * DEVICE_STRING_ATTR_RO - Define a device attribute backed by a r/o string. + * @_name: Attribute name. + * @_mode: File mode. + * @_var: Identifier of string. + * + * Like DEVICE_ULONG_ATTR(), but @_var is a string. Because the length of the + * string allocation is unknown, the attribute must be read-only. 
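+ *
+ * A hedged definition sketch (the attribute name and string are made up):
+ *
+ *	static char *fw_version = "1.2.3";
+ *	static DEVICE_STRING_ATTR_RO(fw_version, 0444, fw_version);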
+ */
+#define DEVICE_STRING_ATTR_RO(_name, _mode, _var) \
+	struct dev_ext_attribute dev_attr_##_name = \
+		{ __ATTR(_name, (_mode) & ~0222, device_show_string, NULL), (_var) }
+
 #define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
 	struct device_attribute dev_attr_##_name =		\
 		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
@@ -691,6 +706,7 @@ struct device_physical_location {
 *		and optionally (if the coherent mask is large enough) also
 *		for dma allocations. This flag is managed by the dma ops
 *		instance from ->dma_supported.
+ * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
 *
 * At the lowest level, every device in a Linux system is represented by an
 * instance of struct device. The device structure contains the information
@@ -803,6 +819,9 @@ struct device {
 #ifdef CONFIG_DMA_OPS_BYPASS
 	bool			dma_ops_bypass : 1;
 #endif
+#ifdef CONFIG_DMA_NEED_SYNC
+	bool			dma_skip_sync:1;
+#endif
 };
 
 /**
@@ -1201,23 +1220,10 @@ static inline void device_remove_group(struct device *dev,
 	return device_remove_groups(dev, groups);
 }
 
-int __must_check devm_device_add_groups(struct device *dev,
-					const struct attribute_group **groups);
 int __must_check devm_device_add_group(struct device *dev,
				       const struct attribute_group *grp);
 
 /*
- * Platform "fixup" functions - allow the platform to have their say
- * about devices and actions that the general device layer doesn't
- * know about.
- */
-/* Notify platform of device discovery */
-extern int (*platform_notify)(struct device *dev);
-
-extern int (*platform_notify_remove)(struct device *dev);
-
-
-/*
 * get_device - atomically increment the reference count for the device.
 *
 */
diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h
index 7489180220..708ca91314 100644
--- a/include/linux/devm-helpers.h
+++ b/include/linux/devm-helpers.h
@@ -41,7 +41,7 @@ static inline void devm_delayed_work_drop(void *res)
 * detached. A few drivers need delayed work which must be cancelled before
 * driver is detached to avoid accessing removed resources.
 * devm_delayed_work_autocancel() can be used to omit the explicit
- * cancelleation when driver is detached.
+ * cancellation when driver is detached.
 */
 static inline int devm_delayed_work_autocancel(struct device *dev,
					       struct delayed_work *w,
@@ -66,7 +66,7 @@ static inline void devm_work_drop(void *res)
 * A few drivers need to queue work which must be cancelled before driver
 * is detached to avoid accessing removed resources.
 * devm_work_autocancel() can be used to omit the explicit
- * cancelleation when driver is detached.
+ * cancellation when driver is detached.
 */
 static inline int devm_work_autocancel(struct device *dev,
				       struct work_struct *w,
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 8ff4add71f..36216d28d8 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -370,8 +370,10 @@ struct dma_buf {
 	 */
 	struct module *owner;
 
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 	/** @list_node: node for dma_buf accounting and debugging. */
 	struct list_head list_node;
+#endif
 
 	/** @priv: exporter specific private data for this buffer object.
*/ void *priv; diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 3eb3589ff4..edbe13d007 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -54,6 +54,24 @@ static inline phys_addr_t translate_dma_to_phys(struct device *dev, return (phys_addr_t)-1; } +static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map) +{ + dma_addr_t ret = (dma_addr_t)U64_MAX; + + for (; map->size; map++) + ret = min(ret, map->dma_start); + return ret; +} + +static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map) +{ + dma_addr_t ret = 0; + + for (; map->size; map++) + ret = max(ret, map->dma_start + map->size - 1); + return ret; +} + #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA #include <asm/dma-direct.h> #ifndef phys_to_dma_unencrypted diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index 4bdf0b96da..ad9e2506c2 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -86,10 +86,8 @@ dma_fence_chain_contained(struct dma_fence *fence) * * Returns a new struct dma_fence_chain object or NULL on failure. */ -static inline struct dma_fence_chain *dma_fence_chain_alloc(void) -{ - return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); -}; +#define dma_fence_chain_alloc() \ + ((struct dma_fence_chain *)kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL)) /** * dma_fence_chain_free diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 4abc60f042..02a1c82589 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -18,8 +18,11 @@ struct iommu_ops; * * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can * handle PCI P2PDMA pages in the map_sg/unmap_sg operation. + * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is + * coherent and it's not an SWIOTLB buffer. */ #define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0) +#define DMA_F_CAN_SKIP_SYNC (1 << 1) struct dma_map_ops { unsigned int flags; @@ -29,7 +32,7 @@ struct dma_map_ops { unsigned long attrs); void (*free)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs); - struct page *(*alloc_pages)(struct device *dev, size_t size, + struct page *(*alloc_pages_op)(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, @@ -273,6 +276,15 @@ static inline bool dev_is_dma_coherent(struct device *dev) } #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */ +static inline void dma_reset_need_sync(struct device *dev) +{ +#ifdef CONFIG_DMA_NEED_SYNC + /* Reset it only once so that the function can be called on hotpath */ + if (unlikely(dev->dma_skip_sync)) + dev->dma_skip_sync = false; +#endif +} + /* * Check whether potential kmalloc() buffers are safe for non-coherent DMA. 
*/ @@ -426,11 +438,9 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg, #endif #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent); +void arch_setup_dma_ops(struct device *dev, bool coherent); #else -static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, - u64 size, bool coherent) +static inline void arch_setup_dma_ops(struct device *dev, bool coherent) { } #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 4a658de44e..f693aafe22 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -117,14 +117,6 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, unsigned long attrs); void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir); -void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir); -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir); -void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir); void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs); void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, @@ -147,7 +139,6 @@ u64 dma_get_required_mask(struct device *dev); bool dma_addressing_limited(struct device *dev); size_t dma_max_mapping_size(struct device *dev); size_t dma_opt_mapping_size(struct device *dev); -bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); unsigned long dma_get_merge_boundary(struct device *dev); struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, enum dma_data_direction dir, gfp_t gfp, unsigned long attrs); @@ -195,22 +186,6 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { } -static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ -} -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ -} -static inline void dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sg, int nelems, enum dma_data_direction dir) -{ -} -static inline void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, enum dma_data_direction dir) -{ -} static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return -ENOMEM; @@ -277,10 +252,6 @@ static inline size_t dma_opt_mapping_size(struct device *dev) { return 0; } -static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) -{ - return false; -} static inline unsigned long dma_get_merge_boundary(struct device *dev) { return 0; @@ -310,6 +281,82 @@ static inline int dma_mmap_noncontiguous(struct device *dev, } #endif /* CONFIG_HAS_DMA */ +#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC) +void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir); +void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction 
dir); +void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); +void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); +bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr); + +static inline bool dma_dev_need_sync(const struct device *dev) +{ + /* Always call DMA sync operations when debugging is enabled */ + return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG); +} + +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + if (dma_dev_need_sync(dev)) + __dma_sync_single_for_cpu(dev, addr, size, dir); +} + +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + if (dma_dev_need_sync(dev)) + __dma_sync_single_for_device(dev, addr, size, dir); +} + +static inline void dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, int nelems, enum dma_data_direction dir) +{ + if (dma_dev_need_sync(dev)) + __dma_sync_sg_for_cpu(dev, sg, nelems, dir); +} + +static inline void dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, int nelems, enum dma_data_direction dir) +{ + if (dma_dev_need_sync(dev)) + __dma_sync_sg_for_device(dev, sg, nelems, dir); +} + +static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) +{ + return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false; +} +#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */ +static inline bool dma_dev_need_sync(const struct device *dev) +{ + return false; +} +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ +} +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ +} +static inline void dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, int nelems, enum dma_data_direction dir) +{ +} +static inline void dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, int nelems, enum dma_data_direction dir) +{ +} +static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) +{ + return false; +} +#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */ + struct page *dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); void dma_free_pages(struct device *dev, size_t size, struct page *page, diff --git a/include/linux/dma/imx-dma.h b/include/linux/dma/imx-dma.h index cfec5f946e..76a8de9ae1 100644 --- a/include/linux/dma/imx-dma.h +++ b/include/linux/dma/imx-dma.h @@ -41,6 +41,7 @@ enum sdma_peripheral_type { IMX_DMATYPE_SAI, /* SAI */ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */ IMX_DMATYPE_HDMI, /* HDMI Audio */ + IMX_DMATYPE_I2C, /* I2C */ }; enum imx_dma_prio { diff --git a/include/linux/dmar.h b/include/linux/dmar.h index e34b601b71..499bb2c634 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -117,7 +117,7 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, int count); /* Intel IOMMU detection */ void detect_intel_iommu(void); -extern int enable_drhd_fault_handling(void); +extern int enable_drhd_fault_handling(unsigned int cpu); extern int dmar_device_add(acpi_handle handle); extern int dmar_device_remove(acpi_handle handle); diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 
4fcbf4d4fd..ff44ec3461 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -305,9 +305,9 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, #define DYNAMIC_DEBUG_BRANCH(descriptor) false #define dynamic_pr_debug(fmt, ...) \ - do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #define dynamic_dev_dbg(dev, fmt, ...) \ - do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) + dev_no_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__) #define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ do { if (0) \ diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index ff9c65841a..281298e77a 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -50,6 +50,9 @@ struct dql { unsigned int adj_limit; /* limit + num_completed */ unsigned int last_obj_cnt; /* Count at last queuing */ + /* Stall threshold (in jiffies), defined by user */ + unsigned short stall_thrs; + unsigned long history_head; /* top 58 bits of jiffies */ /* stall entries, a bit per entry */ unsigned long history[DQL_HIST_LEN]; @@ -71,8 +74,6 @@ struct dql { unsigned int min_limit; /* Minimum limit */ unsigned int slack_hold_time; /* Time to measure slack */ - /* Stall threshold (in jiffies), defined by user */ - unsigned short stall_thrs; /* Longest stall detected, reported to user */ unsigned short stall_max; unsigned long last_reap; /* Last reap (in jiffies) */ @@ -83,28 +84,11 @@ struct dql { #define DQL_MAX_OBJECT (UINT_MAX / 16) #define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT) -/* - * Record number of objects queued. Assumes that caller has already checked - * availability in the queue with dql_avail. - */ -static inline void dql_queued(struct dql *dql, unsigned int count) +/* Populate the bitmap to be processed later in dql_check_stall() */ +static inline void dql_queue_stall(struct dql *dql) { unsigned long map, now, now_hi, i; - if (WARN_ON_ONCE(count > DQL_MAX_OBJECT)) - return; - - dql->last_obj_cnt = count; - - /* We want to force a write first, so that cpu do not attempt - * to get cache line containing last_obj_cnt, num_queued, adj_limit - * in Shared state, but directly does a Request For Ownership - * It is only a hint, we use barrier() only. - */ - barrier(); - - dql->num_queued += count; - now = jiffies; now_hi = now / BITS_PER_LONG; @@ -134,6 +118,31 @@ static inline void dql_queued(struct dql *dql, unsigned int count) WRITE_ONCE(DQL_HIST_ENT(dql, now_hi), map | BIT_MASK(now)); } +/* + * Record number of objects queued. Assumes that caller has already checked + * availability in the queue with dql_avail. + */ +static inline void dql_queued(struct dql *dql, unsigned int count) +{ + if (WARN_ON_ONCE(count > DQL_MAX_OBJECT)) + return; + + dql->last_obj_cnt = count; + + /* We want to force a write first, so that cpu do not attempt + * to get cache line containing last_obj_cnt, num_queued, adj_limit + * in Shared state, but directly does a Request For Ownership + * It is only a hint, we use barrier() only. + */ + barrier(); + + dql->num_queued += count; + + /* Only populate stall information if the threshold is set */ + if (READ_ONCE(dql->stall_thrs)) + dql_queue_stall(dql); +} + /* Returns how many objects can be queued, < 0 indicates over limit. 
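 *
 * A hedged sketch of the usual producer pattern (stop_queue() stands in for
 * the caller's flow control):
 *
 *	if (dql_avail(&dql) < 0)
 *		stop_queue();
 *	else
 *		dql_queued(&dql, count);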
*/ static inline int dql_avail(const struct dql *dql) { diff --git a/include/linux/efi.h b/include/linux/efi.h index d59b0947fb..418e555459 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1072,12 +1072,11 @@ static inline u64 efivar_reserved_space(void) { return 0; } #endif /* - * The maximum size of VariableName + Data = 1024 - * Therefore, it's reasonable to save that much - * space in each part of the structure, - * and we use a page for reading/writing. + * There is no actual upper limit specified for the variable name size. + * + * This limit exists only for practical purposes, since name conversions + * are bounds-checked and name data is occasionally stored in-line. */ - #define EFI_VAR_NAME_LEN 1024 int efivars_register(struct efivars *efivars, diff --git a/include/linux/elf.h b/include/linux/elf.h index c9a46c4e18..5c402788da 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -65,7 +65,7 @@ extern Elf64_Dyn _DYNAMIC []; struct file; struct coredump_params; -#ifndef ARCH_HAVE_EXTRA_ELF_NOTES +#ifndef CONFIG_ARCH_HAVE_EXTRA_ELF_NOTES static inline int elf_coredump_extra_notes_size(void) { return 0; } static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; } #else diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 70cd7258cd..1ff52020cf 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -172,6 +172,7 @@ struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd); void em_table_free(struct em_perf_table __rcu *table); int em_dev_compute_costs(struct device *dev, struct em_perf_state *table, int nr_states); +int em_dev_update_chip_binning(struct device *dev); /** * em_pd_get_efficient_state() - Get an efficient performance state from the EM @@ -386,6 +387,10 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table, { return -EINVAL; } +static inline int em_dev_update_chip_binning(struct device *dev) +{ + return -EINVAL; +} #endif #endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index e44913a820..0ed47d0054 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -71,6 +71,12 @@ static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; #define eth_stp_addr eth_reserved_addr_base +static const u8 eth_ipv4_mcast_addr_base[ETH_ALEN] __aligned(2) = +{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; + +static const u8 eth_ipv6_mcast_addr_base[ETH_ALEN] __aligned(2) = +{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; + /** * is_link_local_ether_addr - Determine if given Ethernet address is link-local * @addr: Pointer to a six-byte array containing the Ethernet address @@ -430,18 +436,16 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, static inline bool ether_addr_is_ipv4_mcast(const u8 *addr) { - u8 base[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; - return ether_addr_equal_masked(addr, base, mask); + return ether_addr_equal_masked(addr, eth_ipv4_mcast_addr_base, mask); } static inline bool ether_addr_is_ipv6_mcast(const u8 *addr) { - u8 base[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; - return ether_addr_equal_masked(addr, base, mask); + return ether_addr_equal_masked(addr, eth_ipv6_mcast_addr_base, mask); } static inline bool ether_addr_is_ip_mcast(const u8 *addr) diff --git a/include/linux/ethtool.h 
b/include/linux/ethtool.h
index 9901e563f7..6fd9107d3c 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -480,6 +480,26 @@ struct ethtool_rmon_stats {
 	);
 };
 
+/**
+ * struct ethtool_ts_stats - HW timestamping statistics
+ * @pkts: Number of packets successfully timestamped by the hardware.
+ * @lost: Number of hardware timestamping requests where the timestamping
+ *	information from the hardware never arrived for submission with
+ *	the skb.
+ * @err: Number of arbitrary timestamp generation error events that the
+ *	hardware encountered, exclusive of @lost statistics. Cases such
+ *	as resource exhaustion, unavailability, firmware errors, and
+ *	detected illogical timestamp values not submitted with the skb
+ *	are included in this counter.
+ */
+struct ethtool_ts_stats {
+	struct_group(tx_stats,
+		u64 pkts;
+		u64 lost;
+		u64 err;
+	);
+};
+
 #define ETH_MODULE_EEPROM_PAGE_LEN	128
 #define ETH_MODULE_MAX_I2C_ADDRESS	0x7f
 
@@ -755,7 +775,10 @@ struct ethtool_rxfh_param {
 * @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
 *	It may be called with RCU, or rtnl or reference on the device.
 *	Drivers supporting transmit time stamps in software should set this to
 *	ethtool_op_get_ts_info().
+ * @get_ts_stats: Query the device hardware timestamping statistics. Drivers
+ *	must not zero statistics which they don't report. The stats structure
+ *	is initialized to ETHTOOL_STAT_NOT_SET indicating the driver does not
+ *	report statistics.
 * @get_module_info: Get the size and type of the eeprom contained within
 *	a plug-in module.
 * @get_module_eeprom: Get the eeprom information from the plug-in module
@@ -898,6 +921,8 @@ struct ethtool_ops {
			struct ethtool_dump *, void *);
	int	(*set_dump)(struct net_device *, struct ethtool_dump *);
	int	(*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
+	void	(*get_ts_stats)(struct net_device *dev,
+				struct ethtool_ts_stats *ts_stats);
	int	(*get_module_info)(struct net_device *,
				   struct ethtool_modinfo *);
	int	(*get_module_eeprom)(struct net_device *,
diff --git a/include/linux/evm.h b/include/linux/evm.h
index d48d6da323..ddece4a6b2 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -26,6 +26,8 @@ extern int evm_protected_xattr_if_enabled(const char *req_xattr_name);
 extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
				     int buffer_size, char type,
				     bool canonical_fmt);
+extern bool evm_metadata_changed(struct inode *inode,
+				 struct inode *metadata_inode);
 #ifdef CONFIG_FS_POSIX_ACL
 extern int posix_xattr_acl(const char *xattrname);
 #else
@@ -76,5 +78,11 @@ static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
 	return -EOPNOTSUPP;
 }
 
+static inline bool evm_metadata_changed(struct inode *inode,
+					struct inode *metadata_inode)
+{
+	return false;
+}
+
 #endif /* CONFIG_EVM */
 #endif /* LINUX_EVM_H */
diff --git a/include/linux/execmem.h b/include/linux/execmem.h
new file mode 100644
index 0000000000..32cef11441
--- /dev/null
+++ b/include/linux/execmem.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_EXECMEM_ALLOC_H
+#define _LINUX_EXECMEM_ALLOC_H
+
+#include <linux/types.h>
+#include <linux/moduleloader.h>
+
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+	!defined(CONFIG_KASAN_VMALLOC)
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
+/**
+ * enum execmem_type - types of executable memory ranges
+ *
+ * There are several subsystems that allocate executable memory.
+ * Architectures define different restrictions on placement,
+ * permissions, alignment and other parameters for memory that can be used
+ * by these subsystems.
+ * Types in this enum identify subsystems that allocate executable memory
+ * and let architectures define parameters for ranges suitable for
+ * allocations by each subsystem.
+ *
+ * @EXECMEM_DEFAULT: default parameters that would be used for types that
+ * are not explicitly defined.
+ * @EXECMEM_MODULE_TEXT: parameters for module text sections
+ * @EXECMEM_KPROBES: parameters for kprobes
+ * @EXECMEM_FTRACE: parameters for ftrace
+ * @EXECMEM_BPF: parameters for BPF
+ * @EXECMEM_MODULE_DATA: parameters for module data sections
+ * @EXECMEM_TYPE_MAX:
+ */
+enum execmem_type {
+	EXECMEM_DEFAULT,
+	EXECMEM_MODULE_TEXT = EXECMEM_DEFAULT,
+	EXECMEM_KPROBES,
+	EXECMEM_FTRACE,
+	EXECMEM_BPF,
+	EXECMEM_MODULE_DATA,
+	EXECMEM_TYPE_MAX,
+};
+
+/**
+ * enum execmem_range_flags - options for executable memory allocations
+ * @EXECMEM_KASAN_SHADOW: allocate kasan shadow
+ */
+enum execmem_range_flags {
+	EXECMEM_KASAN_SHADOW = (1 << 0),
+};
+
+/**
+ * struct execmem_range - definition of an address space suitable for code and
+ *			  related data allocations
+ * @start:	address space start
+ * @end:	address space end (inclusive)
+ * @fallback_start:	start of the secondary address space range for fallback
+ *			allocations on architectures that require it
+ * @fallback_end:	end of the secondary address space range (inclusive)
+ * @pgprot:	permissions for memory in this address space
+ * @alignment:	alignment required for text allocations
+ * @flags:	options for memory allocations for this range
+ */
+struct execmem_range {
+	unsigned long   start;
+	unsigned long   end;
+	unsigned long   fallback_start;
+	unsigned long   fallback_end;
+	pgprot_t        pgprot;
+	unsigned int	alignment;
+	enum execmem_range_flags flags;
+};
+
+/**
+ * struct execmem_info - architecture parameters for code allocations
+ * @ranges: array of parameter sets defining architecture specific
+ * parameters for executable memory allocations. The ranges that are not
+ * explicitly initialized by an architecture use parameters defined for
+ * @EXECMEM_DEFAULT.
+ */
+struct execmem_info {
+	struct execmem_range	ranges[EXECMEM_TYPE_MAX];
+};
+
+/**
+ * execmem_arch_setup - define parameters for allocations of executable memory
+ *
+ * A hook for architectures to define parameters for allocations of
+ * executable memory. These parameters should be filled into the
+ * @execmem_info structure.
+ *
+ * For architectures that do not implement this method a default set of
+ * parameters will be used.
+ *
+ * Return: a structure defining architecture parameters and restrictions
+ * for allocations of executable memory
+ */
+struct execmem_info *execmem_arch_setup(void);
+
+/**
+ * execmem_alloc - allocate executable memory
+ * @type: type of the allocation
+ * @size: how many bytes of memory are required
+ *
+ * Allocates memory that will contain either executable code, generated or
+ * loaded from kernel modules, or data coupled with executable code, like
+ * data sections in kernel modules.
+ *
+ * The memory will have protections defined by the architecture for the
+ * executable region of the @type.
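+ *
+ * A hedged usage sketch (the code-emission step is a placeholder):
+ *
+ *	void *image = execmem_alloc(EXECMEM_BPF, size);
+ *
+ *	if (image) {
+ *		... emit instructions into image ...
+ *		execmem_free(image);
+ *	}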
+ *
+ * Return: a pointer to the allocated memory or %NULL
+ */
+void *execmem_alloc(enum execmem_type type, size_t size);
+
+/**
+ * execmem_free - free executable memory
+ * @ptr: pointer to the memory that should be freed
+ */
+void execmem_free(void *ptr);
+
+#if defined(CONFIG_EXECMEM) && !defined(CONFIG_ARCH_WANTS_EXECMEM_LATE)
+void execmem_init(void);
+#else
+static inline void execmem_init(void) {}
+#endif
+
+#endif /* _LINUX_EXECMEM_ALLOC_H */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index a357287eac..41d1d71c36 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -394,7 +394,8 @@ struct f2fs_nat_block {
 
 /*
 * F2FS uses 4 bytes to represent block address. As a result, supported size of
- * disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments.
+ * disk is 16 TB for a 4K page size and 64 TB for a 16K page size, which equals
+ * 16 * 1024 * 1024 / 2 segments.
 */
 #define F2FS_MAX_SEGMENT	((16 * 1024 * 1024) / 2)
 
@@ -424,8 +425,10 @@ struct f2fs_sit_block {
 /*
 * For segment summary
 *
- * One summary block contains exactly 512 summary entries, which represents
- * exactly one segment by default. Not allow to change the basic units.
+ * One 4KB summary block contains exactly 512 summary entries, which
+ * represents exactly one 2MB segment.
+ * Similarly, a 16KB summary block represents one 8MB segment.
+ * These basic units must not be changed.
 *
 * NOTE: For initializing fields, you must use set_summary
 *
@@ -556,6 +559,7 @@ typedef __le32	f2fs_hash_t;
 
 /*
 * space utilization of regular dentry and inline dentry (w/o extra reservation)
+ * when block size is 4KB.
 * regular dentry		inline dentry (def)	inline dentry (min)
 * bitmap	1 * 27 = 27	1 * 23 = 23		1 * 1 = 1
 * reserved	1 * 3 = 3	1 * 7 = 7		1 * 1 = 1
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 811e47f9d1..0f0834939b 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -12,7 +12,7 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
-#include <asm/fb.h>
+#include <asm/video.h>
 
 struct backlight_device;
 struct device;
@@ -742,6 +742,15 @@ extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
 extern void framebuffer_release(struct fb_info *info);
 extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
 
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+struct backlight_device *fb_bl_device(struct fb_info *info);
+#else
+static inline struct backlight_device *fb_bl_device(struct fb_info *info)
+{
+	return NULL;
+}
+#endif
+
 /* fbmon.c */
 #define FB_MAXTIMINGS		0
 #define FB_VSYNCTIMINGS		1
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 78c8326d74..2944d4aa41 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -33,16 +33,6 @@ struct fdtable {
 	struct rcu_head rcu;
 };
 
-static inline bool close_on_exec(unsigned int fd, const struct fdtable *fdt)
-{
-	return test_bit(fd, fdt->close_on_exec);
-}
-
-static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
-{
-	return test_bit(fd, fdt->open_fds);
-}
-
 /*
 * Open file table structure
 */
@@ -107,6 +97,11 @@ struct file *lookup_fdget_rcu(unsigned int fd);
 struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd);
 struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd);
 
+static inline bool close_on_exec(unsigned int fd, const struct files_struct *files)
+{
+	return test_bit(fd, files_fdtable(files)->close_on_exec);
+}
+
 struct task_struct;
 
 void
put_files_struct(struct files_struct *fs); diff --git a/include/linux/file.h b/include/linux/file.h index 169692cb19..45d0f4800a 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -84,6 +84,7 @@ static inline void fdput_pos(struct fd f) } DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd) +DEFINE_CLASS(fd_raw, struct fd, fdput(_T), fdget_raw(fd), int fd) extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); extern int replace_fd(unsigned fd, struct file *file, unsigned flags); diff --git a/include/linux/filter.h b/include/linux/filter.h index cf12bfa2a7..5669da513c 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -75,6 +75,9 @@ struct ctl_table_header; /* unused opcode to mark special load instruction. Same as BPF_MSH */ #define BPF_PROBE_MEM32 0xa0 +/* unused opcode to mark special atomic instruction */ +#define BPF_PROBE_ATOMIC 0xe0 + /* unused opcode to mark call to interpreter with arguments */ #define BPF_CALL_ARGS 0xe0 @@ -178,6 +181,25 @@ struct ctl_table_header; .off = 0, \ .imm = 0 }) +/* Special (internal-only) form of mov, used to resolve per-CPU addrs: + * dst_reg = src_reg + <percpu_base_off> + * BPF_ADDR_PERCPU is used as a special insn->off value. + */ +#define BPF_ADDR_PERCPU (-1) + +#define BPF_MOV64_PERCPU_REG(DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = BPF_ADDR_PERCPU, \ + .imm = 0 }) + +static inline bool insn_is_mov_percpu_addr(const struct bpf_insn *insn) +{ + return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->off == BPF_ADDR_PERCPU; +} + /* Short form of mov, dst_reg = imm32 */ #define BPF_MOV64_IMM(DST, IMM) \ @@ -228,6 +250,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1; } +/* addr_space_cast from as(0) to as(1) is for converting bpf arena pointers + * to pointers in user vma. 
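+ *
+ * A hedged verifier-side usage sketch:
+ *
+ *	if (insn_is_cast_user(insn))
+ *		... rewrite the arena pointer into a user vma pointer ...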
+ */ +static inline bool insn_is_cast_user(const struct bpf_insn *insn) +{ + return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && + insn->off == BPF_ADDR_SPACE_CAST && + insn->imm == 1U << 16; +} + /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ #define BPF_LD_IMM64(DST, IMM) \ BPF_LD_IMM64_RAW(DST, 0, IMM) @@ -644,14 +676,16 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog, cant_migrate(); if (static_branch_unlikely(&bpf_stats_enabled_key)) { struct bpf_prog_stats *stats; - u64 start = sched_clock(); + u64 duration, start = sched_clock(); unsigned long flags; ret = dfunc(ctx, prog->insnsi, prog->bpf_func); + + duration = sched_clock() - start; stats = this_cpu_ptr(prog->stats); flags = u64_stats_update_begin_irqsave(&stats->syncp); u64_stats_inc(&stats->cnt); - u64_stats_add(&stats->nsecs, sched_clock() - start); + u64_stats_add(&stats->nsecs, duration); u64_stats_update_end_irqrestore(&stats->syncp, flags); } else { ret = dfunc(ctx, prog->insnsi, prog->bpf_func); @@ -959,12 +993,15 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); +bool bpf_jit_inlines_helper_call(s32 imm); bool bpf_jit_supports_subprog_tailcalls(void); +bool bpf_jit_supports_percpu_insn(void); bool bpf_jit_supports_kfunc_call(void); bool bpf_jit_supports_far_kfunc_call(void); bool bpf_jit_supports_exceptions(void); bool bpf_jit_supports_ptr_xchg(void); bool bpf_jit_supports_arena(void); +bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena); u64 bpf_arch_uaddress_limit(void); void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie); bool bpf_helper_changes_pkt_data(void *func); @@ -1171,18 +1208,18 @@ static inline bool bpf_jit_kallsyms_enabled(void) return false; } -const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, +int __bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char *sym); bool is_bpf_text_address(unsigned long addr); int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym); struct bpf_prog *bpf_prog_ksym_find(unsigned long addr); -static inline const char * +static inline int bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym) { - const char *ret = __bpf_address_lookup(addr, size, off, sym); + int ret = __bpf_address_lookup(addr, size, off, sym); if (ret && modname) *modname = NULL; @@ -1226,11 +1263,11 @@ static inline bool bpf_jit_kallsyms_enabled(void) return false; } -static inline const char * +static inline int __bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char *sym) { - return NULL; + return 0; } static inline bool is_bpf_text_address(unsigned long addr) @@ -1249,11 +1286,11 @@ static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) return NULL; } -static inline const char * +static inline int bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym) { - return NULL; + return 0; } static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) diff --git a/include/linux/find.h b/include/linux/find.h index c69598e383..5dfca4225f 100644 --- a/include/linux/find.h +++ b/include/linux/find.h @@ -29,6 +29,8 @@ unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsign unsigned long n); extern unsigned long 
_find_first_and_bit(const unsigned long *addr1, const unsigned long *addr2, unsigned long size); +unsigned long _find_first_and_and_bit(const unsigned long *addr1, const unsigned long *addr2, + const unsigned long *addr3, unsigned long size); extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size); extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size); @@ -220,7 +222,7 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size) * idx = find_first_bit(addr, size); * * Returns the bit number of the N'th set bit. - * If no such, returns @size. + * If no such bit exists, the returned value is >= @size. */ static inline unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n) @@ -345,6 +347,31 @@ unsigned long find_first_and_bit(const unsigned long *addr1, } #endif +/** + * find_first_and_and_bit - find the first set bit in 3 memory regions + * @addr1: The first address to base the search on + * @addr2: The second address to base the search on + * @addr3: The third address to base the search on + * @size: The bitmap size in bits + * + * Returns the bit number for the first set bit. + * If no bits are set, returns @size. + */ +static inline +unsigned long find_first_and_and_bit(const unsigned long *addr1, + const unsigned long *addr2, + const unsigned long *addr3, + unsigned long size) +{ + if (small_const_nbits(size)) { + unsigned long val = *addr1 & *addr2 & *addr3 & GENMASK(size - 1, 0); + + return val ? __ffs(val) : size; + } + + return _find_first_and_and_bit(addr1, addr2, addr3, size); +} + #ifndef find_first_zero_bit /** * find_first_zero_bit - find the first cleared bit in a memory region diff --git a/include/linux/firewire.h b/include/linux/firewire.h index dd9f2d765e..1cca14cf56 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -462,7 +462,7 @@ struct fw_iso_packet { /* rx: Sync bit, wait for matching sy */ u32 tag:2; /* tx: Tag in packet header */ u32 sy:4; /* tx: Sy in packet header */ - u32 header_length:8; /* Length of immediate header */ + u32 header_length:8; /* Size of immediate header */ u32 header[]; /* tx: Top of 1394 isoch. data_block */ }; diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h index 23384a54d5..82687e07a7 100644 --- a/include/linux/firmware/cirrus/cs_dsp.h +++ b/include/linux/firmware/cirrus/cs_dsp.h @@ -238,8 +238,12 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp); int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id); int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off, const void *buf, size_t len); +int cs_dsp_coeff_lock_and_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off, + const void *buf, size_t len); int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off, void *buf, size_t len); +int cs_dsp_coeff_lock_and_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off, + void *buf, size_t len); struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type, unsigned int alg); diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 9754f97e71..7e0f340bf3 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -15,10 +15,14 @@ #define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \ FIELD_PREP(GENMASK(7, 1), func)) +/* Overridden by KUnit tests. 
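+ * (The KUnit suite is expected to define fortify_panic() and fortify_warn_once()
+ * before including this header, so would-be fortify traps can be observed by
+ * the tests instead of killing the kernel.)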
*/ #ifndef fortify_panic # define fortify_panic(func, write, avail, size, retfail) \ __fortify_panic(FORTIFY_REASON(func, write), avail, size) #endif +#ifndef fortify_warn_once +# define fortify_warn_once(x...) WARN_ONCE(x) +#endif #define FORTIFY_READ 0 #define FORTIFY_WRITE 1 @@ -623,7 +627,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, const size_t __q_size = (q_size); \ const size_t __p_size_field = (p_size_field); \ const size_t __q_size_field = (q_size_field); \ - WARN_ONCE(fortify_memcpy_chk(__fortify_size, __p_size, \ + fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \ __q_size, __p_size_field, \ __q_size_field, FORTIFY_FUNC_ ##op), \ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \ @@ -739,9 +743,9 @@ __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size) return __real_memchr_inv(p, c, size); } -extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup) +extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup_noprof) __realloc_size(2); -__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp) +__FORTIFY_INLINE void *kmemdup_noprof(const void * const POS0 p, size_t size, gfp_t gfp) { const size_t p_size = __struct_size(p); @@ -752,6 +756,7 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp __real_kmemdup(p, 0, gfp)); return __real_kmemdup(p, size, gfp); } +#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__)) /** * strcpy - Copy a string into another string buffer diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h index 3e03758151..f398695881 100644 --- a/include/linux/fprobe.h +++ b/include/linux/fprobe.h @@ -7,6 +7,16 @@ #include <linux/ftrace.h> #include <linux/rethook.h> +struct fprobe; + +typedef int (*fprobe_entry_cb)(struct fprobe *fp, unsigned long entry_ip, + unsigned long ret_ip, struct pt_regs *regs, + void *entry_data); + +typedef void (*fprobe_exit_cb)(struct fprobe *fp, unsigned long entry_ip, + unsigned long ret_ip, struct pt_regs *regs, + void *entry_data); + /** * struct fprobe - ftrace based probe. * @ops: The ftrace_ops. @@ -34,12 +44,8 @@ struct fprobe { size_t entry_data_size; int nr_maxactive; - int (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, - unsigned long ret_ip, struct pt_regs *regs, - void *entry_data); - void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, - unsigned long ret_ip, struct pt_regs *regs, - void *entry_data); + fprobe_entry_cb entry_handler; + fprobe_exit_cb exit_handler; }; /* This fprobe is soft-disabled. */ diff --git a/include/linux/fpu.h b/include/linux/fpu.h new file mode 100644 index 0000000000..2fb63e2291 --- /dev/null +++ b/include/linux/fpu.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_FPU_H +#define _LINUX_FPU_H + +#ifdef _LINUX_FPU_COMPILATION_UNIT +#error FP code must be compiled separately. See Documentation/core-api/floating-point.rst. 
+#endif + +#include <asm/fpu.h> + +#endif diff --git a/include/linux/fs.h b/include/linux/fs.h index b09f141321..0283cf366c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -73,6 +73,8 @@ struct fscrypt_inode_info; struct fscrypt_operations; struct fsverity_info; struct fsverity_operations; +struct fsnotify_mark_connector; +struct fsnotify_sb_info; struct fs_context; struct fs_parameter_spec; struct fileattr; @@ -110,23 +112,26 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, */ /* file is open for reading */ -#define FMODE_READ ((__force fmode_t)0x1) +#define FMODE_READ ((__force fmode_t)(1 << 0)) /* file is open for writing */ -#define FMODE_WRITE ((__force fmode_t)0x2) +#define FMODE_WRITE ((__force fmode_t)(1 << 1)) /* file is seekable */ -#define FMODE_LSEEK ((__force fmode_t)0x4) +#define FMODE_LSEEK ((__force fmode_t)(1 << 2)) /* file can be accessed using pread */ -#define FMODE_PREAD ((__force fmode_t)0x8) +#define FMODE_PREAD ((__force fmode_t)(1 << 3)) /* file can be accessed using pwrite */ -#define FMODE_PWRITE ((__force fmode_t)0x10) +#define FMODE_PWRITE ((__force fmode_t)(1 << 4)) /* File is opened for execution with sys_execve / sys_uselib */ -#define FMODE_EXEC ((__force fmode_t)0x20) +#define FMODE_EXEC ((__force fmode_t)(1 << 5)) /* File writes are restricted (block device specific) */ -#define FMODE_WRITE_RESTRICTED ((__force fmode_t)0x40) +#define FMODE_WRITE_RESTRICTED ((__force fmode_t)(1 << 6)) + +/* FMODE_* bits 7 to 8 */ + /* 32bit hashes as llseek() offset (for directories) */ -#define FMODE_32BITHASH ((__force fmode_t)0x200) +#define FMODE_32BITHASH ((__force fmode_t)(1 << 9)) /* 64bit hashes as llseek() offset (for directories) */ -#define FMODE_64BITHASH ((__force fmode_t)0x400) +#define FMODE_64BITHASH ((__force fmode_t)(1 << 10)) /* * Don't update ctime and mtime. @@ -134,60 +139,53 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, * Currently a special hack for the XFS open_by_handle ioctl, but we'll * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. */ -#define FMODE_NOCMTIME ((__force fmode_t)0x800) +#define FMODE_NOCMTIME ((__force fmode_t)(1 << 11)) /* Expect random access pattern */ -#define FMODE_RANDOM ((__force fmode_t)0x1000) +#define FMODE_RANDOM ((__force fmode_t)(1 << 12)) /* File is huge (eg. 
/dev/mem): treat loff_t as unsigned */ -#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) +#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)(1 << 13)) /* File is opened with O_PATH; almost nothing can be done with it */ -#define FMODE_PATH ((__force fmode_t)0x4000) +#define FMODE_PATH ((__force fmode_t)(1 << 14)) /* File needs atomic accesses to f_pos */ -#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) +#define FMODE_ATOMIC_POS ((__force fmode_t)(1 << 15)) /* Write access to underlying fs */ -#define FMODE_WRITER ((__force fmode_t)0x10000) +#define FMODE_WRITER ((__force fmode_t)(1 << 16)) /* Has read method(s) */ -#define FMODE_CAN_READ ((__force fmode_t)0x20000) +#define FMODE_CAN_READ ((__force fmode_t)(1 << 17)) /* Has write method(s) */ -#define FMODE_CAN_WRITE ((__force fmode_t)0x40000) +#define FMODE_CAN_WRITE ((__force fmode_t)(1 << 18)) -#define FMODE_OPENED ((__force fmode_t)0x80000) -#define FMODE_CREATED ((__force fmode_t)0x100000) +#define FMODE_OPENED ((__force fmode_t)(1 << 19)) +#define FMODE_CREATED ((__force fmode_t)(1 << 20)) /* File is stream-like */ -#define FMODE_STREAM ((__force fmode_t)0x200000) +#define FMODE_STREAM ((__force fmode_t)(1 << 21)) /* File supports DIRECT IO */ -#define FMODE_CAN_ODIRECT ((__force fmode_t)0x400000) +#define FMODE_CAN_ODIRECT ((__force fmode_t)(1 << 22)) -#define FMODE_NOREUSE ((__force fmode_t)0x800000) +#define FMODE_NOREUSE ((__force fmode_t)(1 << 23)) -/* File supports non-exclusive O_DIRECT writes from multiple threads */ -#define FMODE_DIO_PARALLEL_WRITE ((__force fmode_t)0x1000000) +/* FMODE_* bit 24 */ /* File is embedded in backing_file object */ -#define FMODE_BACKING ((__force fmode_t)0x2000000) +#define FMODE_BACKING ((__force fmode_t)(1 << 25)) /* File was opened by fanotify and shouldn't generate fanotify events */ -#define FMODE_NONOTIFY ((__force fmode_t)0x4000000) +#define FMODE_NONOTIFY ((__force fmode_t)(1 << 26)) /* File is capable of returning -EAGAIN if I/O will block */ -#define FMODE_NOWAIT ((__force fmode_t)0x8000000) +#define FMODE_NOWAIT ((__force fmode_t)(1 << 27)) /* File represents mount that needs unmounting */ -#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000) +#define FMODE_NEED_UNMOUNT ((__force fmode_t)(1 << 28)) /* File does not contribute to nr_files count */ -#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) - -/* File supports async buffered reads */ -#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) - -/* File supports async nowait buffered writes */ -#define FMODE_BUF_WASYNC ((__force fmode_t)0x80000000) +#define FMODE_NOACCOUNT ((__force fmode_t)(1 << 29)) /* * Attribute flags. 
These should be or-ed together to figure out what @@ -622,8 +620,6 @@ is_uncached_acl(struct posix_acl *acl) #define IOP_XATTR 0x0008 #define IOP_DEFAULT_READLINK 0x0010 -struct fsnotify_mark_connector; - /* * Keep mostly read-only and often accessed (especially for * the RCU path lookup and 'stat' data) fields at the beginning @@ -1035,12 +1031,13 @@ struct file_handle { __u32 handle_bytes; int handle_type; /* file identifier */ - unsigned char f_handle[]; + unsigned char f_handle[] __counted_by(handle_bytes); }; static inline struct file *get_file(struct file *f) { - atomic_long_inc(&f->f_count); + long prior = atomic_long_fetch_inc_relaxed(&f->f_count); + WARN_ONCE(!prior, "struct file::f_count incremented from zero; use-after-free condition present!\n"); return f; } @@ -1177,7 +1174,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ #define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 #define SB_I_UNTRUSTED_MOUNTER 0x00000040 -#define SB_I_EVM_UNSUPPORTED 0x00000080 +#define SB_I_EVM_HMAC_UNSUPPORTED 0x00000080 #define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ #define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */ @@ -1251,7 +1248,7 @@ struct super_block { /* * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and - * s_fsnotify_marks together for cache efficiency. They are frequently + * s_fsnotify_info together for cache efficiency. They are frequently * accessed and rarely modified. */ void *s_fs_info; /* Filesystem private info */ @@ -1263,7 +1260,7 @@ struct super_block { time64_t s_time_max; #ifdef CONFIG_FSNOTIFY __u32 s_fsnotify_mask; - struct fsnotify_mark_connector __rcu *s_fsnotify_marks; + struct fsnotify_sb_info *s_fsnotify_info; #endif /* @@ -1304,12 +1301,6 @@ struct super_block { /* Number of inodes with nlink == 0 but still referenced */ atomic_long_t s_remove_count; - /* - * Number of inode/mount/sb objects that are being watched, note that - * inodes objects are currently double-accounted. 
- */ - atomic_long_t s_fsnotify_connectors; - /* Read-only state of the superblock is being changed */ int s_readonly_remount; @@ -1908,7 +1899,7 @@ struct file *kernel_tmpfile_open(struct mnt_idmap *idmap, umode_t mode, int open_flag, const struct cred *cred); struct file *kernel_file_open(const struct path *path, int flags, - struct inode *inode, const struct cred *cred); + const struct cred *cred); int vfs_mkobj(struct dentry *, umode_t, int (*f)(struct dentry *, umode_t, void *), @@ -2003,8 +1994,11 @@ struct iov_iter; struct io_uring_cmd; struct offset_ctx; +typedef unsigned int __bitwise fop_flags_t; + struct file_operations { struct module *owner; + fop_flags_t fop_flags; loff_t (*llseek) (struct file *, loff_t, int); ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); @@ -2017,7 +2011,6 @@ struct file_operations { long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); - unsigned long mmap_supported_flags; int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); @@ -2048,6 +2041,17 @@ struct file_operations { unsigned int poll_flags); } __randomize_layout; +/* Supports async buffered reads */ +#define FOP_BUFFER_RASYNC ((__force fop_flags_t)(1 << 0)) +/* Supports async buffered writes */ +#define FOP_BUFFER_WASYNC ((__force fop_flags_t)(1 << 1)) +/* Supports synchronous page faults for mappings */ +#define FOP_MMAP_SYNC ((__force fop_flags_t)(1 << 2)) +/* Supports non-exclusive O_DIRECT writes from multiple threads */ +#define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3)) +/* Contains huge pages */ +#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4)) + /* Wrap a directory iterator that needs exclusive inode access */ int wrap_directory_iterator(struct file *, struct dir_context *, int (*) (struct file *, struct dir_context *)); @@ -2098,18 +2102,6 @@ struct inode_operations { struct offset_ctx *(*get_offset_ctx)(struct inode *inode); } ____cacheline_aligned; -static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, - struct iov_iter *iter) -{ - return file->f_op->read_iter(kio, iter); -} - -static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio, - struct iov_iter *iter) -{ - return file->f_op->write_iter(kio, iter); -} - static inline int call_mmap(struct file *file, struct vm_area_struct *vma) { return file->f_op->mmap(file, vma); @@ -2119,6 +2111,7 @@ extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); +int remap_verify_area(struct file *file, loff_t pos, loff_t len, bool write); int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *len, unsigned int remap_flags, @@ -2253,7 +2246,13 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) + +#ifdef CONFIG_SWAP #define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE) +#else +#define IS_SWAPFILE(inode) ((void)(inode), 0U) +#endif + #define IS_PRIVATE(inode) ((inode)->i_flags & 
S_PRIVATE) #define IS_IMA(inode) ((inode)->i_flags & S_IMA) #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) @@ -3085,11 +3084,7 @@ int setattr_should_drop_sgid(struct mnt_idmap *idmap, * This must be used for allocating filesystems specific inodes to set * up the inode reclaim context correctly. */ -static inline void * -alloc_inode_sb(struct super_block *sb, struct kmem_cache *cache, gfp_t gfp) -{ - return kmem_cache_alloc_lru(cache, &sb->s_inode_lru, gfp); -} +#define alloc_inode_sb(_sb, _cache, _gfp) kmem_cache_alloc_lru(_cache, &_sb->s_inode_lru, _gfp) extern void __insert_inode_hash(struct inode *, unsigned long hashval); static inline void insert_inode_hash(struct inode *inode) diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h index 01542c4b87..d335097911 100644 --- a/include/linux/fs_parser.h +++ b/include/linux/fs_parser.h @@ -132,4 +132,8 @@ static inline bool fs_validate_description(const char *name, #define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL) #define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL) +/* String parameter that allows empty argument */ +#define fsparam_string_empty(NAME, OPT) \ + __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL) + #endif /* _LINUX_FS_PARSER_H */ diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 6e8562cbcc..9de2764360 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -172,9 +172,12 @@ extern void __fscache_invalidate(struct fscache_cookie *, const void *, loff_t, extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *); extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *); -extern void __fscache_write_to_cache(struct fscache_cookie *, struct address_space *, - loff_t, size_t, loff_t, netfs_io_terminated_t, void *, - bool); +void __fscache_write_to_cache(struct fscache_cookie *cookie, + struct address_space *mapping, + loff_t start, size_t len, loff_t i_size, + netfs_io_terminated_t term_func, + void *term_func_priv, + bool using_pgpriv2, bool cond); extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t); /** @@ -597,7 +600,8 @@ static inline void fscache_clear_page_bits(struct address_space *mapping, * @i_size: The new size of the inode * @term_func: The function to call upon completion * @term_func_priv: The private data for @term_func - * @caching: If PG_fscache has been set + * @using_pgpriv2: If we're using PG_private_2 to mark in-progress write + * @caching: If we actually want to do the caching * * Helper function for a netfs to write dirty data from an inode into the cache * object that's backing it. @@ -608,19 +612,21 @@ static inline void fscache_clear_page_bits(struct address_space *mapping, * marked with PG_fscache. * * If given, @term_func will be called upon completion and supplied with - * @term_func_priv. Note that the PG_fscache flags will have been cleared by - * this point, so the netfs must retain its own pin on the mapping. + * @term_func_priv. Note that if @using_pgpriv2 is set, the PG_private_2 flags + * will have been cleared by this point, so the netfs must retain its own pin + * on the mapping. 
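+ *
+ * An illustrative call from a netfs writeback path, where my_write_done and
+ * my_request are hypothetical names:
+ *
+ *	fscache_write_to_cache(cookie, inode->i_mapping, start, len, i_size,
+ *			       my_write_done, my_request, true, caching);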
*/ static inline void fscache_write_to_cache(struct fscache_cookie *cookie, struct address_space *mapping, loff_t start, size_t len, loff_t i_size, netfs_io_terminated_t term_func, void *term_func_priv, - bool caching) + bool using_pgpriv2, bool caching) { if (caching) __fscache_write_to_cache(cookie, mapping, start, len, i_size, - term_func, term_func_priv, caching); + term_func, term_func_priv, + using_pgpriv2, caching); else if (term_func) term_func(term_func_priv, -ENOBUFS, false); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index f79853d277..278620e063 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -17,10 +17,23 @@ #include <linux/slab.h> #include <linux/bug.h> +/* Are there any inode/mount/sb objects watched with priority prio or above? */ +static inline bool fsnotify_sb_has_priority_watchers(struct super_block *sb, + int prio) +{ + struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); + + /* Were any marks ever added to any object on this sb? */ + if (!sbinfo) + return false; + + return atomic_long_read(&sbinfo->watched_objects[prio]); +} + /* Are there any inode/mount/sb objects that are being watched at all? */ static inline bool fsnotify_sb_has_watchers(struct super_block *sb) { - return atomic_long_read(&sb->s_fsnotify_connectors); + return fsnotify_sb_has_priority_watchers(sb, 0); } /* @@ -109,6 +122,12 @@ static inline int fsnotify_file(struct file *file, __u32 mask) return 0; path = &file->f_path; + /* Permission events require group prio >= FSNOTIFY_PRIO_CONTENT */ + if (mask & ALL_FSNOTIFY_PERM_EVENTS && + !fsnotify_sb_has_priority_watchers(path->dentry->d_sb, + FSNOTIFY_PRIO_CONTENT)) + return 0; + return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); } diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 8f40c349b2..4dd6143db2 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -177,6 +177,17 @@ struct fsnotify_event { }; /* + * fsnotify group priorities. + * Events are sent in order from highest priority to lowest priority. + */ +enum fsnotify_group_prio { + FSNOTIFY_PRIO_NORMAL = 0, /* normal notifiers, no permissions */ + FSNOTIFY_PRIO_CONTENT, /* fanotify permission events */ + FSNOTIFY_PRIO_PRE_CONTENT, /* fanotify pre-content events */ + __FSNOTIFY_PRIO_NUM +}; + +/* * A group is a "thing" that wants to receive notification about filesystem * events. The mask holds the subset of event types this group cares about. * refcnt on a group is up to the implementor and at any moment if it goes 0 @@ -201,14 +212,7 @@ struct fsnotify_group { wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */ unsigned int q_len; /* events on the queue */ unsigned int max_events; /* maximum events allowed on the list */ - /* - * Valid fsnotify group priorities. Events are send in order from highest - * priority to lowest priority. We default to the lowest priority. 
- */ - #define FS_PRIO_0 0 /* normal notifiers, no permissions */ - #define FS_PRIO_1 1 /* fanotify content based access control */ - #define FS_PRIO_2 2 /* fanotify pre-content access */ - unsigned int priority; + enum fsnotify_group_prio priority; /* priority for sending events */ bool shutdown; /* group is being shut down, don't queue more events */ #define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */ @@ -457,13 +461,6 @@ FSNOTIFY_ITER_FUNCS(sb, SB) type++) /* - * fsnotify_connp_t is what we embed in objects which connector can be attached - * to. fsnotify_connp_t * is how we refer from connector back to object. - */ -struct fsnotify_mark_connector; -typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; - -/* * Inode/vfsmount/sb point to this structure which tracks all marks attached to * the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this * structure. We destroy this structure when there are no more marks attached @@ -471,12 +468,14 @@ typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; */ struct fsnotify_mark_connector { spinlock_t lock; - unsigned short type; /* Type of object [lock] */ + unsigned char type; /* Type of object [lock] */ + unsigned char prio; /* Highest priority group */ +#define FSNOTIFY_CONN_FLAG_IS_WATCHED 0x01 #define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02 unsigned short flags; /* flags [lock] */ union { /* Object pointer [lock] */ - fsnotify_connp_t *obj; + void *obj; /* Used listing heads to free after srcu period expires */ struct fsnotify_mark_connector *destroy_next; }; @@ -484,6 +483,37 @@ struct fsnotify_mark_connector { }; /* + * Container for per-sb fsnotify state (sb marks and more). + * Attached lazily when the first object on the sb is marked and freed when + * the sb is killed. + */ +struct fsnotify_sb_info { + struct fsnotify_mark_connector __rcu *sb_marks; + /* + * Number of inode/mount/sb objects that are being watched in this sb. + * Note that inode objects are currently double-accounted. + * + * The value in watched_objects[prio] is the number of objects that are + * watched by groups of priority >= prio, so watched_objects[0] is the + * total number of watched objects in this sb. + */ + atomic_long_t watched_objects[__FSNOTIFY_PRIO_NUM]; +}; + +static inline struct fsnotify_sb_info *fsnotify_sb_info(struct super_block *sb) +{ +#ifdef CONFIG_FSNOTIFY + return READ_ONCE(sb->s_fsnotify_info); +#else + return NULL; +#endif +} + +static inline atomic_long_t *fsnotify_sb_watched_objects(struct super_block *sb) +{ + return &fsnotify_sb_info(sb)->watched_objects[0]; +} + +/* * A mark is simply an object attached to an in core inode which allows an * fsnotify listener to indicate they are either no longer interested in events * of a type matching mask or only interested in those events. 
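 * (With this change a mark is attached to its object through the type-agnostic
 * fsnotify_add_mark(mark, obj, obj_type, add_flags) API below, rather than via
 * the removed fsnotify_connp_t indirection.)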
@@ -546,6 +576,7 @@ extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); +extern void fsnotify_sb_free(struct super_block *sb); extern u32 fsnotify_get_cookie(void); static inline __u32 fsnotify_parent_needed_mask(__u32 mask) @@ -758,30 +789,35 @@ extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn); extern void fsnotify_init_mark(struct fsnotify_mark *mark, struct fsnotify_group *group); /* Find mark belonging to given group in the list of marks */ -extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, - struct fsnotify_group *group); +struct fsnotify_mark *fsnotify_find_mark(void *obj, unsigned int obj_type, + struct fsnotify_group *group); /* attach the mark to the object */ -extern int fsnotify_add_mark(struct fsnotify_mark *mark, - fsnotify_connp_t *connp, unsigned int obj_type, - int add_flags); -extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, - fsnotify_connp_t *connp, - unsigned int obj_type, int add_flags); +int fsnotify_add_mark(struct fsnotify_mark *mark, void *obj, + unsigned int obj_type, int add_flags); +int fsnotify_add_mark_locked(struct fsnotify_mark *mark, void *obj, + unsigned int obj_type, int add_flags); /* attach the mark to the inode */ static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, struct inode *inode, int add_flags) { - return fsnotify_add_mark(mark, &inode->i_fsnotify_marks, - FSNOTIFY_OBJ_TYPE_INODE, add_flags); + return fsnotify_add_mark(mark, inode, FSNOTIFY_OBJ_TYPE_INODE, + add_flags); } static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, struct inode *inode, int add_flags) { - return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks, - FSNOTIFY_OBJ_TYPE_INODE, add_flags); + return fsnotify_add_mark_locked(mark, inode, FSNOTIFY_OBJ_TYPE_INODE, + add_flags); +} + +static inline struct fsnotify_mark *fsnotify_find_inode_mark( + struct inode *inode, + struct fsnotify_group *group) +{ + return fsnotify_find_mark(inode, FSNOTIFY_OBJ_TYPE_INODE, group); } /* given a group and a mark, flag mark to be freed when all references are dropped */ @@ -845,6 +881,9 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) static inline void fsnotify_sb_delete(struct super_block *sb) {} +static inline void fsnotify_sb_free(struct super_block *sb) +{} + static inline void fsnotify_update_flags(struct dentry *dentry) {} diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 54d53f345d..b792274189 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -83,19 +83,18 @@ static inline void early_trace_init(void) { } struct module; struct ftrace_hash; -struct ftrace_direct_func; #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \ defined(CONFIG_DYNAMIC_FTRACE) -const char * +int ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym); #else -static inline const char * +static inline int ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym) { - return NULL; + return 0; } #endif @@ -414,7 +413,6 @@ struct ftrace_func_entry { }; #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS -extern int ftrace_direct_func_count; unsigned long ftrace_find_rec_direct(unsigned long ip); int register_ftrace_direct(struct ftrace_ops 
*ops, unsigned long addr); int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, @@ -426,7 +424,6 @@ void ftrace_stub_direct_tramp(void); #else struct ftrace_ops; -# define ftrace_direct_func_count 0 static inline unsigned long ftrace_find_rec_direct(unsigned long ip) { return 0; @@ -822,7 +819,8 @@ static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); #if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \ - defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) + defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) || \ + defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) /** * ftrace_modify_call - convert from one addr to another (no nop) * @rec: the call site record (e.g. mcount/fentry) diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h deleted file mode 100644 index c285968e43..0000000000 --- a/include/linux/genetlink.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_GENERIC_NETLINK_H -#define __LINUX_GENERIC_NETLINK_H - -#include <uapi/linux/genetlink.h> - - -/* All generic netlink requests are serialized by a global lock. */ -extern void genl_lock(void); -extern void genl_unlock(void); - -/* for synchronisation between af_netlink and genetlink */ -extern atomic_t genl_sk_destructing_cnt; -extern wait_queue_head_t genl_sk_destructing_waitq; - -#define MODULE_ALIAS_GENL_FAMILY(family)\ - MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) - -#endif /* __LINUX_GENERIC_NETLINK_H */ diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h index a419d93789..621b87a87d 100644 --- a/include/linux/genl_magic_struct.h +++ b/include/linux/genl_magic_struct.h @@ -15,8 +15,8 @@ #endif #include <linux/args.h> -#include <linux/genetlink.h> #include <linux/types.h> +#include <net/genetlink.h> extern int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void); extern void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c775ea3c60..7f9691d375 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -6,6 +6,8 @@ #include <linux/mmzone.h> #include <linux/topology.h> +#include <linux/alloc_tag.h> +#include <linux/sched.h> struct vm_area_struct; struct mempolicy; @@ -155,6 +157,31 @@ static inline int gfp_zonelist(gfp_t flags) } /* + * gfp flag masking for nested internal allocations. + * + * For code that needs to do allocations inside the public allocation API (e.g. + * memory allocation tracking code) the allocations need to obey the caller + * allocation context constraints to prevent allocation context mismatches (e.g. + * GFP_KERNEL allocations in GFP_NOFS contexts) from causing deadlocks. + * + * It is also assumed that these nested allocations are for internal kernel + * object storage purposes only and are not going to be used for DMA, etc. Hence + * we strip out all the zone information and leave just the context information + * intact. + * + * Further, internal allocations must fail before the higher level allocation + * can fail, so we must make them fail faster and fail silently. We also don't + * want them to deplete emergency reserves. Hence callers must be + * prepared for these nested allocations to fail. 
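+ *
+ * An illustrative nested allocation in hypothetical tracking code that runs
+ * while servicing someone else's allocation:
+ *
+ *	meta = kmalloc(sizeof(*meta), gfp_nested_mask(gfp));
+ *
+ * where a NULL return is simply tolerated.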
+ */ +static inline gfp_t gfp_nested_mask(gfp_t flags) +{ + return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) | + (__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)); +} + +/* * We get the zone list from the current node and the gfp_mask. * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones. * There are two zonelists per node, one for all zones with memory and @@ -175,42 +202,46 @@ static inline void arch_free_page(struct page *page, int order) { } static inline void arch_alloc_page(struct page *page, int order) { } #endif -struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, +struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask); -struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, +#define __alloc_pages(...) alloc_hooks(__alloc_pages_noprof(__VA_ARGS__)) + +struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask); +#define __folio_alloc(...) alloc_hooks(__folio_alloc_noprof(__VA_ARGS__)) -unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, +unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, struct list_head *page_list, struct page **page_array); +#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__)) -unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp, +unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, unsigned long nr_pages, struct page **page_array); +#define alloc_pages_bulk_array_mempolicy(...) \ + alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__)) /* Bulk allocate order-0 pages */ -static inline unsigned long -alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list) -{ - return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL); -} +#define alloc_pages_bulk_list(_gfp, _nr_pages, _list) \ + __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL) -static inline unsigned long -alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array) -{ - return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array); -} +#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array) \ + __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array) static inline unsigned long -alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array) +alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, + struct page **page_array) { if (nid == NUMA_NO_NODE) nid = numa_mem_id(); - return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array); + return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array); } +#define alloc_pages_bulk_array_node(...) \ + alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__)) + static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) { gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN); @@ -230,82 +261,104 @@ static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) * online. For more general interface, see alloc_pages_node(). 
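 * (As elsewhere in this patch, the _noprof name is the uninstrumented entry
 * point; the alloc_hooks() wrapper attributes the allocation to its call site
 * when memory allocation profiling is enabled.)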
*/ static inline struct page * -__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) +__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) { VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); warn_if_node_offline(nid, gfp_mask); - return __alloc_pages(gfp_mask, order, nid, NULL); + return __alloc_pages_noprof(gfp_mask, order, nid, NULL); } +#define __alloc_pages_node(...) alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__)) + static inline -struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid) +struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid) { VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); warn_if_node_offline(nid, gfp); - return __folio_alloc(gfp, order, nid, NULL); + return __folio_alloc_noprof(gfp, order, nid, NULL); } +#define __folio_alloc_node(...) alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__)) + /* * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE, * prefer the current CPU's closest node. Otherwise node must be valid and * online. */ -static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, - unsigned int order) +static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask, + unsigned int order) { if (nid == NUMA_NO_NODE) nid = numa_mem_id(); - return __alloc_pages_node(nid, gfp_mask, order); + return __alloc_pages_node_noprof(nid, gfp_mask, order); } +#define alloc_pages_node(...) alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__)) + #ifdef CONFIG_NUMA -struct page *alloc_pages(gfp_t gfp, unsigned int order); -struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, +struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order); +struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid); -struct folio *folio_alloc(gfp_t gfp, unsigned int order); -struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, +struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order); +struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, bool hugepage); #else -static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order) +static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) { - return alloc_pages_node(numa_node_id(), gfp_mask, order); + return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order); } -static inline struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, +static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid) { - return alloc_pages(gfp, order); + return alloc_pages_noprof(gfp, order); } -static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order) +static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) { return __folio_alloc_node(gfp, order, numa_node_id()); } -#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \ - folio_alloc(gfp, order) +#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage) \ + folio_alloc_noprof(gfp, order) #endif + +#define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__)) +#define alloc_pages_mpol(...) alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__)) +#define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__)) +#define vma_alloc_folio(...) 
alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__)) + #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) -static inline struct page *alloc_page_vma(gfp_t gfp, + +static inline struct page *alloc_page_vma_noprof(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false); + struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false); return &folio->page; } +#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__)) + +extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order); +#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__)) + +extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask); +#define get_zeroed_page(...) alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__)) -extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); -extern unsigned long get_zeroed_page(gfp_t gfp_mask); +void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1); +#define alloc_pages_exact(...) alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__)) -void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1); void free_pages_exact(void *virt, size_t size); -__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2); -#define __get_free_page(gfp_mask) \ - __get_free_pages((gfp_mask), 0) +__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2); +#define alloc_pages_exact_nid(...) \ + alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__)) -#define __get_dma_pages(gfp_mask, order) \ - __get_free_pages((gfp_mask) | GFP_DMA, (order)) +#define __get_free_page(gfp_mask) \ + __get_free_pages((gfp_mask), 0) + +#define __get_dma_pages(gfp_mask, order) \ + __get_free_pages((gfp_mask) | GFP_DMA, (order)) extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); @@ -374,10 +427,14 @@ extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); #ifdef CONFIG_CONTIG_ALLOC /* The below functions must be run on a range from a single zone. */ -extern int alloc_contig_range(unsigned long start, unsigned long end, +extern int alloc_contig_range_noprof(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask); -extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, - int nid, nodemask_t *nodemask); +#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__)) + +extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, + int nid, nodemask_t *nodemask); +#define alloc_contig_pages(...) alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__)) + #endif void free_contig_range(unsigned long pfn, unsigned long nr_pages); diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h index 13becafe41..313be4ad79 100644 --- a/include/linux/gfp_types.h +++ b/include/linux/gfp_types.h @@ -55,6 +55,9 @@ enum { #ifdef CONFIG_LOCKDEP ___GFP_NOLOCKDEP_BIT, #endif +#ifdef CONFIG_SLAB_OBJ_EXT + ___GFP_NO_OBJ_EXT_BIT, +#endif ___GFP_LAST_BIT }; @@ -95,6 +98,11 @@ enum { #else #define ___GFP_NOLOCKDEP 0 #endif +#ifdef CONFIG_SLAB_OBJ_EXT +#define ___GFP_NO_OBJ_EXT BIT(___GFP_NO_OBJ_EXT_BIT) +#else +#define ___GFP_NO_OBJ_EXT 0 +#endif /* * Physical address zone modifiers (see linux/mmzone.h - low four bits) @@ -135,12 +143,15 @@ enum { * node with no fallbacks or placement policy enforcements. 
* * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. + * + * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension. */ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) +#define __GFP_NO_OBJ_EXT ((__force gfp_t)___GFP_NO_OBJ_EXT) /** * DOC: Watermark modifiers diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 7ecc25c543..56ac7e7a28 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -74,6 +74,12 @@ static inline bool gpio_is_valid(int number) * Until they are all fixed, leave 0-512 space for them. */ #define GPIO_DYNAMIC_BASE 512 +/* + * Define the maximum possible GPIO number in the global numberspace. + * While GPIO bases and numbers are positive, we cap the value at the signed + * maximum because a lot of code uses negative values for special cases. + */ +#define GPIO_DYNAMIC_MAX INT_MAX /* Always use the library code for GPIO management calls, * or when sleeping may be involved. @@ -114,8 +120,6 @@ static inline int gpio_to_irq(unsigned gpio) } int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); -int gpio_request_array(const struct gpio *array, size_t num); -void gpio_free_array(const struct gpio *array, size_t num); /* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */ @@ -146,11 +150,6 @@ static inline int gpio_request_one(unsigned gpio, return -ENOSYS; } -static inline int gpio_request_array(const struct gpio *array, size_t num) -{ - return -ENOSYS; -} - static inline void gpio_free(unsigned gpio) { might_sleep(); @@ -159,14 +158,6 @@ static inline void gpio_free(unsigned gpio) WARN_ON(1); } -static inline void gpio_free_array(const struct gpio *array, size_t num) -{ - might_sleep(); - - /* GPIO can never have been requested */ - WARN_ON(1); -} - static inline int gpio_direction_input(unsigned gpio) { return -ENOSYS; diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index f8617eaf08..0032bb6e7d 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -376,9 +376,7 @@ struct gpio_irq_chip { * @names: if set, must be an array of strings to use as alternative * names for the GPIOs in this chip. Any entry in the array * may be NULL if there is no alias for the GPIO, however the - * array must be @ngpio entries long. A name can include a single printk - * format specifier for an unsigned int. It is substituted by the actual - * number of the gpio. + * array must be @ngpio entries long. * @can_sleep: flag must be set iff get()/set() methods sleep, as they * must while accessing GPIO expander chips over I2C or SPI. 
This * implies that if the chip supports IRQs, these IRQs need to be threaded diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h index 1a14e23922..0d22093080 100644 --- a/include/linux/gpio/property.h +++ b/include/linux/gpio/property.h @@ -4,7 +4,11 @@ #include <linux/property.h> +struct software_node; + #define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \ PROPERTY_ENTRY_REF(_name_, _chip_node_, _idx_, _flags_) +extern const struct software_node swnode_gpio_undefined; + #endif /* __LINUX_GPIO_PROPERTY_H */ diff --git a/include/linux/hid.h b/include/linux/hid.h index b12cb1c8e6..8e06d89698 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -474,9 +474,9 @@ struct hid_usage { __s8 wheel_factor; /* 120/resolution_multiplier */ __u16 code; /* input driver code */ __u8 type; /* input driver type */ - __s8 hat_min; /* hat switch fun */ - __s8 hat_max; /* ditto */ - __s8 hat_dir; /* ditto */ + __s16 hat_min; /* hat switch fun */ + __s16 hat_max; /* ditto */ + __s16 hat_dir; /* ditto */ __s16 wheel_accumulated; /* hi-res wheel */ }; diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h index 7118ac28d4..eec2592dec 100644 --- a/include/linux/hid_bpf.h +++ b/include/linux/hid_bpf.h @@ -103,6 +103,9 @@ struct hid_bpf_ops { unsigned char reportnum, __u8 *buf, size_t len, enum hid_report_type rtype, enum hid_class_request reqtype); + int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len); + int (*hid_input_report)(struct hid_device *hid, enum hid_report_type type, + u8 *data, u32 size, int interrupt); struct module *owner; const struct bus_type *bus_type; }; @@ -149,10 +152,8 @@ static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; } static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {} static inline void hid_bpf_destroy_device(struct hid_device *hid) {} static inline void hid_bpf_device_init(struct hid_device *hid) {} -static inline u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size) -{ - return kmemdup(rdesc, *size, GFP_KERNEL); -} +#define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size) \ + ((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL)) #endif /* CONFIG_HID_BPF */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index de0c891050..c73ad77fa3 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -64,9 +64,6 @@ ssize_t single_hugepage_flag_show(struct kobject *kobj, enum transparent_hugepage_flag flag); extern struct kobj_attribute shmem_enabled_attr; -#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) -#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) - /* * Mask of all large folio orders supported for anonymous THP; all orders up to * and including PMD_ORDER, except order-0 (which is not "huge") and order-1 @@ -75,26 +72,47 @@ extern struct kobj_attribute shmem_enabled_attr; #define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1))) /* - * Mask of all large folio orders supported for file THP. + * Mask of all large folio orders supported for file THP. Folios in a DAX + * file are never split, and the MAX_PAGECACHE_ORDER limit does not apply + * to them. */ -#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER)) +#define THP_ORDERS_ALL_FILE_DAX \ + (BIT(PMD_ORDER) | BIT(PUD_ORDER)) +#define THP_ORDERS_ALL_FILE_DEFAULT \ + ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0)) /* * Mask of all large folio orders supported for THP. 
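 * (As defined below, this is the union of THP_ORDERS_ALL_ANON and the two
 * file masks above.)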
*/ -#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE) +#define THP_ORDERS_ALL \ + (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT) -#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \ - (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order))) +#define TVA_SMAPS (1 << 0) /* Will be used for procfs */ +#define TVA_IN_PF (1 << 1) /* Page fault handler */ +#define TVA_ENFORCE_SYSFS (1 << 2) /* Obey sysfs configuration */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \ + (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order))) + +#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES #define HPAGE_PMD_SHIFT PMD_SHIFT -#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) +#define HPAGE_PUD_SHIFT PUD_SHIFT +#else +#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) +#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) +#endif + +#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) +#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) +#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) -#define HPAGE_PUD_SHIFT PUD_SHIFT -#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) +#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT) +#define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER) #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) +#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE extern unsigned long transparent_hugepage_flags; extern unsigned long huge_anon_orders_always; @@ -210,17 +228,15 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma) } unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, - unsigned long vm_flags, bool smaps, - bool in_pf, bool enforce_sysfs, + unsigned long vm_flags, + unsigned long tva_flags, unsigned long orders); /** * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma * @vma: the vm area to check * @vm_flags: use these vm_flags instead of vma->vm_flags - * @smaps: whether answer will be used for smaps file - * @in_pf: whether answer will be used by page fault handler - * @enforce_sysfs: whether sysfs config should be taken into account + * @tva_flags: Which TVA flags to honour * @orders: bitfield of all orders to consider * * Calculates the intersection of the requested hugepage orders and the allowed @@ -233,12 +249,12 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, */ static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, - unsigned long vm_flags, bool smaps, - bool in_pf, bool enforce_sysfs, + unsigned long vm_flags, + unsigned long tva_flags, unsigned long orders) { /* Optimization to check if required orders are enabled early. 
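 * (Callers combine TVA_* flags; the page-fault path, for instance, would be
 * expected to pass TVA_IN_PF | TVA_ENFORCE_SYSFS.)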
*/ - if (enforce_sysfs && vma_is_anonymous(vma)) { + if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) { unsigned long mask = READ_ONCE(huge_anon_orders_always); if (vm_flags & VM_HUGEPAGE) @@ -252,9 +268,37 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, return 0; } - return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, - enforce_sysfs, orders); + return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders); +} + +enum mthp_stat_item { + MTHP_STAT_ANON_FAULT_ALLOC, + MTHP_STAT_ANON_FAULT_FALLBACK, + MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, + MTHP_STAT_SWPOUT, + MTHP_STAT_SWPOUT_FALLBACK, + __MTHP_STAT_COUNT +}; + +struct mthp_stat { + unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT]; +}; + +#ifdef CONFIG_SYSFS +DECLARE_PER_CPU(struct mthp_stat, mthp_stats); + +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ + if (order <= 0 || order > PMD_ORDER) + return; + + this_cpu_inc(mthp_stats.stats[order][item]); } +#else +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ +} +#endif #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ @@ -262,8 +306,10 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); +unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags); -void folio_prep_large_rmappable(struct folio *folio); bool can_split_folio(struct folio *folio, int *pextra_pins); int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, unsigned int new_order); @@ -344,17 +390,15 @@ static inline bool folio_test_pmd_mappable(struct folio *folio) struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap); -struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, - pud_t *pud, int flags, struct dev_pagemap **pgmap); vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); -extern struct page *huge_zero_page; +extern struct folio *huge_zero_folio; extern unsigned long huge_zero_pfn; -static inline bool is_huge_zero_page(struct page *page) +static inline bool is_huge_zero_folio(const struct folio *folio) { - return READ_ONCE(huge_zero_page) == page; + return READ_ONCE(huge_zero_folio) == folio; } static inline bool is_huge_zero_pmd(pmd_t pmd) @@ -367,8 +411,8 @@ static inline bool is_huge_zero_pud(pud_t pud) return false; } -struct page *mm_get_huge_zero_page(struct mm_struct *mm); -void mm_put_huge_zero_page(struct mm_struct *mm); +struct folio *mm_get_huge_zero_folio(struct mm_struct *mm); +void mm_put_huge_zero_folio(struct mm_struct *mm); #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) @@ -378,13 +422,6 @@ static inline bool thp_migration_supported(void) } #else /* CONFIG_TRANSPARENT_HUGEPAGE */ -#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) -#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) -#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) - -#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) -#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) -#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) static inline bool folio_test_pmd_mappable(struct folio *folio) { @@ -404,19 +441,25 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, } static inline unsigned long 
thp_vma_allowable_orders(struct vm_area_struct *vma, - unsigned long vm_flags, bool smaps, - bool in_pf, bool enforce_sysfs, + unsigned long vm_flags, + unsigned long tva_flags, unsigned long orders) { return 0; } -static inline void folio_prep_large_rmappable(struct folio *folio) {} - #define transparent_hugepage_flags 0UL #define thp_get_unmapped_area NULL +static inline unsigned long +thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) +{ + return 0; +} + static inline bool can_split_folio(struct folio *folio, int *pextra_pins) { @@ -483,7 +526,7 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) return 0; } -static inline bool is_huge_zero_page(struct page *page) +static inline bool is_huge_zero_folio(const struct folio *folio) { return false; } @@ -498,7 +541,7 @@ static inline bool is_huge_zero_pud(pud_t pud) return false; } -static inline void mm_put_huge_zero_page(struct mm_struct *mm) +static inline void mm_put_huge_zero_folio(struct mm_struct *mm) { return; } @@ -509,12 +552,6 @@ static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, return NULL; } -static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, - unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) -{ - return NULL; -} - static inline bool thp_migration_supported(void) { return false; @@ -535,16 +572,4 @@ static inline int split_folio_to_order(struct folio *folio, int new_order) #define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0) #define split_folio(f) split_folio_to_order(f, 0) -/* - * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to - * limitations in the implementation like arm64 MTE can override this to - * false - */ -#ifndef arch_thp_swp_supported -static inline bool arch_thp_swp_supported(void) -{ - return true; -} -#endif - #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 77b30a8c60..8120d19761 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -174,8 +174,11 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud); +bool hugetlbfs_pagecache_present(struct hstate *h, + struct vm_area_struct *vma, + unsigned long address); -struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage); +struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio); extern int sysctl_hugetlb_shm_group; extern struct list_head huge_boot_pages[MAX_NUMNODES]; @@ -272,13 +275,9 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma); int hugetlb_vma_trylock_write(struct vm_area_struct *vma); void hugetlb_vma_assert_locked(struct vm_area_struct *vma); void hugetlb_vma_lock_release(struct kref *kref); - -int pmd_huge(pmd_t pmd); -int pud_huge(pud_t pud); long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot, unsigned long cp_flags); - bool is_hugetlb_entry_migration(pte_t pte); bool is_hugetlb_entry_hwpoisoned(pte_t pte); void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); @@ -298,8 +297,8 @@ static inline unsigned long hugetlb_total_pages(void) return 0; } -static inline struct address_space *hugetlb_page_mapping_lock_write( - struct page *hpage) +static inline struct address_space *hugetlb_folio_mapping_lock_write( + struct folio 
*folio) { return NULL; } @@ -329,13 +328,6 @@ static inline void hugetlb_zap_end( { } -static inline struct page *hugetlb_follow_page_mask( - struct vm_area_struct *vma, unsigned long address, unsigned int flags, - unsigned int *page_mask) -{ - BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE*/ -} - static inline int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *dst_vma, @@ -399,16 +391,6 @@ static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma) { } -static inline int pmd_huge(pmd_t pmd) -{ - return 0; -} - -static inline int pud_huge(pud_t pud) -{ - return 0; -} - static inline int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { @@ -493,16 +475,6 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } #endif /* !CONFIG_HUGETLB_PAGE */ -/* - * hugepages at page global directory. If arch support - * hugepages at pgd level, they need to define this. - */ -#ifndef pgd_huge -#define pgd_huge(x) 0 -#endif -#ifndef p4d_huge -#define p4d_huge(x) 0 -#endif #ifndef pgd_write static inline int pgd_write(pgd_t pgd) @@ -554,17 +526,13 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); } -extern const struct file_operations hugetlbfs_file_operations; extern const struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, int creat_flags, int page_size_log); -static inline bool is_file_hugepages(struct file *file) +static inline bool is_file_hugepages(const struct file *file) { - if (file->f_op == &hugetlbfs_file_operations) - return true; - - return is_file_shm_hugepages(file); + return file->f_op->fop_flags & FOP_HUGE_PAGES; } static inline struct hstate *hstate_inode(struct inode *i) @@ -713,6 +681,7 @@ HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable) /* Defines one hugetlb page size */ struct hstate { struct mutex resize_lock; + struct lock_class_key resize_key; int next_nid_to_alloc; int next_nid_to_free; unsigned int order; @@ -747,7 +716,8 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask, gfp_t gfp_mask); + nodemask_t *nmask, gfp_t gfp_mask, + bool allow_alloc_fallback); int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, pgoff_t idx); void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, @@ -859,9 +829,9 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, #define is_hugepage_only_range is_hugepage_only_range #endif -#ifndef arch_clear_hugepage_flags -static inline void arch_clear_hugepage_flags(struct page *page) { } -#define arch_clear_hugepage_flags arch_clear_hugepage_flags +#ifndef arch_clear_hugetlb_flags +static inline void arch_clear_hugetlb_flags(struct folio *folio) { } +#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags #endif #ifndef arch_make_huge_pte @@ -888,8 +858,8 @@ static inline int hstate_index(struct hstate *h) return h - hstates; } -extern int dissolve_free_huge_page(struct page *page); -extern int dissolve_free_huge_pages(unsigned long start_pfn, +int dissolve_free_hugetlb_folio(struct folio *folio); +int 
dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn); #ifdef CONFIG_MEMORY_FAILURE @@ -970,6 +940,30 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) return modified_mask; } +static inline bool htlb_allow_alloc_fallback(int reason) +{ + bool allowed_fallback = false; + + /* + * Note: the memory offline, memory failure and migration syscalls will + * be allowed to fall back to other nodes due to lack of a better choice, + * which might break the per-node hugetlb pool. Other cases will + * set __GFP_THISNODE to avoid breaking the per-node hugetlb pool. + */ + switch (reason) { + case MR_MEMORY_HOTPLUG: + case MR_MEMORY_FAILURE: + case MR_SYSCALL: + case MR_MEMPOLICY_MBIND: + allowed_fallback = true; + break; + default: + break; + } + + return allowed_fallback; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { @@ -1065,7 +1059,8 @@ static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, static inline struct folio * alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask, gfp_t gfp_mask) + nodemask_t *nmask, gfp_t gfp_mask, + bool allow_alloc_fallback) { return NULL; } @@ -1150,12 +1145,12 @@ static inline int hstate_index(struct hstate *h) return 0; } -static inline int dissolve_free_huge_page(struct page *page) { return 0; } -static inline int dissolve_free_huge_pages(unsigned long start_pfn, +static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn) { return 0; @@ -1181,6 +1176,11 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) return 0; } +static inline bool htlb_allow_alloc_fallback(int reason) +{ + return false; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { @@ -1221,6 +1221,12 @@ static inline void hugetlb_register_node(struct node *node) static inline void hugetlb_unregister_node(struct node *node) { } + +static inline bool hugetlbfs_pagecache_present( + struct hstate *h, struct vm_area_struct *vma, unsigned long address) +{ + return false; +} #endif /* CONFIG_HUGETLB_PAGE */ static inline spinlock_t *huge_pte_lock(struct hstate *h, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 96ceb40954..5e39baa7f6 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -820,6 +820,8 @@ struct vmbus_requestor { #define VMBUS_RQST_RESET (U64_MAX - 3) struct vmbus_device { + /* preferred ring buffer size in KB, 0 means no preferred size for this device */ + size_t pref_ring_size; u16 dev_type; guid_t guid; bool perf_device; diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h index 98ef73b7c8..1784ac7afb 100644 --- a/include/linux/i2c-mux.h +++ b/include/linux/i2c-mux.h @@ -56,8 +56,7 @@ struct i2c_adapter *i2c_root_adapter(struct device *dev); * callback functions to perform hardware-specific mux control. */ int i2c_mux_add_adapter(struct i2c_mux_core *muxc, - u32 force_nr, u32 chan_id, - unsigned int class); + u32 force_nr, u32 chan_id); void i2c_mux_del_adapters(struct i2c_mux_core *muxc); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 5e6cd43a6d..424acb98c7 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -852,7 +852,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) /* i2c adapter classes (bitmask) */ #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ...
*/ -#define I2C_CLASS_SPD (1<<7) /* Memory modules */ /* Warn users that the adapter doesn't support classes anymore */ #define I2C_CLASS_DEPRECATED (1<<8) @@ -961,8 +960,6 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); #define builtin_i2c_driver(__i2c_driver) \ builtin_driver(__i2c_driver, i2c_add_driver) -#endif /* I2C */ - /* must call put_device() when done with returned i2c_client device */ struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode); @@ -972,6 +969,28 @@ struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode); /* must call i2c_put_adapter() when done with returned i2c_adapter device */ struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode); +#else /* I2C */ + +static inline struct i2c_client * +i2c_find_device_by_fwnode(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline struct i2c_adapter * +i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline struct i2c_adapter * +i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode) +{ + return NULL; +} + +#endif /* !I2C */ + #if IS_ENABLED(CONFIG_OF) /* must call put_device() when done with returned i2c_client device */ static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 4fd9735bb7..de2dce743e 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1287,6 +1287,24 @@ struct ieee80211_ttlm_elem { u8 optional[]; } __packed; +/** + * struct ieee80211_bss_load_elem - BSS Load element + * + * Defined in section 9.4.2.26 in IEEE 802.11-REVme D4.1 + * + * @sta_count: total number of STAs currently associated with the AP. + * @channel_util: Percentage of time that the access point sensed the channel + * was busy. This value is in range [0, 255], the highest value means + * 100% busy. + * @avail_admission_capa: remaining amount of medium time used for admission + * control. + */ +struct ieee80211_bss_load_elem { + __le16 sta_count; + u8 channel_util; + __le16 avail_admission_capa; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -2742,9 +2760,11 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len) #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 -#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 -#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 -#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2 +#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 +#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 +#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2 +#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3 +#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 4 /** * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 719cf9cc6e..383614ebd7 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -48,6 +48,7 @@ struct iio_dev; * be used. * @irq_flags: flags for the interrupt used by the triggered buffer * @num_slots: Number of sequencer slots + * @irq_line: IRQ for reading conversions.
If 0, spi->irq will be used */ struct ad_sigma_delta_info { int (*set_channel)(struct ad_sigma_delta *, unsigned int channel); @@ -62,6 +63,7 @@ struct ad_sigma_delta_info { unsigned int data_reg; unsigned long irq_flags; unsigned int num_slots; + int irq_line; }; /** @@ -89,6 +91,7 @@ struct ad_sigma_delta { unsigned int active_slots; unsigned int current_slot; unsigned int num_slots; + int irq_line; bool status_appended; /* map slots to channels in order to know what to expect from devices */ unsigned int *slots; diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h index a6d7938186..8099759d72 100644 --- a/include/linux/iio/backend.h +++ b/include/linux/iio/backend.h @@ -4,6 +4,7 @@ #include <linux/types.h> +struct iio_chan_spec; struct fwnode_handle; struct iio_backend; struct device; @@ -15,12 +16,32 @@ enum iio_backend_data_type { IIO_BACKEND_DATA_TYPE_MAX }; +enum iio_backend_data_source { + IIO_BACKEND_INTERNAL_CONTINUOS_WAVE, + IIO_BACKEND_EXTERNAL, + IIO_BACKEND_DATA_SOURCE_MAX +}; + +/** + * IIO_BACKEND_EX_INFO - Helper for an IIO extended channel attribute + * @_name: Attribute name + * @_shared: Whether the attribute is shared between all channels + * @_what: Data private to the driver + */ +#define IIO_BACKEND_EX_INFO(_name, _shared, _what) { \ + .name = (_name), \ + .shared = (_shared), \ + .read = iio_backend_ext_info_get, \ + .write = iio_backend_ext_info_set, \ + .private = (_what), \ +} + /** * struct iio_backend_data_fmt - Backend data format - * @type: Data type. - * @sign_extend: Bool to tell if the data is sign extended. - * @enable: Enable/Disable the data format module. If disabled, - * not formatting will happen. + * @type: Data type. + * @sign_extend: Bool to tell if the data is sign extended. + * @enable: Enable/Disable the data format module. If disabled, + * not formatting will happen. */ struct iio_backend_data_fmt { enum iio_backend_data_type type; @@ -28,15 +49,38 @@ struct iio_backend_data_fmt { bool enable; }; +/* vendor specific from 32 */ +enum iio_backend_test_pattern { + IIO_BACKEND_NO_TEST_PATTERN, + /* modified prbs9 */ + IIO_BACKEND_ADI_PRBS_9A = 32, + IIO_BACKEND_TEST_PATTERN_MAX +}; + +enum iio_backend_sample_trigger { + IIO_BACKEND_SAMPLE_TRIGGER_EDGE_FALLING, + IIO_BACKEND_SAMPLE_TRIGGER_EDGE_RISING, + IIO_BACKEND_SAMPLE_TRIGGER_MAX +}; + /** * struct iio_backend_ops - operations structure for an iio_backend - * @enable: Enable backend. - * @disable: Disable backend. - * @chan_enable: Enable one channel. - * @chan_disable: Disable one channel. - * @data_format_set: Configure the data format for a specific channel. - * @request_buffer: Request an IIO buffer. - * @free_buffer: Free an IIO buffer. + * @enable: Enable backend. + * @disable: Disable backend. + * @chan_enable: Enable one channel. + * @chan_disable: Disable one channel. + * @data_format_set: Configure the data format for a specific channel. + * @data_source_set: Configure the data source for a specific channel. + * @set_sample_rate: Configure the sampling rate for a specific channel. + * @test_pattern_set: Configure a test pattern. + * @chan_status: Get the channel status. + * @iodelay_set: Set digital I/O delay. + * @data_sample_trigger: Control when to sample data. + * @request_buffer: Request an IIO buffer. + * @free_buffer: Free an IIO buffer. + * @extend_chan_spec: Extend an IIO channel. + * @ext_info_set: Extended info setter. + * @ext_info_get: Extended info getter. 
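[A hedged sketch of the new extended-info plumbing, not taken from this commit: a backend driver can describe one extended channel attribute with IIO_BACKEND_EX_INFO() and let the iio_backend_ext_info_get()/_set() trampolines declared below route reads and writes to its ops. The attribute name "test_mode" and the zero private cookie are invented.]

#include <linux/iio/backend.h>
#include <linux/iio/iio.h>

/*
 * One per-channel attribute; the macro fills in .read/.write with the
 * generic iio_backend_ext_info_get()/_set() helpers.
 */
static const struct iio_chan_spec_ext_info my_backend_ext_info[] = {
	IIO_BACKEND_EX_INFO("test_mode", IIO_SEPARATE, 0),
	{ }
};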
**/ struct iio_backend_ops { int (*enable)(struct iio_backend *back); @@ -45,10 +89,30 @@ struct iio_backend_ops { int (*chan_disable)(struct iio_backend *back, unsigned int chan); int (*data_format_set)(struct iio_backend *back, unsigned int chan, const struct iio_backend_data_fmt *data); + int (*data_source_set)(struct iio_backend *back, unsigned int chan, + enum iio_backend_data_source data); + int (*set_sample_rate)(struct iio_backend *back, unsigned int chan, + u64 sample_rate_hz); + int (*test_pattern_set)(struct iio_backend *back, + unsigned int chan, + enum iio_backend_test_pattern pattern); + int (*chan_status)(struct iio_backend *back, unsigned int chan, + bool *error); + int (*iodelay_set)(struct iio_backend *back, unsigned int chan, + unsigned int taps); + int (*data_sample_trigger)(struct iio_backend *back, + enum iio_backend_sample_trigger trigger); struct iio_buffer *(*request_buffer)(struct iio_backend *back, struct iio_dev *indio_dev); void (*free_buffer)(struct iio_backend *back, struct iio_buffer *buffer); + int (*extend_chan_spec)(struct iio_backend *back, + struct iio_chan_spec *chan); + int (*ext_info_set)(struct iio_backend *back, uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len); + int (*ext_info_get)(struct iio_backend *back, uintptr_t private, + const struct iio_chan_spec *chan, char *buf); }; int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan); @@ -56,10 +120,31 @@ int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan); int devm_iio_backend_enable(struct device *dev, struct iio_backend *back); int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan, const struct iio_backend_data_fmt *data); +int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan, + enum iio_backend_data_source data); +int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan, + u64 sample_rate_hz); +int iio_backend_test_pattern_set(struct iio_backend *back, + unsigned int chan, + enum iio_backend_test_pattern pattern); +int iio_backend_chan_status(struct iio_backend *back, unsigned int chan, + bool *error); +int iio_backend_iodelay_set(struct iio_backend *back, unsigned int lane, + unsigned int taps); +int iio_backend_data_sample_trigger(struct iio_backend *back, + enum iio_backend_sample_trigger trigger); int devm_iio_backend_request_buffer(struct device *dev, struct iio_backend *back, struct iio_dev *indio_dev); +ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len); +ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private, + const struct iio_chan_spec *chan, char *buf); +int iio_backend_extend_chan_spec(struct iio_dev *indio_dev, + struct iio_backend *back, + struct iio_chan_spec *chan); void *iio_backend_get_priv(const struct iio_backend *conv); struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name); struct iio_backend * diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index 18d3702fa9..6e27e47077 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -132,7 +132,9 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer, struct iio_dev *indio_dev); int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, char __user *user_buffer); -size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); +int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n, + 
const char __user *user_buffer); +size_t iio_dma_buffer_usage(struct iio_buffer *buffer); int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length); int iio_dma_buffer_request_update(struct iio_buffer *buffer); diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h index cbb8ba957f..81d9a19aeb 100644 --- a/include/linux/iio/buffer-dmaengine.h +++ b/include/linux/iio/buffer-dmaengine.h @@ -7,14 +7,28 @@ #ifndef __IIO_DMAENGINE_H__ #define __IIO_DMAENGINE_H__ +#include <linux/iio/buffer.h> + struct iio_dev; struct device; -struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, - const char *channel); void iio_dmaengine_buffer_free(struct iio_buffer *buffer); -int devm_iio_dmaengine_buffer_setup(struct device *dev, - struct iio_dev *indio_dev, - const char *channel); +struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev, + struct iio_dev *indio_dev, + const char *channel, + enum iio_buffer_direction dir); + +#define iio_dmaengine_buffer_setup(dev, indio_dev, channel) \ + iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \ + IIO_BUFFER_DIRECTION_IN) + +int devm_iio_dmaengine_buffer_setup_ext(struct device *dev, + struct iio_dev *indio_dev, + const char *channel, + enum iio_buffer_direction dir); + +#define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel) \ + devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \ + IIO_BUFFER_DIRECTION_IN) #endif diff --git a/include/linux/iio/common/inv_sensors_timestamp.h b/include/linux/iio/common/inv_sensors_timestamp.h index a47d304d1b..8d506f1e9d 100644 --- a/include/linux/iio/common/inv_sensors_timestamp.h +++ b/include/linux/iio/common/inv_sensors_timestamp.h @@ -71,8 +71,7 @@ int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts, uint32_t period, bool fifo); void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts, - uint32_t fifo_period, size_t fifo_nb, - size_t sensor_nb, int64_t timestamp); + size_t sample_nb, int64_t timestamp); static inline int64_t inv_sensors_timestamp_pop(struct inv_sensors_timestamp *ts) { diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index e370a7bb33..55e2b22086 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -788,6 +788,19 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) } #endif +#ifdef CONFIG_ACPI +bool iio_read_acpi_mount_matrix(struct device *dev, + struct iio_mount_matrix *orientation, + char *acpi_method); +#else +static inline bool iio_read_acpi_mount_matrix(struct device *dev, + struct iio_mount_matrix *orientation, + char *acpi_method) +{ + return false; +} +#endif + ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals); int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h index 1b608e0029..711a1f0d1a 100644 --- a/include/linux/instrumented.h +++ b/include/linux/instrumented.h @@ -148,6 +148,41 @@ instrument_copy_from_user_after(const void *to, const void __user *from, } /** + * instrument_memcpy_before - add instrumentation before non-instrumented memcpy + * @to: destination address + * @from: source address + * @n: number of bytes to copy + * + * Instrument memory accesses that happen in custom memcpy implementations. The + * instrumentation should be inserted before the memcpy call. 
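[A minimal sketch of the intended call pattern, assuming a raw, non-instrumented copy routine; raw_copy_mc() is a stand-in name for such a routine that returns the number of bytes not copied, not a real kernel API.]

#include <linux/instrumented.h>

extern unsigned long raw_copy_mc(void *to, const void *from, unsigned long n);

static unsigned long checked_copy(void *to, const void *from, unsigned long n)
{
	unsigned long left;

	/* Let KASAN/KCSAN check both buffers before the raw copy runs... */
	instrument_memcpy_before(to, from, n);
	left = raw_copy_mc(to, from, n);
	/* ...and tell KMSAN how many bytes were actually copied afterwards. */
	instrument_memcpy_after(to, from, n, left);
	return left;
}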
+ */ +static __always_inline void instrument_memcpy_before(void *to, const void *from, + unsigned long n) +{ + kasan_check_write(to, n); + kasan_check_read(from, n); + kcsan_check_write(to, n); + kcsan_check_read(from, n); +} + +/** + * instrument_memcpy_after - add instrumentation after non-instrumented memcpy + * @to: destination address + * @from: source address + * @n: number of bytes to copy + * @left: number of bytes not copied (if known) + * + * Instrument memory accesses that happen in custom memcpy implementations. The + * instrumentation should be inserted after the memcpy call. + */ +static __always_inline void instrument_memcpy_after(void *to, const void *from, + unsigned long n, + unsigned long left) +{ + kmsan_memmove(to, from, n - left); +} + +/** * instrument_get_user() - add instrumentation to get_user()-like macros * @to: destination variable, may not be address-taken * diff --git a/include/linux/integrity.h b/include/linux/integrity.h index 459b796837..f584237235 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h @@ -8,6 +8,7 @@ #define _LINUX_INTEGRITY_H #include <linux/fs.h> +#include <linux/iversion.h> enum integrity_status { INTEGRITY_PASS = 0, @@ -28,4 +29,37 @@ static inline void integrity_load_keys(void) } #endif /* CONFIG_INTEGRITY */ +/* An inode's attributes for detection of changes */ +struct integrity_inode_attributes { + u64 version; /* track inode changes */ + unsigned long ino; + dev_t dev; +}; + +/* + * On stacked filesystems the i_version alone is not enough to detect file data + * or metadata change. Additional metadata is required. + */ +static inline void +integrity_inode_attrs_store(struct integrity_inode_attributes *attrs, + u64 i_version, const struct inode *inode) +{ + attrs->version = i_version; + attrs->dev = inode->i_sb->s_dev; + attrs->ino = inode->i_ino; +} + +/* + * On stacked filesystems detect whether the inode or its content has changed. + */ +static inline bool +integrity_inode_attrs_changed(const struct integrity_inode_attributes *attrs, + const struct inode *inode) +{ + return (inode->i_sb->s_dev != attrs->dev || + inode->i_ino != attrs->ino || + !inode_eq_iversion(inode, attrs->version)); +} + + #endif /* _LINUX_INTEGRITY_H */ diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index f3196f82fd..c0397423d3 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -158,6 +158,26 @@ struct rapl_if_priv { void *rpi; }; +#ifdef CONFIG_PERF_EVENTS +/** + * struct rapl_package_pmu_data: Per package data for PMU support + * @scale: Scale of 2^-32 Joules for each energy counter increase. + * @lock: Lock to protect n_active and active_list. + * @n_active: Number of active events. + * @active_list: List of active events. + * @timer_interval: Maximum timer expiration time before counter overflow. + * @hrtimer: Periodically update the counter to prevent overflow. 
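[For context, a sketch of how an interface driver might opt a package into the new perf PMU; rapl_probe_package() and its error handling are illustrative, and only rapl_add_package() and rapl_package_add_pmu(), declared further down in this header, are real.]

#include <linux/err.h>
#include <linux/intel_rapl.h>
#include <linux/printk.h>

static int rapl_probe_package(int id, struct rapl_if_priv *priv)
{
	struct rapl_package *rp = rapl_add_package(id, priv, true);

	if (IS_ERR(rp))
		return PTR_ERR(rp);

	/* Optional: expose the package energy counters through perf. */
	if (rapl_package_add_pmu(rp))
		pr_warn("rapl: PMU registration failed\n");

	return 0;
}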
+ */ +struct rapl_package_pmu_data { + u64 scale[RAPL_DOMAIN_MAX]; + raw_spinlock_t lock; + int n_active; + struct list_head active_list; + ktime_t timer_interval; + struct hrtimer hrtimer; +}; +#endif + /* maximum rapl package domain name: package-%d-die-%d */ #define PACKAGE_DOMAIN_NAME_LENGTH 30 @@ -176,6 +196,10 @@ struct rapl_package { struct cpumask cpumask; char name[PACKAGE_DOMAIN_NAME_LENGTH]; struct rapl_if_priv *priv; +#ifdef CONFIG_PERF_EVENTS + bool has_pmu; + struct rapl_package_pmu_data pmu_data; +#endif }; struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv, @@ -188,4 +212,12 @@ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu); void rapl_remove_package(struct rapl_package *rp); +#ifdef CONFIG_PERF_EVENTS +int rapl_package_add_pmu(struct rapl_package *rp); +void rapl_package_remove_pmu(struct rapl_package *rp); +#else +static inline int rapl_package_add_pmu(struct rapl_package *rp) { return 0; } +static inline void rapl_package_remove_pmu(struct rapl_package *rp) { } +#endif + #endif /* __INTEL_RAPL_H__ */ diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index a3529b962b..1e880cb0f4 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -27,16 +27,22 @@ enum intel_tpmi_id { /** * struct intel_tpmi_plat_info - Platform information for a TPMI device instance - * @package_id: CPU Package id - * @bus_number: PCI bus number - * @device_number: PCI device number + * @cdie_mask: Mask of all compute dies in the partition + * @package_id: CPU Package id + * @partition: Package partition id when multiple VSEC PCI devices per package + * @segment: PCI segment ID + * @bus_number: PCI bus number + * @device_number: PCI device number * @function_number: PCI function number * * Structure to store platform data for a TPMI device instance. This * struct is used to return data via tpmi_get_platform_data(). 
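[A hedged sketch of a TPMI feature driver reading the newly added topology fields; tpmi_show_topology() and its probe context are invented, while tpmi_get_platform_data() is the accessor named in the comment above.]

#include <linux/auxiliary_bus.h>
#include <linux/intel_tpmi.h>

static void tpmi_show_topology(struct auxiliary_device *auxdev)
{
	struct intel_tpmi_plat_info *info = tpmi_get_platform_data(auxdev);

	if (info)
		dev_info(&auxdev->dev,
			 "pkg %u partition %u segment %u cdie_mask %#x\n",
			 info->package_id, info->partition,
			 info->segment, info->cdie_mask);
}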
*/ struct intel_tpmi_plat_info { + u16 cdie_mask; u8 package_id; + u8 partition; + u8 segment; u8 bus_number; u8 device_number; u8 function_number; diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 5c9bdd3ffc..dac7466de5 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -168,7 +168,7 @@ static inline int __must_check request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev) { - return request_threaded_irq(irq, handler, NULL, flags, name, dev); + return request_threaded_irq(irq, handler, NULL, flags | IRQF_COND_ONESHOT, name, dev); } extern int __must_check diff --git a/include/linux/io.h b/include/linux/io.h index 235ba7d80a..59ec5eea69 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -6,6 +6,7 @@ #ifndef _LINUX_IO_H #define _LINUX_IO_H +#include <linux/sizes.h> #include <linux/types.h> #include <linux/init.h> #include <linux/bug.h> @@ -16,9 +17,15 @@ struct device; struct resource; -__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); +#ifndef __iowrite32_copy +void __iowrite32_copy(void __iomem *to, const void *from, size_t count); +#endif + void __ioread32_copy(void *to, const void __iomem *from, size_t count); + +#ifndef __iowrite64_copy void __iowrite64_copy(void __iomem *to, const void *from, size_t count); +#endif #ifdef CONFIG_MMU int ioremap_page_range(unsigned long addr, unsigned long end, diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 68ed6697fe..e123d5e17b 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -11,7 +11,6 @@ void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); void io_uring_unreg_ringfd(void); const char *io_uring_get_opcode(u8 opcode); -int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags); bool io_is_uring_fops(struct file *file); static inline void io_uring_files_cancel(void) @@ -45,11 +44,6 @@ static inline const char *io_uring_get_opcode(u8 opcode) { return ""; } -static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd, - unsigned int issue_flags) -{ - return -EOPNOTSUPP; -} static inline bool io_is_uring_fops(struct file *file) { return false; diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index e453a997c0..447fbfd322 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -26,12 +26,25 @@ static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe) #if defined(CONFIG_IO_URING) int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, struct iov_iter *iter, void *ioucmd); + +/* + * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd + * and the corresponding io_uring request. + * + * Note: the caller should never hard code @issue_flags and is only allowed + * to pass the mask provided by the core io_uring code. + */ void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2, unsigned issue_flags); + void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, void (*task_work_cb)(struct io_uring_cmd *, unsigned), unsigned flags); +/* + * Note: the caller should never hard code @issue_flags and only use the + * mask provided by the core io_uring code. 
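[A sketch of the pattern these comments mandate, editorial only: my_hw_submit() is a placeholder, and the -EIOCBQUEUED return mirrors existing ->uring_cmd() users rather than anything in this hunk.]

#include <linux/errno.h>
#include <linux/io_uring/cmd.h>

extern int my_hw_submit(struct io_uring_cmd *ioucmd);

static int my_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	int ret = my_hw_submit(ioucmd);

	/* Pass issue_flags through untouched, never a hard-coded value. */
	io_uring_cmd_done(ioucmd, ret, 0, issue_flags);
	return -EIOCBQUEUED;
}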
+ */ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, unsigned int issue_flags); @@ -56,6 +69,17 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, } #endif +/* + * Polled completions must ensure they are coming from a poll queue, and + * hence are completed inside the usual poll handling loops. + */ +static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd, + ssize_t ret, ssize_t res2) +{ + lockdep_assert(in_task()); + io_uring_cmd_done(ioucmd, ret, res2, 0); +} + /* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */ static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd, void (*task_work_cb)(struct io_uring_cmd *, unsigned)) diff --git a/include/linux/io_uring/net.h b/include/linux/io_uring/net.h new file mode 100644 index 0000000000..b58f39fed4 --- /dev/null +++ b/include/linux/io_uring/net.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_IO_URING_NET_H +#define _LINUX_IO_URING_NET_H + +struct io_uring_cmd; + +#if defined(CONFIG_IO_URING) +int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags); + +#else +static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + return -EOPNOTSUPP; +} +#endif + +#endif diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 327d7f43c1..7abdc09271 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -205,8 +205,8 @@ struct io_submit_state { bool plug_started; bool need_plug; + bool cq_flush; unsigned short submit_nr; - unsigned int cqes_count; struct blk_plug plug; }; @@ -219,7 +219,7 @@ struct io_ev_fd { }; struct io_alloc_cache { - struct io_wq_work_node list; + void **entries; unsigned int nr_cached; unsigned int max_cached; size_t elem_size; @@ -299,6 +299,8 @@ struct io_ring_ctx { struct io_hash_table cancel_table_locked; struct io_alloc_cache apoll_cache; struct io_alloc_cache netmsg_cache; + struct io_alloc_cache rw_cache; + struct io_alloc_cache uring_cache; /* * Any cancelable uring_cmd is added to this list in @@ -341,14 +343,8 @@ struct io_ring_ctx { unsigned cq_last_tm_flush; } ____cacheline_aligned_in_smp; - struct io_uring_cqe completion_cqes[16]; - spinlock_t completion_lock; - /* IRQ completion list, under ->completion_lock */ - unsigned int locked_free_nr; - struct io_wq_work_list locked_free_list; - struct list_head io_buffers_comp; struct list_head cq_overflow_list; struct io_hash_table cancel_table; @@ -371,9 +367,6 @@ struct io_ring_ctx { struct list_head io_buffers_cache; - /* deferred free list, protected by ->uring_lock */ - struct hlist_head io_buf_list; - /* Keep this last, we don't need it for the fast path */ struct wait_queue_head poll_wq; struct io_restriction restrictions; @@ -438,8 +431,6 @@ struct io_ring_ctx { }; struct io_tw_state { - /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */ - bool locked; }; enum { @@ -480,6 +471,7 @@ enum { REQ_F_CAN_POLL_BIT, REQ_F_BL_EMPTY_BIT, REQ_F_BL_NO_RECYCLE_BIT, + REQ_F_BUFFERS_COMMIT_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -558,6 +550,8 @@ enum { REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT), /* don't recycle provided buffers for this request */ REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT), + /* buffer ring head needs incrementing on put */ + REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT), }; typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct 
io_tw_state *ts); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 3b67d59a36..17b3f36ad8 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -69,8 +69,7 @@ enum iommu_fault_type { struct iommu_fault_page_request { #define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0) #define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1) -#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2) -#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3) +#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 2) u32 flags; u32 pasid; u32 grpid; @@ -518,6 +517,7 @@ static inline int __iommu_copy_struct_from_user_array( * Upon failure, ERR_PTR must be returned. * @domain_alloc_paging: Allocate an iommu_domain that can be used for * UNMANAGED, DMA, and DMA_FQ domain types. + * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing. * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU @@ -558,6 +558,8 @@ struct iommu_ops { struct device *dev, u32 flags, struct iommu_domain *parent, const struct iommu_user_data *user_data); struct iommu_domain *(*domain_alloc_paging)(struct device *dev); + struct iommu_domain *(*domain_alloc_sva)(struct device *dev, + struct mm_struct *mm); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); @@ -578,7 +580,8 @@ struct iommu_ops { struct iommu_page_response *msg); int (*def_domain_type)(struct device *dev); - void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid); + void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain); const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; @@ -1445,9 +1448,6 @@ static inline void iommu_debugfs_setup(void) {} #ifdef CONFIG_IOMMU_DMA #include <linux/msi.h> -/* Setup call for arch DMA mapping code */ -void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit); - int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr); @@ -1458,10 +1458,6 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg); struct msi_desc; struct msi_msg; -static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit) -{ -} - static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) { return -ENODEV; diff --git a/include/linux/iova.h b/include/linux/iova.h index 83c00fac2a..d2c4fd923e 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -65,6 +65,11 @@ static inline size_t iova_align(struct iova_domain *iovad, size_t size) return ALIGN(size, iovad->granule); } +static inline size_t iova_align_down(struct iova_domain *iovad, size_t size) +{ + return ALIGN_DOWN(size, iovad->granule); +} + static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) { return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); diff --git a/include/linux/irq.h b/include/linux/irq.h index 97baa937ab..a217e1029c 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -115,7 +115,7 @@ enum { * Return value for chip->irq_set_affinity() * * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity - * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity + * IRQ_SET_MASK_NOCOPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. 
Special code to * support stacked irqchips, which indicates skipping * all descendant irqchips. diff --git a/include/linux/irqchip/riscv-aplic.h b/include/linux/irqchip/riscv-aplic.h new file mode 100644 index 0000000000..ec8f7df505 --- /dev/null +++ b/include/linux/irqchip/riscv-aplic.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ +#ifndef __LINUX_IRQCHIP_RISCV_APLIC_H +#define __LINUX_IRQCHIP_RISCV_APLIC_H + +#include <linux/bitops.h> + +#define APLIC_MAX_IDC BIT(14) +#define APLIC_MAX_SOURCE 1024 + +#define APLIC_DOMAINCFG 0x0000 +#define APLIC_DOMAINCFG_RDONLY 0x80000000 +#define APLIC_DOMAINCFG_IE BIT(8) +#define APLIC_DOMAINCFG_DM BIT(2) +#define APLIC_DOMAINCFG_BE BIT(0) + +#define APLIC_SOURCECFG_BASE 0x0004 +#define APLIC_SOURCECFG_D BIT(10) +#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff +#define APLIC_SOURCECFG_SM_MASK 0x00000007 +#define APLIC_SOURCECFG_SM_INACTIVE 0x0 +#define APLIC_SOURCECFG_SM_DETACH 0x1 +#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4 +#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5 +#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6 +#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7 + +#define APLIC_MMSICFGADDR 0x1bc0 +#define APLIC_MMSICFGADDRH 0x1bc4 +#define APLIC_SMSICFGADDR 0x1bc8 +#define APLIC_SMSICFGADDRH 0x1bcc + +#ifdef CONFIG_RISCV_M_MODE +#define APLIC_xMSICFGADDR APLIC_MMSICFGADDR +#define APLIC_xMSICFGADDRH APLIC_MMSICFGADDRH +#else +#define APLIC_xMSICFGADDR APLIC_SMSICFGADDR +#define APLIC_xMSICFGADDRH APLIC_SMSICFGADDRH +#endif + +#define APLIC_xMSICFGADDRH_L BIT(31) +#define APLIC_xMSICFGADDRH_HHXS_MASK 0x1f +#define APLIC_xMSICFGADDRH_HHXS_SHIFT 24 +#define APLIC_xMSICFGADDRH_HHXS (APLIC_xMSICFGADDRH_HHXS_MASK << \ + APLIC_xMSICFGADDRH_HHXS_SHIFT) +#define APLIC_xMSICFGADDRH_LHXS_MASK 0x7 +#define APLIC_xMSICFGADDRH_LHXS_SHIFT 20 +#define APLIC_xMSICFGADDRH_LHXS (APLIC_xMSICFGADDRH_LHXS_MASK << \ + APLIC_xMSICFGADDRH_LHXS_SHIFT) +#define APLIC_xMSICFGADDRH_HHXW_MASK 0x7 +#define APLIC_xMSICFGADDRH_HHXW_SHIFT 16 +#define APLIC_xMSICFGADDRH_HHXW (APLIC_xMSICFGADDRH_HHXW_MASK << \ + APLIC_xMSICFGADDRH_HHXW_SHIFT) +#define APLIC_xMSICFGADDRH_LHXW_MASK 0xf +#define APLIC_xMSICFGADDRH_LHXW_SHIFT 12 +#define APLIC_xMSICFGADDRH_LHXW (APLIC_xMSICFGADDRH_LHXW_MASK << \ + APLIC_xMSICFGADDRH_LHXW_SHIFT) +#define APLIC_xMSICFGADDRH_BAPPN_MASK 0xfff +#define APLIC_xMSICFGADDRH_BAPPN_SHIFT 0 +#define APLIC_xMSICFGADDRH_BAPPN (APLIC_xMSICFGADDRH_BAPPN_MASK << \ + APLIC_xMSICFGADDRH_BAPPN_SHIFT) + +#define APLIC_xMSICFGADDR_PPN_SHIFT 12 + +#define APLIC_xMSICFGADDR_PPN_HART(__lhxs) \ + (BIT(__lhxs) - 1) + +#define APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) \ + (BIT(__lhxw) - 1) +#define APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs) \ + ((__lhxs)) +#define APLIC_xMSICFGADDR_PPN_LHX(__lhxw, __lhxs) \ + (APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) << \ + APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs)) + +#define APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) \ + (BIT(__hhxw) - 1) +#define APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs) \ + ((__hhxs) + APLIC_xMSICFGADDR_PPN_SHIFT) +#define APLIC_xMSICFGADDR_PPN_HHX(__hhxw, __hhxs) \ + (APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) << \ + APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs)) + +#define APLIC_IRQBITS_PER_REG 32 + +#define APLIC_SETIP_BASE 0x1c00 +#define APLIC_SETIPNUM 0x1cdc + +#define APLIC_CLRIP_BASE 0x1d00 +#define APLIC_CLRIPNUM 0x1ddc + +#define APLIC_SETIE_BASE 0x1e00 +#define APLIC_SETIENUM 0x1edc + +#define APLIC_CLRIE_BASE 
0x1f00 +#define APLIC_CLRIENUM 0x1fdc + +#define APLIC_SETIPNUM_LE 0x2000 +#define APLIC_SETIPNUM_BE 0x2004 + +#define APLIC_GENMSI 0x3000 + +#define APLIC_TARGET_BASE 0x3004 +#define APLIC_TARGET_HART_IDX_SHIFT 18 +#define APLIC_TARGET_HART_IDX_MASK 0x3fff +#define APLIC_TARGET_HART_IDX (APLIC_TARGET_HART_IDX_MASK << \ + APLIC_TARGET_HART_IDX_SHIFT) +#define APLIC_TARGET_GUEST_IDX_SHIFT 12 +#define APLIC_TARGET_GUEST_IDX_MASK 0x3f +#define APLIC_TARGET_GUEST_IDX (APLIC_TARGET_GUEST_IDX_MASK << \ + APLIC_TARGET_GUEST_IDX_SHIFT) +#define APLIC_TARGET_IPRIO_SHIFT 0 +#define APLIC_TARGET_IPRIO_MASK 0xff +#define APLIC_TARGET_IPRIO (APLIC_TARGET_IPRIO_MASK << \ + APLIC_TARGET_IPRIO_SHIFT) +#define APLIC_TARGET_EIID_SHIFT 0 +#define APLIC_TARGET_EIID_MASK 0x7ff +#define APLIC_TARGET_EIID (APLIC_TARGET_EIID_MASK << \ + APLIC_TARGET_EIID_SHIFT) + +#define APLIC_IDC_BASE 0x4000 +#define APLIC_IDC_SIZE 32 + +#define APLIC_IDC_IDELIVERY 0x00 + +#define APLIC_IDC_IFORCE 0x04 + +#define APLIC_IDC_ITHRESHOLD 0x08 + +#define APLIC_IDC_TOPI 0x18 +#define APLIC_IDC_TOPI_ID_SHIFT 16 +#define APLIC_IDC_TOPI_ID_MASK 0x3ff +#define APLIC_IDC_TOPI_ID (APLIC_IDC_TOPI_ID_MASK << \ + APLIC_IDC_TOPI_ID_SHIFT) +#define APLIC_IDC_TOPI_PRIO_SHIFT 0 +#define APLIC_IDC_TOPI_PRIO_MASK 0xff +#define APLIC_IDC_TOPI_PRIO (APLIC_IDC_TOPI_PRIO_MASK << \ + APLIC_IDC_TOPI_PRIO_SHIFT) + +#define APLIC_IDC_CLAIMI 0x1c + +#endif diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h new file mode 100644 index 0000000000..faf0b800b1 --- /dev/null +++ b/include/linux/irqchip/riscv-imsic.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. 
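[To illustrate the APLIC field macros above (a sketch: the hart index and priority are arbitrary, and FIELD_PREP() is the generic bitfield helper, not something this header mandates):]

#include <linux/bitfield.h>
#include <linux/irqchip/riscv-aplic.h>
#include <linux/types.h>

/* Direct-mode routing: an edge-triggered source aimed at one hart. */
static u32 aplic_example_target(u32 hart_idx)
{
	return FIELD_PREP(APLIC_TARGET_HART_IDX, hart_idx) |
	       FIELD_PREP(APLIC_TARGET_IPRIO, 1);
}

static const u32 aplic_example_sourcecfg = APLIC_SOURCECFG_SM_EDGE_RISE;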
+ */ +#ifndef __LINUX_IRQCHIP_RISCV_IMSIC_H +#define __LINUX_IRQCHIP_RISCV_IMSIC_H + +#include <linux/types.h> +#include <linux/bitops.h> +#include <asm/csr.h> + +#define IMSIC_MMIO_PAGE_SHIFT 12 +#define IMSIC_MMIO_PAGE_SZ BIT(IMSIC_MMIO_PAGE_SHIFT) +#define IMSIC_MMIO_PAGE_LE 0x00 +#define IMSIC_MMIO_PAGE_BE 0x04 + +#define IMSIC_MIN_ID 63 +#define IMSIC_MAX_ID 2048 + +#define IMSIC_EIDELIVERY 0x70 + +#define IMSIC_EITHRESHOLD 0x72 + +#define IMSIC_EIP0 0x80 +#define IMSIC_EIP63 0xbf +#define IMSIC_EIPx_BITS 32 + +#define IMSIC_EIE0 0xc0 +#define IMSIC_EIE63 0xff +#define IMSIC_EIEx_BITS 32 + +#define IMSIC_FIRST IMSIC_EIDELIVERY +#define IMSIC_LAST IMSIC_EIE63 + +#define IMSIC_MMIO_SETIPNUM_LE 0x00 +#define IMSIC_MMIO_SETIPNUM_BE 0x04 + +struct imsic_local_config { + phys_addr_t msi_pa; + void __iomem *msi_va; +}; + +struct imsic_global_config { + /* + * MSI Target Address Scheme + * + * XLEN-1 12 0 + * | | | + * ------------------------------------------------------------- + * |xxxxxx|Group Index|xxxxxxxxxxx|HART Index|Guest Index| 0 | + * ------------------------------------------------------------- + */ + + /* Bits representing Guest index, HART index, and Group index */ + u32 guest_index_bits; + u32 hart_index_bits; + u32 group_index_bits; + u32 group_index_shift; + + /* Global base address matching all target MSI addresses */ + phys_addr_t base_addr; + + /* Number of interrupt identities */ + u32 nr_ids; + + /* Number of guest interrupt identities */ + u32 nr_guest_ids; + + /* Per-CPU IMSIC addresses */ + struct imsic_local_config __percpu *local; +}; + +#ifdef CONFIG_RISCV_IMSIC + +const struct imsic_global_config *imsic_get_global_config(void); + +#else + +static inline const struct imsic_global_config *imsic_get_global_config(void) +{ + return NULL; +} + +#endif + +#endif diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index d9451d456a..fd091c35d5 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -18,6 +18,18 @@ struct irq_domain; struct pt_regs; /** + * struct irqstat - interrupt statistics + * @cnt: real-time interrupt count + * @ref: snapshot of interrupt count + */ +struct irqstat { + unsigned int cnt; +#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT + unsigned int ref; +#endif +}; + +/** * struct irq_desc - interrupt descriptor * @irq_common_data: per irq and chip data passed down to chip functions * @kstat_irqs: irq stats per cpu @@ -55,7 +67,7 @@ struct pt_regs; struct irq_desc { struct irq_common_data irq_common_data; struct irq_data irq_data; - unsigned int __percpu *kstat_irqs; + struct irqstat __percpu *kstat_irqs; irq_flow_handler_t handle_irq; struct irqaction *action; /* IRQ action list */ unsigned int status_use_accessors; @@ -119,7 +131,7 @@ extern struct irq_desc irq_desc[NR_IRQS]; static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc, unsigned int cpu) { - return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; + return desc->kstat_irqs ? 
per_cpu(desc->kstat_irqs->cnt, cpu) : 0; } static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) diff --git a/include/linux/irqdomain_defs.h b/include/linux/irqdomain_defs.h index 5c1fe6f1fc..36653e2ee1 100644 --- a/include/linux/irqdomain_defs.h +++ b/include/linux/irqdomain_defs.h @@ -25,7 +25,6 @@ enum irq_domain_bus_token { DOMAIN_BUS_PCI_DEVICE_MSIX, DOMAIN_BUS_DMAR, DOMAIN_BUS_AMDVI, - DOMAIN_BUS_PCI_DEVICE_IMS, DOMAIN_BUS_DEVICE_MSI, DOMAIN_BUS_WIRED_TO_MSI, }; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 971f3e826e..b900c64221 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1086,6 +1086,13 @@ struct journal_s int j_revoke_records_per_block; /** + * @j_transaction_overhead_buffers: + * + * Number of blocks each transaction needs for its own bookkeeping + */ + int j_transaction_overhead_buffers; + + /** * @j_commit_interval: * * What is the maximum transaction lifetime before we begin a commit? @@ -1434,7 +1441,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); extern void jbd2_journal_commit_transaction(journal_t *); /* Checkpoint list management */ -void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); +enum jbd2_shrink_type {JBD2_SHRINK_DESTROY, JBD2_SHRINK_BUSY_STOP, JBD2_SHRINK_BUSY_SKIP}; + +void __jbd2_journal_clean_checkpoint_list(journal_t *journal, enum jbd2_shrink_type type); unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan); int __jbd2_journal_remove_checkpoint(struct journal_head *); int jbd2_journal_try_remove_checkpoint(struct journal_head *jh); @@ -1586,10 +1595,8 @@ void jbd2_journal_put_journal_head(struct journal_head *jh); */ extern struct kmem_cache *jbd2_handle_cache; -static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags) -{ - return kmem_cache_zalloc(jbd2_handle_cache, gfp_flags); -} +#define jbd2_alloc_handle(_gfp_flags) \ + ((handle_t *)kmem_cache_zalloc(jbd2_handle_cache, _gfp_flags)) static inline void jbd2_free_handle(handle_t *handle) { @@ -1602,10 +1609,8 @@ static inline void jbd2_free_handle(handle_t *handle) */ extern struct kmem_cache *jbd2_inode_cache; -static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags) -{ - return kmem_cache_alloc(jbd2_inode_cache, gfp_flags); -} +#define jbd2_alloc_inode(_gfp_flags) \ + ((struct jbd2_inode *)kmem_cache_alloc(jbd2_inode_cache, _gfp_flags)) static inline void jbd2_free_inode(struct jbd2_inode *jinode) { @@ -1662,11 +1667,6 @@ int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode); int jbd2_fc_wait_bufs(journal_t *journal, int num_blks); int jbd2_fc_release_bufs(journal_t *journal); -static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal) -{ - return (journal->j_total_len - journal->j_fc_wbufsize) / 4; -} - /* * is_journal_abort * @@ -1696,7 +1696,7 @@ static inline void jbd2_journal_abort_handle(handle_t *handle) static inline void jbd2_init_fs_dev_write_error(journal_t *journal) { - struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping; + struct address_space *mapping = journal->j_fs_dev->bd_mapping; /* * Save the original wb_err value of client fs's bdev mapping which @@ -1707,7 +1707,7 @@ static inline int jbd2_check_fs_dev_write_error(journal_t *journal) { - struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping; + struct address_space *mapping = journal->j_fs_dev->bd_mapping; return errseq_check(&mapping->wb_err,
READ_ONCE(journal->j_fs_dev_wb_err)); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index f0a949b7c9..f5a2727ca4 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -216,6 +216,7 @@ extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; extern void jump_label_init(void); +extern void jump_label_init_ro(void); extern void jump_label_lock(void); extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, @@ -265,6 +266,8 @@ static __always_inline void jump_label_init(void) static_key_initialized = true; } +static __always_inline void jump_label_init_ro(void) { } + static __always_inline bool static_key_false(struct static_key *key) { if (unlikely_notrace(static_key_count(key) > 0)) diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 9935f7ecbf..9c042c6384 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -79,6 +79,14 @@ static inline unsigned int kstat_cpu_softirqs_sum(int cpu) return sum; } +#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT +extern void kstat_snapshot_irqs(void); +extern unsigned int kstat_get_irq_since_snapshot(unsigned int irq); +#else +static inline void kstat_snapshot_irqs(void) { } +static inline unsigned int kstat_get_irq_since_snapshot(unsigned int irq) { return 0; } +#endif + /* * Number of interrupts per specific IRQ source, since bootup */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index f31bd304df..f0e9f8eda7 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -319,8 +319,10 @@ struct kimage { /* If set, we are using file mode kexec syscall */ unsigned int file_mode:1; #ifdef CONFIG_CRASH_HOTPLUG - /* If set, allow changes to elfcorehdr of kexec_load'd image */ - unsigned int update_elfcorehdr:1; + /* If set, it is safe to update kexec segments that are + * excluded from SHA calculation. + */ + unsigned int hotplug_support:1; #endif #ifdef ARCH_HAS_KIMAGE_ARCH @@ -391,9 +393,10 @@ bool kexec_load_permitted(int kexec_image_type); /* List of defined/legal kexec flags */ #ifndef CONFIG_KEXEC_JUMP -#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR) +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR | KEXEC_CRASH_HOTPLUG_SUPPORT) #else -#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR) +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR | \ + KEXEC_CRASH_HOTPLUG_SUPPORT) #endif /* List of defined/legal kexec file flags */ diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 0b35a41440..564868bdce 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -36,10 +36,16 @@ * to lock the reader. 
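[Before the macro changes below, a baseline sketch of the single-reader/single-writer usage the comment describes; the fifo type and depth are illustrative.]

#include <linux/kfifo.h>

static DEFINE_KFIFO(event_fifo, unsigned int, 64);

/* Reader side: discard one element without copying it out. */
static void drop_stale_event(void)
{
	if (!kfifo_is_empty(&event_fifo))
		kfifo_skip(&event_fifo);
}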
*/ -#include <linux/kernel.h> +#include <linux/array_size.h> +#include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <linux/stddef.h> -#include <linux/scatterlist.h> +#include <linux/types.h> + +#include <asm/barrier.h> +#include <asm/errno.h> + +struct scatterlist; struct __kfifo { unsigned int in; @@ -304,19 +310,25 @@ __kfifo_uint_must_check_helper( \ ) /** - * kfifo_skip - skip output data + * kfifo_skip_count - skip output data * @fifo: address of the fifo to be used + * @count: count of data to skip */ -#define kfifo_skip(fifo) \ -(void)({ \ +#define kfifo_skip_count(fifo, count) do { \ typeof((fifo) + 1) __tmp = (fifo); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ if (__recsize) \ __kfifo_skip_r(__kfifo, __recsize); \ else \ - __kfifo->out++; \ -}) + __kfifo->out += (count); \ +} while(0) + +/** + * kfifo_skip - skip output data + * @fifo: address of the fifo to be used + */ +#define kfifo_skip(fifo) kfifo_skip_count(fifo, 1) /** * kfifo_peek_len - gets the size of the next fifo record @@ -578,7 +590,7 @@ __kfifo_uint_must_check_helper( \ * @buf: pointer to the storage buffer * @n: max. number of elements to get * - * This macro get some data from the fifo and return the numbers of elements + * This macro gets some data from the fifo and returns the numbers of elements * copied. * * Note that with only one concurrent reader and one concurrent @@ -605,7 +617,7 @@ __kfifo_uint_must_check_helper( \ * @n: max. number of elements to get * @lock: pointer to the spinlock to use for locking * - * This macro get the data from the fifo and return the numbers of elements + * This macro gets the data from the fifo and returns the numbers of elements * copied. */ #define kfifo_out_spinlocked(fifo, buf, n, lock) \ @@ -703,11 +715,12 @@ __kfifo_int_must_check_helper( \ ) /** - * kfifo_dma_in_prepare - setup a scatterlist for DMA input + * kfifo_dma_in_prepare_mapped - setup a scatterlist for DMA input * @fifo: address of the fifo to be used * @sgl: pointer to the scatterlist array * @nents: number of entries in the scatterlist array * @len: number of elements to transfer + * @dma: mapped dma address to fill into @sgl * * This macro fills a scatterlist for DMA input. * It returns the number entries in the scatterlist array. @@ -715,7 +728,7 @@ __kfifo_int_must_check_helper( \ * Note that with only one concurrent reader and one concurrent * writer, you don't need extra locking to use these macros. */ -#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ +#define kfifo_dma_in_prepare_mapped(fifo, sgl, nents, len, dma) \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ struct scatterlist *__sgl = (sgl); \ @@ -724,16 +737,20 @@ __kfifo_int_must_check_helper( \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ (__recsize) ? \ - __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ - __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len); \ + __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize, \ + dma) : \ + __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len, dma); \ }) +#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ + kfifo_dma_in_prepare_mapped(fifo, sgl, nents, len, DMA_MAPPING_ERROR) + /** * kfifo_dma_in_finish - finish a DMA IN operation * @fifo: address of the fifo to be used * @len: number of bytes to received * - * This macro finish a DMA IN operation. The in counter will be updated by + * This macro finishes a DMA IN operation. 
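[A hedged sketch of the new *_mapped flow: the fifo size, scatterlist depth, and premapped @dma handle are assumptions, and a real driver would typically call kfifo_dma_in_finish() from its DMA completion callback rather than inline.]

#include <linux/dma-mapping.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>

static DEFINE_KFIFO(dma_fifo, u8, 4096);

static int start_dma_in(dma_addr_t dma, unsigned int len)
{
	struct scatterlist sgl[4];
	unsigned int nents;

	nents = kfifo_dma_in_prepare_mapped(&dma_fifo, sgl,
					    ARRAY_SIZE(sgl), len, dma);
	if (!nents)
		return -EAGAIN;

	/* ... program the DMA engine with sgl[0..nents - 1] ... */

	kfifo_dma_in_finish(&dma_fifo, len);
	return 0;
}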
 /**
  * kfifo_dma_in_finish - finish a DMA IN operation
  * @fifo: address of the fifo to be used
  * @len: number of bytes to be received
  *
- * This macro finish a DMA IN operation. The in counter will be updated by
+ * This macro finishes a DMA IN operation. The in counter will be updated by
  * the len parameter. No error checking will be done.
  *
  * Note that with only one concurrent reader and one concurrent
@@ -752,11 +769,12 @@ __kfifo_int_must_check_helper( \
 })
 
 /**
- * kfifo_dma_out_prepare - setup a scatterlist for DMA output
+ * kfifo_dma_out_prepare_mapped - setup a scatterlist for DMA output
  * @fifo: address of the fifo to be used
  * @sgl: pointer to the scatterlist array
  * @nents: number of entries in the scatterlist array
  * @len: number of elements to transfer
+ * @dma: mapped dma address to fill into @sgl
  *
  * This macro fills a scatterlist for DMA output of at most @len bytes
  * to transfer.
  * It returns the number of entries in the scatterlist array.
  *
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these macros.
  */
-#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
+#define kfifo_dma_out_prepare_mapped(fifo, sgl, nents, len, dma) \
 ({ \
 	typeof((fifo) + 1) __tmp = (fifo); \
 	struct scatterlist *__sgl = (sgl); \
@@ -775,32 +793,29 @@ __kfifo_int_must_check_helper( \
 	const size_t __recsize = sizeof(*__tmp->rectype); \
 	struct __kfifo *__kfifo = &__tmp->kfifo; \
 	(__recsize) ? \
-	__kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \
-	__kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len); \
+	__kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize, \
+				  dma) : \
+	__kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len, dma); \
 })
 
+#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
+	kfifo_dma_out_prepare_mapped(fifo, sgl, nents, len, DMA_MAPPING_ERROR)
+
 /**
  * kfifo_dma_out_finish - finish a DMA OUT operation
  * @fifo: address of the fifo to be used
  * @len: number of bytes transferred
  *
- * This macro finish a DMA OUT operation. The out counter will be updated by
+ * This macro finishes a DMA OUT operation. The out counter will be updated by
  * the len parameter. No error checking will be done.
  *
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these macros.
  */
-#define kfifo_dma_out_finish(fifo, len) \
-(void)({ \
-	typeof((fifo) + 1) __tmp = (fifo); \
-	unsigned int __len = (len); \
-	const size_t __recsize = sizeof(*__tmp->rectype); \
-	struct __kfifo *__kfifo = &__tmp->kfifo; \
-	if (__recsize) \
-		__kfifo_dma_out_finish_r(__kfifo, __recsize); \
-	else \
-		__kfifo->out += __len / sizeof(*__tmp->type); \
-})
+#define kfifo_dma_out_finish(fifo, len) do { \
+	typeof((fifo) + 1) ___tmp = (fifo); \
+	kfifo_skip_count(___tmp, (len) / sizeof(*___tmp->type)); \
+} while (0)
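[Editor's note — usage sketch, not part of the patch] The matching output path, under the same assumptions as the input sketch above:

	struct scatterlist sg[4];
	unsigned int nents, tx_len;

	nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), kfifo_len(&fifo));
	if (nents) {
		/* ... map sg, run the transfer, set tx_len to the bytes sent ... */
		kfifo_dma_out_finish(&fifo, tx_len);	/* drop tx_len bytes from the fifo */
	}

Note how the rewritten kfifo_dma_out_finish() is now just kfifo_skip_count() on an element count, which is why the (len) / sizeof(*___tmp->type) conversion appears in the macro body.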
 /**
  * kfifo_out_peek - gets some data from the fifo
@@ -808,7 +823,7 @@ __kfifo_int_must_check_helper( \
  * @buf: pointer to the storage buffer
  * @n: max. number of elements to get
  *
- * This macro get the data from the fifo and return the numbers of elements
+ * This macro gets the data from the fifo and returns the number of elements
  * copied. The data is not removed from the fifo.
  *
  * Note that with only one concurrent reader and one concurrent
@@ -828,6 +843,63 @@ __kfifo_uint_must_check_helper( \
 }) \
 )
 
+/**
+ * kfifo_out_linear - gets a tail of/offset to available data
+ * @fifo: address of the fifo to be used
+ * @tail: pointer to an unsigned int to store the value of tail
+ * @n: max. number of elements to point at
+ *
+ * This macro obtains the offset (tail) to the available data in the fifo
+ * buffer and returns the number of elements available. It returns the
+ * available count till the end of data or till the end of the buffer, so
+ * it can be used for linear data processing (like memcpy() of
+ * (@fifo->data + @tail) with count returned).
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+#define kfifo_out_linear(fifo, tail, n) \
+__kfifo_uint_must_check_helper( \
+({ \
+	typeof((fifo) + 1) __tmp = (fifo); \
+	unsigned int *__tail = (tail); \
+	unsigned long __n = (n); \
+	const size_t __recsize = sizeof(*__tmp->rectype); \
+	struct __kfifo *__kfifo = &__tmp->kfifo; \
+	(__recsize) ? \
+	__kfifo_out_linear_r(__kfifo, __tail, __n, __recsize) : \
+	__kfifo_out_linear(__kfifo, __tail, __n); \
+}) \
+)
+
+/**
+ * kfifo_out_linear_ptr - gets a pointer to the available data
+ * @fifo: address of the fifo to be used
+ * @ptr: pointer to data to store the pointer to tail
+ * @n: max. number of elements to point at
+ *
+ * Similarly to kfifo_out_linear(), this macro obtains the pointer to the
+ * available data in the fifo buffer and returns the number of elements
+ * available. It returns the available count till the end of available data
+ * or till the end of the buffer, so it can be used for linear data
+ * processing (like memcpy() of @ptr with count returned).
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+#define kfifo_out_linear_ptr(fifo, ptr, n) \
+__kfifo_uint_must_check_helper( \
+({ \
+	typeof((fifo) + 1) ___tmp = (fifo); \
+	unsigned int ___tail; \
+	unsigned int ___n = kfifo_out_linear(___tmp, &___tail, (n)); \
+	*(ptr) = ___tmp->kfifo.data + ___tail * kfifo_esize(___tmp); \
+	___n; \
+}) \
+)
+
 
 extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
	size_t esize, gfp_t gfp_mask);
 
@@ -849,14 +921,17 @@ extern int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
	unsigned long len, unsigned int *copied);
 
 extern unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
-	struct scatterlist *sgl, int nents, unsigned int len);
+	struct scatterlist *sgl, int nents, unsigned int len, dma_addr_t dma);
 
 extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
-	struct scatterlist *sgl, int nents, unsigned int len);
+	struct scatterlist *sgl, int nents, unsigned int len, dma_addr_t dma);
 
 extern unsigned int __kfifo_out_peek(struct __kfifo *fifo,
	void *buf, unsigned int len);
 
+extern unsigned int __kfifo_out_linear(struct __kfifo *fifo,
+	unsigned int *tail, unsigned int n);
+
 extern unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
	unsigned int len, size_t recsize);
 
@@ -871,15 +946,15 @@ extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
	unsigned long len, unsigned int *copied, size_t recsize);
 
 extern unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
-	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize);
+	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize,
+	dma_addr_t dma);
 
 extern void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
	unsigned int len, size_t recsize);
 
 extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
-	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize);
-
-extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize);
+	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize,
+	dma_addr_t dma);
 
 extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize);
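[Editor's note — usage sketch, not part of the patch] The new linear accessors above enable in-place, zero-copy consumption; process() is a hypothetical consumer and fifo a byte kfifo:

	unsigned char *buf;
	unsigned int n;

	n = kfifo_out_linear_ptr(&fifo, &buf, kfifo_len(&fifo));
	if (n) {
		process(buf, n);		/* read directly from the fifo buffer */
		kfifo_skip_count(&fifo, n);	/* then drop what was consumed */
	}

Because the returned count stops at the end of the ring buffer, a wrapped fifo may need a second prepare/skip round to drain completely.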
@@ -888,6 +963,9 @@ extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize);
 extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo,
	void *buf, unsigned int len, size_t recsize);
 
+extern unsigned int __kfifo_out_linear_r(struct __kfifo *fifo,
+	unsigned int *tail, unsigned int n, size_t recsize);
+
 extern unsigned int __kfifo_max_r(unsigned int len, size_t recsize);
 
 #endif
diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h
index c4cae333de..e1082dc40a 100644
--- a/include/linux/kmsan-checks.h
+++ b/include/linux/kmsan-checks.h
@@ -61,6 +61,17 @@ void kmsan_check_memory(const void *address, size_t size);
 void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
			size_t left);
 
+/**
+ * kmsan_memmove() - Notify KMSAN about a data copy within the kernel.
+ * @to: destination address in the kernel.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ *
+ * Invoked after a non-instrumented version (e.g. one implemented using
+ * assembly code) of memmove()/memcpy() is called, in order to copy KMSAN's
+ * metadata.
+ */
+void kmsan_memmove(void *to, const void *from, size_t to_copy);
+
 #else
 
 static inline void kmsan_poison_memory(const void *address, size_t size,
@@ -78,6 +89,10 @@ static inline void kmsan_copy_to_user(void __user *to, const void *from,
 {
 }
 
+static inline void kmsan_memmove(void *to, const void *from, size_t to_copy)
+{
+}
+
 #endif
 
 #endif /* _LINUX_KMSAN_CHECKS_H */
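[Editor's note — usage sketch, not part of the patch] kmsan_memmove() from the kmsan-checks.h hunk above is meant to be called right after a copy that KMSAN's instrumentation cannot see. A hedged sketch; asm_memcpy() stands in for a hypothetical non-instrumented routine:

	#include <linux/kmsan-checks.h>

	static void fast_copy(void *dst, const void *src, size_t n)
	{
		asm_memcpy(dst, src, n);	/* hypothetical assembly implementation */
		/* KMSAN did not observe the copy, so move its metadata by hand */
		kmsan_memmove(dst, src, n);
	}

With KMSAN disabled, the empty stub added in the same hunk makes the call disappear.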
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index f4692ec361..11690dacd9 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -56,16 +56,8 @@ static inline long mm_ksm_zero_pages(struct mm_struct *mm)
 
 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-	int ret;
-
-	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
-		ret = __ksm_enter(mm);
-		if (ret)
-			return ret;
-	}
-
-	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
-		set_bit(MMF_VM_MERGE_ANY, &mm->flags);
+	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+		return __ksm_enter(mm);
 
 	return 0;
 }
@@ -100,15 +92,9 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
-
-#ifdef CONFIG_MEMORY_FAILURE
-void collect_procs_ksm(struct page *page, struct list_head *to_kill,
-		       int force_early);
-#endif
-
-#ifdef CONFIG_PROC_FS
+void collect_procs_ksm(struct folio *folio, struct page *page,
+		       struct list_head *to_kill, int force_early);
 long ksm_process_profit(struct mm_struct *);
-#endif /* CONFIG_PROC_FS */
 
 #else  /* !CONFIG_KSM */
 
@@ -139,12 +125,10 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 }
 
-#ifdef CONFIG_MEMORY_FAILURE
-static inline void collect_procs_ksm(struct page *page,
+static inline void collect_procs_ksm(struct folio *folio, struct page *page,
		struct list_head *to_kill, int force_early)
 {
 }
-#endif
 
 #ifdef CONFIG_MMU
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 48f31dcd31..692c01e41a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -193,8 +193,6 @@ static inline bool is_error_page(struct page *page)
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap);
 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
-bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
-				      struct kvm_vcpu *except);
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
@@ -259,7 +257,6 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 
 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
 union kvm_mmu_notifier_arg {
-	pte_t pte;
	unsigned long attributes;
 };
 
@@ -273,7 +270,6 @@ struct kvm_gfn_range {
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 #endif
 
 enum {
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index d93f6522b2..827ecc0b7e 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -86,6 +86,7 @@ struct gfn_to_pfn_cache {
 struct kvm_mmu_memory_cache {
	gfp_t gfp_zero;
	gfp_t gfp_custom;
+	u64 init_value;
	struct kmem_cache *kmem_cache;
	int capacity;
	int nobjs;
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 238fb1dfed..68703a51dc 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -61,7 +61,7 @@ struct lcd_device {
	   points to something in the body of that driver, it is also invalid. */
	struct mutex ops_lock;
	/* If this is NULL, the backing module is unloaded */
-	struct lcd_ops *ops;
+	const struct lcd_ops *ops;
	/* Serialise access to set_power method */
	struct mutex update_lock;
	/* The framebuffer notifier block */
@@ -102,10 +102,10 @@ static inline void lcd_set_power(struct lcd_device *ld, int power)
 }
 
 extern struct lcd_device *lcd_device_register(const char *name,
-	struct device *parent, void *devdata, struct lcd_ops *ops);
+	struct device *parent, void *devdata, const struct lcd_ops *ops);
 extern struct lcd_device *devm_lcd_device_register(struct device *dev,
	const char *name, struct device *parent,
-	void *devdata, struct lcd_ops *ops);
+	void *devdata, const struct lcd_ops *ops);
 extern void lcd_device_unregister(struct lcd_device *ld);
 extern void devm_lcd_device_unregister(struct device *dev,
	struct lcd_device *ld);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index db6b114bb3..6300313c46 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -455,6 +455,9 @@ struct led_trigger {
	int		(*activate)(struct led_classdev *led_cdev);
	void		(*deactivate)(struct led_classdev *led_cdev);
 
+	/* Brightness set by led_trigger_event */
+	enum led_brightness brightness;
+
	/* LED-private triggers have this set */
	struct led_hw_trigger_type *trigger_type;
 
@@ -508,6 +511,12 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
	return led_cdev->trigger_data;
 }
 
+static inline enum led_brightness
+led_trigger_get_brightness(const struct led_trigger *trigger)
+{
+	return trigger ? 
trigger->brightness : LED_OFF; +} + #define module_led_trigger(__led_trigger) \ module_driver(__led_trigger, led_trigger_register, \ led_trigger_unregister) @@ -544,6 +553,12 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) return NULL; } +static inline enum led_brightness +led_trigger_get_brightness(const struct led_trigger *trigger) +{ + return LED_OFF; +} + #endif /* CONFIG_LEDS_TRIGGERS */ /* Trigger specific enum */ @@ -690,18 +705,4 @@ enum led_audio { NUM_AUDIO_LEDS }; -#if IS_ENABLED(CONFIG_LEDS_TRIGGER_AUDIO) -enum led_brightness ledtrig_audio_get(enum led_audio type); -void ledtrig_audio_set(enum led_audio type, enum led_brightness state); -#else -static inline enum led_brightness ledtrig_audio_get(enum led_audio type) -{ - return LED_OFF; -} -static inline void ledtrig_audio_set(enum led_audio type, - enum led_brightness state) -{ -} -#endif - #endif /* __LINUX_LEDS_H_INCLUDED */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 186a6cbbbf..7d3bd7c966 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1152,12 +1152,19 @@ extern int ata_std_bios_param(struct scsi_device *sdev, sector_t capacity, int geom[]); extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); extern int ata_scsi_slave_alloc(struct scsi_device *sdev); -extern int ata_scsi_slave_config(struct scsi_device *sdev); +int ata_scsi_device_configure(struct scsi_device *sdev, + struct queue_limits *lim); extern void ata_scsi_slave_destroy(struct scsi_device *sdev); extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth); extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, int queue_depth); +extern int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev, + bool *supported); +extern int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev, + bool *enabled); +extern int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev, + bool enable); extern struct ata_device *ata_dev_pair(struct ata_device *adev); extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); @@ -1245,7 +1252,8 @@ extern void ata_port_probe(struct ata_port *ap); extern void ata_port_free(struct ata_port *ap); extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap); extern void ata_sas_tport_delete(struct ata_port *ap); -extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); +int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim, + struct ata_port *ap); extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis); @@ -1411,13 +1419,13 @@ extern const struct attribute_group *ata_common_sdev_groups[]; __ATA_BASE_SHT(drv_name), \ .can_queue = ATA_DEF_QUEUE, \ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ - .slave_configure = ata_scsi_slave_config + .device_configure = ata_scsi_device_configure #define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \ __ATA_BASE_SHT(drv_name), \ .can_queue = drv_qd, \ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ - .slave_configure = ata_scsi_slave_config + .device_configure = ata_scsi_device_configure #define ATA_BASE_SHT(drv_name) \ ATA_SUBBASE_SHT(drv_name), \ diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index 287f590ed5..d94bfd9ac8 100644 --- a/include/linux/linkmode.h +++ 
b/include/linux/linkmode.h @@ -43,29 +43,10 @@ static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1, return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) -{ - __set_bit(nr, addr); -} - -static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr) -{ - __clear_bit(nr, addr); -} - -static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr, - int set) -{ - if (set) - linkmode_set_bit(nr, addr); - else - linkmode_clear_bit(nr, addr); -} - -static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr) -{ - return test_bit(nr, addr); -} +#define linkmode_test_bit test_bit +#define linkmode_set_bit __set_bit +#define linkmode_clear_bit __clear_bit +#define linkmode_mod_bit __assign_bit static inline void linkmode_set_bit_array(const int *array, int array_size, unsigned long *addr) diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 9b9b38e895..51a258c24f 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -18,9 +18,9 @@ #if IS_ENABLED(CONFIG_LIVEPATCH) /* task patch states */ -#define KLP_UNDEFINED -1 -#define KLP_UNPATCHED 0 -#define KLP_PATCHED 1 +#define KLP_TRANSITION_IDLE -1 +#define KLP_TRANSITION_UNPATCHED 0 +#define KLP_TRANSITION_PATCHED 1 /** * struct klp_func - function structure for live patching diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 7e539f6f8c..855db460e0 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -144,6 +144,7 @@ LSM_HOOK(int, 0, inode_setattr, struct mnt_idmap *idmap, struct dentry *dentry, LSM_HOOK(void, LSM_RET_VOID, inode_post_setattr, struct mnt_idmap *idmap, struct dentry *dentry, int ia_valid) LSM_HOOK(int, 0, inode_getattr, const struct path *path) +LSM_HOOK(int, 0, inode_xattr_skipcap, const char *name) LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) @@ -176,7 +177,8 @@ LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, size_t buffer_size) LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid) LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) -LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name) +LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src, + const char *name) LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir, struct kernfs_node *kn) LSM_HOOK(int, 0, file_permission, struct file *file, int mask) diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 693eba9869..b1fbe41184 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -7,6 +7,7 @@ /* Known PHY IDs */ #define MARVELL_PHY_ID_88E1101 0x01410c60 +#define MARVELL_PHY_ID_88E3082 0x01410c80 #define MARVELL_PHY_ID_88E1112 0x01410c90 #define MARVELL_PHY_ID_88E1111 0x01410cc0 #define MARVELL_PHY_ID_88E1118 0x01410e10 @@ -31,6 +32,8 @@ /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0 +/* ID from 88E6020, assumed to be the same for the whole 6250 family */ +#define MARVELL_PHY_ID_88E6250_FAMILY 0x01410db0 /* These Ethernet switch families contain embedded PHYs, but they do * not have a model ID. 
So the switch driver traps reads to the ID2 * register and returns the switch family ID diff --git a/include/linux/math64.h b/include/linux/math64.h index bf74478926..d34def7f9a 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -4,8 +4,8 @@ #include <linux/types.h> #include <linux/math.h> -#include <vdso/math64.h> #include <asm/div64.h> +#include <vdso/math64.h> #if BITS_PER_LONG == 64 @@ -179,16 +179,12 @@ static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift) #ifndef mul_u64_u32_shr static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) { - u32 ah, al; + u32 ah = a >> 32, al = a; u64 ret; - al = a; - ah = a >> 32; - ret = mul_u32_u32(al, mul) >> shift; if (ah) ret += mul_u32_u32(ah, mul) << (32 - shift); - return ret; } #endif /* mul_u64_u32_shr */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 394fd0a887..030d34e9d1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -83,6 +83,8 @@ enum mem_cgroup_events_target { struct memcg_vmstats_percpu; struct memcg_vmstats; +struct lruvec_stats_percpu; +struct lruvec_stats; struct mem_cgroup_reclaim_iter { struct mem_cgroup *position; @@ -90,25 +92,6 @@ struct mem_cgroup_reclaim_iter { unsigned int generation; }; -struct lruvec_stats_percpu { - /* Local (CPU and cgroup) state */ - long state[NR_VM_NODE_STAT_ITEMS]; - - /* Delta calculation for lockless upward propagation */ - long state_prev[NR_VM_NODE_STAT_ITEMS]; -}; - -struct lruvec_stats { - /* Aggregated (CPU and subtree) state */ - long state[NR_VM_NODE_STAT_ITEMS]; - - /* Non-hierarchical (CPU aggregated) state */ - long state_local[NR_VM_NODE_STAT_ITEMS]; - - /* Pending child counts during tree propagation */ - long state_pending[NR_VM_NODE_STAT_ITEMS]; -}; - /* * per-node information in memory controller. 
*/ @@ -116,7 +99,7 @@ struct mem_cgroup_per_node { struct lruvec lruvec; struct lruvec_stats_percpu __percpu *lruvec_stats_percpu; - struct lruvec_stats lruvec_stats; + struct lruvec_stats *lruvec_stats; unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; @@ -349,15 +332,32 @@ struct mem_cgroup { extern struct mem_cgroup *root_mem_cgroup; enum page_memcg_data_flags { - /* page->memcg_data is a pointer to an objcgs vector */ - MEMCG_DATA_OBJCGS = (1UL << 0), + /* page->memcg_data is a pointer to an slabobj_ext vector */ + MEMCG_DATA_OBJEXTS = (1UL << 0), /* page has been accounted as a non-slab kernel page */ MEMCG_DATA_KMEM = (1UL << 1), /* the next bit after the last actual flag */ __NR_MEMCG_DATA_FLAGS = (1UL << 2), }; -#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1) +#define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS + +#else /* CONFIG_MEMCG */ + +#define __FIRST_OBJEXT_FLAG (1UL << 0) + +#endif /* CONFIG_MEMCG */ + +enum objext_flags { + /* slabobj_ext vector failed to allocate */ + OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG, + /* the next bit after the last actual flag */ + __NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1), +}; + +#define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1) + +#ifdef CONFIG_MEMCG static inline bool folio_memcg_kmem(struct folio *folio); @@ -388,10 +388,10 @@ static inline struct mem_cgroup *__folio_memcg(struct folio *folio) unsigned long memcg_data = folio->memcg_data; VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); - VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio); + VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio); - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } /* @@ -409,10 +409,10 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio) unsigned long memcg_data = folio->memcg_data; VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); - VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio); + VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio); - return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } /* @@ -469,11 +469,11 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) if (memcg_data & MEMCG_DATA_KMEM) { struct obj_cgroup *objcg; - objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK); return obj_cgroup_memcg(objcg); } - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } /* @@ -506,17 +506,17 @@ static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) */ unsigned long memcg_data = READ_ONCE(folio->memcg_data); - if (memcg_data & MEMCG_DATA_OBJCGS) + if (memcg_data & MEMCG_DATA_OBJEXTS) return NULL; if (memcg_data & MEMCG_DATA_KMEM) { struct obj_cgroup *objcg; - objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK); return obj_cgroup_memcg(objcg); } - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } static inline struct mem_cgroup *page_memcg_check(struct page *page) @@ -552,7 +552,7 @@ retry: static inline bool folio_memcg_kmem(struct folio *folio) { VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page); - 
VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio); + VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio); return folio->memcg_data & MEMCG_DATA_KMEM; } @@ -818,7 +818,8 @@ static inline void obj_cgroup_get_many(struct obj_cgroup *objcg, static inline void obj_cgroup_put(struct obj_cgroup *objcg) { - percpu_ref_put(&objcg->refcnt); + if (objcg) + percpu_ref_put(&objcg->refcnt); } static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg) @@ -973,7 +974,8 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); void folio_memcg_lock(struct folio *folio); void folio_memcg_unlock(struct folio *folio); -void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); +void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, + int val); /* try to stablize folio_memcg() for all the pages in a memcg */ static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) @@ -994,7 +996,7 @@ static inline void mem_cgroup_unlock_pages(void) /* idx can be of type enum memcg_stat_item or node_stat_item */ static inline void mod_memcg_state(struct mem_cgroup *memcg, - int idx, int val) + enum memcg_stat_item idx, int val) { unsigned long flags; @@ -1004,7 +1006,7 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg, } static inline void mod_memcg_page_state(struct page *page, - int idx, int val) + enum memcg_stat_item idx, int val) { struct mem_cgroup *memcg; @@ -1019,48 +1021,13 @@ static inline void mod_memcg_page_state(struct page *page, } unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx); - -static inline unsigned long lruvec_page_state(struct lruvec *lruvec, - enum node_stat_item idx) -{ - struct mem_cgroup_per_node *pn; - long x; - - if (mem_cgroup_disabled()) - return node_page_state(lruvec_pgdat(lruvec), idx); - - pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - x = READ_ONCE(pn->lruvec_stats.state[idx]); -#ifdef CONFIG_SMP - if (x < 0) - x = 0; -#endif - return x; -} - -static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, - enum node_stat_item idx) -{ - struct mem_cgroup_per_node *pn; - long x = 0; - - if (mem_cgroup_disabled()) - return node_page_state(lruvec_pgdat(lruvec), idx); - - pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - x = READ_ONCE(pn->lruvec_stats.state_local[idx]); -#ifdef CONFIG_SMP - if (x < 0) - x = 0; -#endif - return x; -} +unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx); +unsigned long lruvec_page_state_local(struct lruvec *lruvec, + enum node_stat_item idx); void mem_cgroup_flush_stats(struct mem_cgroup *memcg); void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg); -void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, - int val); void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val); static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, @@ -1073,16 +1040,6 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, local_irq_restore(flags); } -static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx, int val) -{ - unsigned long flags; - - local_irq_save(flags); - __mod_memcg_lruvec_state(lruvec, idx, val); - local_irq_restore(flags); -} - void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count); @@ -1535,19 +1492,19 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) } static inline void __mod_memcg_state(struct 
mem_cgroup *memcg, - int idx, + enum memcg_stat_item idx, int nr) { } static inline void mod_memcg_state(struct mem_cgroup *memcg, - int idx, + enum memcg_stat_item idx, int nr) { } static inline void mod_memcg_page_state(struct page *page, - int idx, int val) + enum memcg_stat_item idx, int val) { } @@ -1576,11 +1533,6 @@ static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg) { } -static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx, int val) -{ -} - static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) { @@ -1632,6 +1584,19 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, } #endif /* CONFIG_MEMCG */ +/* + * Extended information for slab objects stored as an array in page->memcg_data + * if MEMCG_DATA_OBJEXTS is set. + */ +struct slabobj_ext { +#ifdef CONFIG_MEMCG_KMEM + struct obj_cgroup *objcg; +#endif +#ifdef CONFIG_MEM_ALLOC_PROFILING + union codetag_ref ref; +#endif +} __aligned(8); + static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) { __mod_lruvec_kmem_state(p, idx, 1); diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h index 69e7819000..0d70788558 100644 --- a/include/linux/memory-tiers.h +++ b/include/linux/memory-tiers.h @@ -48,6 +48,9 @@ int mt_calc_adistance(int node, int *adist); int mt_set_default_dram_perf(int nid, struct access_coordinate *perf, const char *source); int mt_perf_to_adistance(struct access_coordinate *perf, int *adist); +struct memory_dev_type *mt_find_alloc_memory_type(int adist, + struct list_head *memory_types); +void mt_put_memory_types(struct list_head *memory_types); #ifdef CONFIG_MIGRATION int next_demotion_node(int node); void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets); @@ -136,5 +139,15 @@ static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adis { return -EIO; } + +static inline struct memory_dev_type *mt_find_alloc_memory_type(int adist, + struct list_head *memory_types) +{ + return NULL; +} + +static inline void mt_put_memory_types(struct list_head *memory_types) +{ +} #endif /* CONFIG_NUMA */ #endif /* _LINUX_MEMORY_TIERS_H */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 931b118336..1add16f216 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -167,7 +167,8 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ extern bool vma_migratable(struct vm_area_struct *vma); -int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long); +int mpol_misplaced(struct folio *folio, struct vm_fault *vmf, + unsigned long addr); extern void mpol_put_task_policy(struct task_struct *); static inline bool mpol_is_preferred_many(struct mempolicy *pol) @@ -282,7 +283,7 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol) #endif static inline int mpol_misplaced(struct folio *folio, - struct vm_area_struct *vma, + struct vm_fault *vmf, unsigned long address) { return -1; /* no node preference */ diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 16c5cc807f..7b15144134 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -5,6 +5,8 @@ #ifndef _LINUX_MEMPOOL_H #define _LINUX_MEMPOOL_H +#include <linux/sched.h> +#include <linux/alloc_tag.h> #include <linux/wait.h> #include <linux/compiler.h> @@ -39,18 +41,32 @@ void mempool_exit(mempool_t *pool); int mempool_init_node(mempool_t *pool, int 
min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id); -int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, + +int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data); +#define mempool_init(...) \ + alloc_hooks(mempool_init_noprof(__VA_ARGS__)) extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data); -extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, + +extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int nid); +#define mempool_create_node(...) \ + alloc_hooks(mempool_create_node_noprof(__VA_ARGS__)) + +#define mempool_create(_min_nr, _alloc_fn, _free_fn, _pool_data) \ + mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data, \ + GFP_KERNEL, NUMA_NO_NODE) extern int mempool_resize(mempool_t *pool, int new_min_nr); extern void mempool_destroy(mempool_t *pool); -extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc; + +extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc; +#define mempool_alloc(...) \ + alloc_hooks(mempool_alloc_noprof(__VA_ARGS__)) + extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc; extern void mempool_free(void *element, mempool_t *pool); @@ -62,19 +78,10 @@ extern void mempool_free(void *element, mempool_t *pool); void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); void mempool_free_slab(void *element, void *pool_data); -static inline int -mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc) -{ - return mempool_init(pool, min_nr, mempool_alloc_slab, - mempool_free_slab, (void *) kc); -} - -static inline mempool_t * -mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) -{ - return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, - (void *) kc); -} +#define mempool_init_slab_pool(_pool, _min_nr, _kc) \ + mempool_init(_pool, (_min_nr), mempool_alloc_slab, mempool_free_slab, (void *)(_kc)) +#define mempool_create_slab_pool(_min_nr, _kc) \ + mempool_create((_min_nr), mempool_alloc_slab, mempool_free_slab, (void *)(_kc)) /* * a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the @@ -83,17 +90,12 @@ mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); void mempool_kfree(void *element, void *pool_data); -static inline int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size) -{ - return mempool_init(pool, min_nr, mempool_kmalloc, - mempool_kfree, (void *) size); -} - -static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) -{ - return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, - (void *) size); -} +#define mempool_init_kmalloc_pool(_pool, _min_nr, _size) \ + mempool_init(_pool, (_min_nr), mempool_kmalloc, mempool_kfree, \ + (void *)(unsigned long)(_size)) +#define mempool_create_kmalloc_pool(_min_nr, _size) \ + mempool_create((_min_nr), mempool_kmalloc, mempool_kfree, \ + (void *)(unsigned long)(_size)) void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data); void mempool_kvfree(void *element, void *pool_data); @@ -115,16 +117,11 @@ static inline mempool_t *mempool_create_kvmalloc_pool(int min_nr, size_t size) void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); void mempool_free_pages(void *element, void *pool_data); -static inline 
int mempool_init_page_pool(mempool_t *pool, int min_nr, int order) -{ - return mempool_init(pool, min_nr, mempool_alloc_pages, - mempool_free_pages, (void *)(long)order); -} - -static inline mempool_t *mempool_create_page_pool(int min_nr, int order) -{ - return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, - (void *)(long)order); -} +#define mempool_init_page_pool(_pool, _min_nr, _order) \ + mempool_init(_pool, (_min_nr), mempool_alloc_pages, \ + mempool_free_pages, (void *)(long)(_order)) +#define mempool_create_page_pool(_min_nr, _order) \ + mempool_create((_min_nr), mempool_alloc_pages, \ + mempool_free_pages, (void *)(long)(_order)) #endif /* _LINUX_MEMPOOL_H */ diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index f1755163dd..8c0a33a2e9 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -19,6 +19,7 @@ enum axp20x_variants { AXP223_ID, AXP288_ID, AXP313A_ID, + AXP717_ID, AXP803_ID, AXP806_ID, AXP809_ID, @@ -104,15 +105,47 @@ enum axp20x_variants { #define AXP313A_ON_INDICATE 0x00 #define AXP313A_OUTPUT_CONTROL 0x10 -#define AXP313A_DCDC1_CONRTOL 0x13 -#define AXP313A_DCDC2_CONRTOL 0x14 -#define AXP313A_DCDC3_CONRTOL 0x15 -#define AXP313A_ALDO1_CONRTOL 0x16 -#define AXP313A_DLDO1_CONRTOL 0x17 +#define AXP313A_DCDC1_CONTROL 0x13 +#define AXP313A_DCDC2_CONTROL 0x14 +#define AXP313A_DCDC3_CONTROL 0x15 +#define AXP313A_ALDO1_CONTROL 0x16 +#define AXP313A_DLDO1_CONTROL 0x17 #define AXP313A_SHUTDOWN_CTRL 0x1a #define AXP313A_IRQ_EN 0x20 #define AXP313A_IRQ_STATE 0x21 +#define AXP717_ON_INDICATE 0x00 +#define AXP717_IRQ0_EN 0x40 +#define AXP717_IRQ1_EN 0x41 +#define AXP717_IRQ2_EN 0x42 +#define AXP717_IRQ3_EN 0x43 +#define AXP717_IRQ4_EN 0x44 +#define AXP717_IRQ0_STATE 0x48 +#define AXP717_IRQ1_STATE 0x49 +#define AXP717_IRQ2_STATE 0x4a +#define AXP717_IRQ3_STATE 0x4b +#define AXP717_IRQ4_STATE 0x4c +#define AXP717_DCDC_OUTPUT_CONTROL 0x80 +#define AXP717_DCDC1_CONTROL 0x83 +#define AXP717_DCDC2_CONTROL 0x84 +#define AXP717_DCDC3_CONTROL 0x85 +#define AXP717_DCDC4_CONTROL 0x86 +#define AXP717_LDO0_OUTPUT_CONTROL 0x90 +#define AXP717_LDO1_OUTPUT_CONTROL 0x91 +#define AXP717_ALDO1_CONTROL 0x93 +#define AXP717_ALDO2_CONTROL 0x94 +#define AXP717_ALDO3_CONTROL 0x95 +#define AXP717_ALDO4_CONTROL 0x96 +#define AXP717_BLDO1_CONTROL 0x97 +#define AXP717_BLDO2_CONTROL 0x98 +#define AXP717_BLDO3_CONTROL 0x99 +#define AXP717_BLDO4_CONTROL 0x9a +#define AXP717_CLDO1_CONTROL 0x9b +#define AXP717_CLDO2_CONTROL 0x9c +#define AXP717_CLDO3_CONTROL 0x9d +#define AXP717_CLDO4_CONTROL 0x9e +#define AXP717_CPUSLDO_CONTROL 0x9f + #define AXP806_STARTUP_SRC 0x00 #define AXP806_CHIP_ID 0x03 #define AXP806_PWR_OUT_CTRL1 0x10 @@ -434,6 +467,27 @@ enum { }; enum { + AXP717_DCDC1 = 0, + AXP717_DCDC2, + AXP717_DCDC3, + AXP717_DCDC4, + AXP717_ALDO1, + AXP717_ALDO2, + AXP717_ALDO3, + AXP717_ALDO4, + AXP717_BLDO1, + AXP717_BLDO2, + AXP717_BLDO3, + AXP717_BLDO4, + AXP717_CLDO1, + AXP717_CLDO2, + AXP717_CLDO3, + AXP717_CLDO4, + AXP717_CPUSLDO, + AXP717_REG_ID_MAX, +}; + +enum { AXP806_DCDCA = 0, AXP806_DCDCB, AXP806_DCDCC, @@ -732,6 +786,40 @@ enum axp313a_irqs { AXP313A_IRQ_PEK_RIS_EDGE, }; +enum axp717_irqs { + AXP717_IRQ_VBUS_FAULT, + AXP717_IRQ_VBUS_OVER_V, + AXP717_IRQ_BOOST_OVER_V, + AXP717_IRQ_GAUGE_NEW_SOC = 4, + AXP717_IRQ_SOC_DROP_LVL1 = 6, + AXP717_IRQ_SOC_DROP_LVL2, + AXP717_IRQ_PEK_RIS_EDGE, + AXP717_IRQ_PEK_FAL_EDGE, + AXP717_IRQ_PEK_LONG, + AXP717_IRQ_PEK_SHORT, + AXP717_IRQ_BATT_REMOVAL, + AXP717_IRQ_BATT_PLUGIN, + AXP717_IRQ_VBUS_REMOVAL, + 
AXP717_IRQ_VBUS_PLUGIN, + AXP717_IRQ_BATT_OVER_V, + AXP717_IRQ_CHARG_TIMER, + AXP717_IRQ_DIE_TEMP_HIGH, + AXP717_IRQ_CHARG, + AXP717_IRQ_CHARG_DONE, + AXP717_IRQ_BATT_OVER_CURR, + AXP717_IRQ_LDO_OVER_CURR, + AXP717_IRQ_WDOG_EXPIRE, + AXP717_IRQ_BATT_ACT_TEMP_LOW, + AXP717_IRQ_BATT_ACT_TEMP_HIGH, + AXP717_IRQ_BATT_CHG_TEMP_LOW, + AXP717_IRQ_BATT_CHG_TEMP_HIGH, + AXP717_IRQ_BATT_QUIT_TEMP_HIGH, + AXP717_IRQ_BC_USB_CHNG = 30, + AXP717_IRQ_BC_USB_DONE, + AXP717_IRQ_TYPEC_PLUGIN = 37, + AXP717_IRQ_TYPEC_REMOVE, +}; + enum axp803_irqs { AXP803_IRQ_ACIN_OVER_V = 1, AXP803_IRQ_ACIN_PLUGIN, diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h index ee66c97510..988f1cd900 100644 --- a/include/linux/mfd/intel-m10-bmc.h +++ b/include/linux/mfd/intel-m10-bmc.h @@ -205,6 +205,7 @@ struct m10bmc_csr_map { unsigned int pr_reh_addr; unsigned int pr_magic; unsigned int rsu_update_counter; + unsigned int staging_size; }; /** diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h index 51b47966a0..fd17bec2a3 100644 --- a/include/linux/mfd/lp8788.h +++ b/include/linux/mfd/lp8788.h @@ -11,7 +11,6 @@ #define __MFD_LP8788_H__ #include <linux/irqdomain.h> -#include <linux/pwm.h> #include <linux/regmap.h> #define LP8788_DEV_BUCK "lp8788-buck" @@ -87,12 +86,6 @@ enum lp8788_charger_event { CHARGER_DETECTED, }; -enum lp8788_bl_ctrl_mode { - LP8788_BL_REGISTER_ONLY, - LP8788_BL_COMB_PWM_BASED, /* PWM + I2C, changed by PWM input */ - LP8788_BL_COMB_REGISTER_BASED, /* PWM + I2C, changed by I2C */ -}; - enum lp8788_bl_dim_mode { LP8788_DIM_EXPONENTIAL, LP8788_DIM_LINEAR, @@ -202,31 +195,6 @@ struct lp8788_charger_platform_data { }; /* - * struct lp8788_backlight_platform_data - * @name : backlight driver name. (default: "lcd-backlight") - * @initial_brightness : initial value of backlight brightness - * @bl_mode : brightness control by pwm or lp8788 register - * @dim_mode : dimming mode selection - * @full_scale : full scale current setting - * @rise_time : brightness ramp up step time - * @fall_time : brightness ramp down step time - * @pwm_pol : pwm polarity setting when bl_mode is pwm based - * @period_ns : platform specific pwm period value. unit is nano. - Only valid when bl_mode is LP8788_BL_COMB_PWM_BASED - */ -struct lp8788_backlight_platform_data { - char *name; - int initial_brightness; - enum lp8788_bl_ctrl_mode bl_mode; - enum lp8788_bl_dim_mode dim_mode; - enum lp8788_bl_full_scale_current full_scale; - enum lp8788_bl_ramp_step rise_time; - enum lp8788_bl_ramp_step fall_time; - enum pwm_polarity pwm_pol; - unsigned int period_ns; -}; - -/* * struct lp8788_led_platform_data * @name : led driver name. 
(default: "keyboard-backlight") * @scale : current scale @@ -267,7 +235,6 @@ struct lp8788_vib_platform_data { * @buck2_dvs : configurations for buck2 dvs * @chg_pdata : platform data for charger driver * @alarm_sel : rtc alarm selection (1 or 2) - * @bl_pdata : configurable data for backlight driver * @led_pdata : configurable data for led driver * @vib_pdata : configurable data for vibrator driver * @adc_pdata : iio map data for adc driver @@ -289,9 +256,6 @@ struct lp8788_platform_data { /* rtc alarm */ enum lp8788_alarm_sel alarm_sel; - /* backlight */ - struct lp8788_backlight_platform_data *bl_pdata; - /* current sinks */ struct lp8788_led_platform_data *led_pdata; struct lp8788_vib_platform_data *vib_pdata; diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index 78e167a924..69cbea78b4 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -113,6 +113,148 @@ enum rk808_reg { #define RK808_INT_STS_MSK_REG2 0x4f #define RK808_IO_POL_REG 0x50 +/* RK816 */ +enum rk816_reg { + RK816_ID_DCDC1, + RK816_ID_DCDC2, + RK816_ID_DCDC3, + RK816_ID_DCDC4, + RK816_ID_LDO1, + RK816_ID_LDO2, + RK816_ID_LDO3, + RK816_ID_LDO4, + RK816_ID_LDO5, + RK816_ID_LDO6, + RK816_ID_BOOST, + RK816_ID_OTG_SW, +}; + +enum rk816_irqs { + /* INT_STS_REG1 */ + RK816_IRQ_PWRON_FALL, + RK816_IRQ_PWRON_RISE, + + /* INT_STS_REG2 */ + RK816_IRQ_VB_LOW, + RK816_IRQ_PWRON, + RK816_IRQ_PWRON_LP, + RK816_IRQ_HOTDIE, + RK816_IRQ_RTC_ALARM, + RK816_IRQ_RTC_PERIOD, + RK816_IRQ_USB_OV, + + /* INT_STS_REG3 */ + RK816_IRQ_PLUG_IN, + RK816_IRQ_PLUG_OUT, + RK816_IRQ_CHG_OK, + RK816_IRQ_CHG_TE, + RK816_IRQ_CHG_TS, + RK816_IRQ_CHG_CVTLIM, + RK816_IRQ_DISCHG_ILIM, +}; + +/* power channel registers */ +#define RK816_DCDC_EN_REG1 0x23 + +#define RK816_DCDC_EN_REG2 0x24 +#define RK816_BOOST_EN BIT(1) +#define RK816_OTG_EN BIT(2) +#define RK816_BOOST_EN_MSK BIT(5) +#define RK816_OTG_EN_MSK BIT(6) +#define RK816_BUCK_DVS_CONFIRM BIT(7) + +#define RK816_LDO_EN_REG1 0x27 + +#define RK816_LDO_EN_REG2 0x28 + +/* interrupt registers and irq definitions */ +#define RK816_INT_STS_REG1 0x49 +#define RK816_INT_STS_MSK_REG1 0x4a +#define RK816_INT_STS_PWRON_FALL BIT(5) +#define RK816_INT_STS_PWRON_RISE BIT(6) + +#define RK816_INT_STS_REG2 0x4c +#define RK816_INT_STS_MSK_REG2 0x4d +#define RK816_INT_STS_VB_LOW BIT(1) +#define RK816_INT_STS_PWRON BIT(2) +#define RK816_INT_STS_PWRON_LP BIT(3) +#define RK816_INT_STS_HOTDIE BIT(4) +#define RK816_INT_STS_RTC_ALARM BIT(5) +#define RK816_INT_STS_RTC_PERIOD BIT(6) +#define RK816_INT_STS_USB_OV BIT(7) + +#define RK816_INT_STS_REG3 0x4e +#define RK816_INT_STS_MSK_REG3 0x4f +#define RK816_INT_STS_PLUG_IN BIT(0) +#define RK816_INT_STS_PLUG_OUT BIT(1) +#define RK816_INT_STS_CHG_OK BIT(2) +#define RK816_INT_STS_CHG_TE BIT(3) +#define RK816_INT_STS_CHG_TS BIT(4) +#define RK816_INT_STS_CHG_CVTLIM BIT(6) +#define RK816_INT_STS_DISCHG_ILIM BIT(7) + +#define RK816_IRQ_STS_OFFSET(x) ((x) - RK816_INT_STS_REG1) +#define RK816_IRQ_MSK_OFFSET(x) ((x) - RK816_INT_STS_MSK_REG1) + +/* charger, boost and OTG registers */ +#define RK816_OTG_BUCK_LDO_CONFIG_REG 0x2a +#define RK816_CHRG_CONFIG_REG 0x2b +#define RK816_BOOST_ON_VESL_REG 0x54 +#define RK816_BOOST_SLP_VSEL_REG 0x55 +#define RK816_CHRG_BOOST_CONFIG_REG 0x9a +#define RK816_SUP_STS_REG 0xa0 +#define RK816_USB_CTRL_REG 0xa1 +#define RK816_CHRG_CTRL(x) (0xa3 + (x)) +#define RK816_BAT_CTRL_REG 0xa6 +#define RK816_BAT_HTS_TS_REG 0xa8 +#define RK816_BAT_LTS_TS_REG 0xa9 + +/* adc and fuel gauge registers */ +#define RK816_TS_CTRL_REG 0xac 
+#define RK816_ADC_CTRL_REG 0xad +#define RK816_GGCON_REG 0xb0 +#define RK816_GGSTS_REG 0xb1 +#define RK816_ZERO_CUR_ADC_REGH 0xb2 +#define RK816_ZERO_CUR_ADC_REGL 0xb3 +#define RK816_GASCNT_CAL_REG(x) (0xb7 - (x)) +#define RK816_GASCNT_REG(x) (0xbb - (x)) +#define RK816_BAT_CUR_AVG_REGH 0xbc +#define RK816_BAT_CUR_AVG_REGL 0xbd +#define RK816_TS_ADC_REGH 0xbe +#define RK816_TS_ADC_REGL 0xbf +#define RK816_USB_ADC_REGH 0xc0 +#define RK816_USB_ADC_REGL 0xc1 +#define RK816_BAT_OCV_REGH 0xc2 +#define RK816_BAT_OCV_REGL 0xc3 +#define RK816_BAT_VOL_REGH 0xc4 +#define RK816_BAT_VOL_REGL 0xc5 +#define RK816_RELAX_ENTRY_THRES_REGH 0xc6 +#define RK816_RELAX_ENTRY_THRES_REGL 0xc7 +#define RK816_RELAX_EXIT_THRES_REGH 0xc8 +#define RK816_RELAX_EXIT_THRES_REGL 0xc9 +#define RK816_RELAX_VOL1_REGH 0xca +#define RK816_RELAX_VOL1_REGL 0xcb +#define RK816_RELAX_VOL2_REGH 0xcc +#define RK816_RELAX_VOL2_REGL 0xcd +#define RK816_RELAX_CUR1_REGH 0xce +#define RK816_RELAX_CUR1_REGL 0xcf +#define RK816_RELAX_CUR2_REGH 0xd0 +#define RK816_RELAX_CUR2_REGL 0xd1 +#define RK816_CAL_OFFSET_REGH 0xd2 +#define RK816_CAL_OFFSET_REGL 0xd3 +#define RK816_NON_ACT_TIMER_CNT_REG 0xd4 +#define RK816_VCALIB0_REGH 0xd5 +#define RK816_VCALIB0_REGL 0xd6 +#define RK816_VCALIB1_REGH 0xd7 +#define RK816_VCALIB1_REGL 0xd8 +#define RK816_FCC_GASCNT_REG(x) (0xdc - (x)) +#define RK816_IOFFSET_REGH 0xdd +#define RK816_IOFFSET_REGL 0xde +#define RK816_SLEEP_CON_SAMP_CUR_REG 0xdf + +/* general purpose data registers 0xe0 ~ 0xf2 */ +#define RK816_DATA_REG(x) (0xe0 + (x)) + /* RK818 */ #define RK818_DCDC1 0 #define RK818_LDO1 4 @@ -791,6 +933,7 @@ enum rk806_dvs_mode { #define VOUT_LO_INT BIT(0) #define CLK32KOUT2_EN BIT(0) +#define TEMP105C 0x08 #define TEMP115C 0x0c #define TEMP_HOTDIE_MSK 0x0c #define SLP_SD_MSK (0x3 << 2) @@ -1191,6 +1334,7 @@ enum { RK806_ID = 0x8060, RK808_ID = 0x0000, RK809_ID = 0x8090, + RK816_ID = 0x8160, RK817_ID = 0x8170, RK818_ID = 0x8180, }; diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h index 3b5f3a7db4..ce786c9640 100644 --- a/include/linux/mfd/rohm-bd71828.h +++ b/include/linux/mfd/rohm-bd71828.h @@ -4,6 +4,7 @@ #ifndef __LINUX_MFD_BD71828_H__ #define __LINUX_MFD_BD71828_H__ +#include <linux/bits.h> #include <linux/mfd/rohm-generic.h> #include <linux/mfd/rohm-shared.h> @@ -41,7 +42,8 @@ enum { #define BD71828_REG_PS_CTRL_2 0x05 #define BD71828_REG_PS_CTRL_3 0x06 -//#define BD71828_REG_SWRESET 0x06 +#define BD71828_MASK_STATE_HBNT BIT(1) + #define BD71828_MASK_RUN_LVL_CTRL 0x30 /* Regulator control masks */ @@ -133,7 +135,6 @@ enum { #define BD71828_REG_LDO5_VOLT 0x43 #define BD71828_REG_LDO5_VOLT_OPT 0x42 #define BD71828_REG_LDO6_EN 0x44 -//#define BD71828_REG_LDO6_VOLT 0x4 #define BD71828_REG_LDO7_EN 0x45 #define BD71828_REG_LDO7_VOLT 0x46 diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h index ca35af3074..9eb17481b0 100644 --- a/include/linux/mfd/stm32-timers.h +++ b/include/linux/mfd/stm32-timers.h @@ -41,6 +41,11 @@ #define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */ #define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */ #define TIM_DIER_UIE BIT(0) /* Update interrupt */ +#define TIM_DIER_CC1IE BIT(1) /* CC1 Interrupt Enable */ +#define TIM_DIER_CC2IE BIT(2) /* CC2 Interrupt Enable */ +#define TIM_DIER_CC3IE BIT(3) /* CC3 Interrupt Enable */ +#define TIM_DIER_CC4IE BIT(4) /* CC4 Interrupt Enable */ +#define TIM_DIER_CC_IE(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt enable */ #define TIM_DIER_UDE 
BIT(8) /* Update DMA request Enable */ #define TIM_DIER_CC1DE BIT(9) /* CC1 DMA request Enable */ #define TIM_DIER_CC2DE BIT(10) /* CC2 DMA request Enable */ @@ -49,6 +54,7 @@ #define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */ #define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */ #define TIM_SR_UIF BIT(0) /* Update interrupt flag */ +#define TIM_SR_CC_IF(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt flag */ #define TIM_EGR_UG BIT(0) /* Update Generation */ #define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */ #define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */ @@ -60,16 +66,23 @@ #define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */ #define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */ #define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */ +#define TIM_CCMR_CC3S (BIT(0) | BIT(1)) /* Capture/compare 3 sel */ +#define TIM_CCMR_CC4S (BIT(8) | BIT(9)) /* Capture/compare 4 sel */ +#define TIM_CCMR_CC3S_TI3 BIT(0) /* IC3 selects TI3 */ +#define TIM_CCMR_CC4S_TI4 BIT(8) /* IC4 selects TI4 */ #define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */ #define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */ #define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */ #define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */ #define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */ #define TIM_CCER_CC2P BIT(5) /* Capt/Comp 2 Polarity */ +#define TIM_CCER_CC2NP BIT(7) /* Capt/Comp 2N Polarity */ #define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */ #define TIM_CCER_CC3P BIT(9) /* Capt/Comp 3 Polarity */ +#define TIM_CCER_CC3NP BIT(11) /* Capt/Comp 3N Polarity */ #define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */ #define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */ +#define TIM_CCER_CC4NP BIT(15) /* Capt/Comp 4N Polarity */ #define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12)) #define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */ #define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */ diff --git a/include/linux/mfd/tps6594.h b/include/linux/mfd/tps6594.h index 3f7c5e23cd..16543fd4d8 100644 --- a/include/linux/mfd/tps6594.h +++ b/include/linux/mfd/tps6594.h @@ -18,12 +18,13 @@ enum pmic_id { TPS6594, TPS6593, LP8764, + TPS65224, }; /* Macro to get page index from register address */ #define TPS6594_REG_TO_PAGE(reg) ((reg) >> 8) -/* Registers for page 0 of TPS6594 */ +/* Registers for page 0 */ #define TPS6594_REG_DEV_REV 0x01 #define TPS6594_REG_NVM_CODE_1 0x02 @@ -56,9 +57,6 @@ enum pmic_id { #define TPS6594_REG_GPIOX_OUT(gpio_inst) (TPS6594_REG_GPIO_OUT_1 + (gpio_inst) / 8) #define TPS6594_REG_GPIOX_IN(gpio_inst) (TPS6594_REG_GPIO_IN_1 + (gpio_inst) / 8) -#define TPS6594_REG_GPIO_IN_1 0x3f -#define TPS6594_REG_GPIO_IN_2 0x40 - #define TPS6594_REG_RAIL_SEL_1 0x41 #define TPS6594_REG_RAIL_SEL_2 0x42 #define TPS6594_REG_RAIL_SEL_3 0x43 @@ -70,13 +68,15 @@ enum pmic_id { #define TPS6594_REG_FSM_TRIG_MASK_3 0x48 #define TPS6594_REG_MASK_BUCK1_2 0x49 +#define TPS65224_REG_MASK_BUCKS 0x49 #define TPS6594_REG_MASK_BUCK3_4 0x4a #define TPS6594_REG_MASK_BUCK5 0x4b #define TPS6594_REG_MASK_LDO1_2 0x4c +#define TPS65224_REG_MASK_LDOS 0x4c #define TPS6594_REG_MASK_LDO3_4 0x4d #define TPS6594_REG_MASK_VMON 0x4e -#define TPS6594_REG_MASK_GPIO1_8_FALL 0x4f -#define TPS6594_REG_MASK_GPIO1_8_RISE 0x50 +#define TPS6594_REG_MASK_GPIO_FALL 0x4f +#define TPS6594_REG_MASK_GPIO_RISE 0x50 #define TPS6594_REG_MASK_GPIO9_11 0x51 #define TPS6594_REG_MASK_STARTUP 0x52 #define TPS6594_REG_MASK_MISC 0x53 @@ -174,6 +174,10 @@ enum pmic_id { 
#define TPS6594_REG_REGISTER_LOCK 0xa1 +#define TPS65224_REG_SRAM_ACCESS_1 0xa2 +#define TPS65224_REG_SRAM_ACCESS_2 0xa3 +#define TPS65224_REG_SRAM_ADDR_CTRL 0xa4 +#define TPS65224_REG_RECOV_CNT_PFSM_INCR 0xa5 #define TPS6594_REG_MANUFACTURING_VER 0xa6 #define TPS6594_REG_CUSTOMER_NVM_ID_REG 0xa7 @@ -182,6 +186,9 @@ enum pmic_id { #define TPS6594_REG_SOFT_REBOOT_REG 0xab +#define TPS65224_REG_ADC_CTRL 0xac +#define TPS65224_REG_ADC_RESULT_REG_1 0xad +#define TPS65224_REG_ADC_RESULT_REG_2 0xae #define TPS6594_REG_RTC_SECONDS 0xb5 #define TPS6594_REG_RTC_MINUTES 0xb6 #define TPS6594_REG_RTC_HOURS 0xb7 @@ -199,6 +206,7 @@ enum pmic_id { #define TPS6594_REG_RTC_CTRL_1 0xc2 #define TPS6594_REG_RTC_CTRL_2 0xc3 +#define TPS65224_REG_STARTUP_CTRL 0xc3 #define TPS6594_REG_RTC_STATUS 0xc4 #define TPS6594_REG_RTC_INTERRUPTS 0xc5 #define TPS6594_REG_RTC_COMP_LSB 0xc6 @@ -214,13 +222,17 @@ enum pmic_id { #define TPS6594_REG_PFSM_DELAY_REG_2 0xce #define TPS6594_REG_PFSM_DELAY_REG_3 0xcf #define TPS6594_REG_PFSM_DELAY_REG_4 0xd0 +#define TPS65224_REG_ADC_GAIN_COMP_REG 0xd0 +#define TPS65224_REG_CRC_CALC_CONTROL 0xef +#define TPS65224_REG_REGMAP_USER_CRC_LOW 0xf0 +#define TPS65224_REG_REGMAP_USER_CRC_HIGH 0xf1 -/* Registers for page 1 of TPS6594 */ +/* Registers for page 1 */ #define TPS6594_REG_SERIAL_IF_CONFIG 0x11a #define TPS6594_REG_I2C1_ID 0x122 #define TPS6594_REG_I2C2_ID 0x123 -/* Registers for page 4 of TPS6594 */ +/* Registers for page 4 */ #define TPS6594_REG_WD_ANSWER_REG 0x401 #define TPS6594_REG_WD_QUESTION_ANSW_CNT 0x402 #define TPS6594_REG_WD_WIN1_CFG 0x403 @@ -241,16 +253,26 @@ enum pmic_id { #define TPS6594_BIT_BUCK_PLDN BIT(5) #define TPS6594_BIT_BUCK_RV_SEL BIT(7) -/* BUCKX_CONF register field definition */ +/* TPS6594 BUCKX_CONF register field definition */ #define TPS6594_MASK_BUCK_SLEW_RATE GENMASK(2, 0) #define TPS6594_MASK_BUCK_ILIM GENMASK(5, 3) -/* BUCKX_PG_WINDOW register field definition */ +/* TPS65224 BUCKX_CONF register field definition */ +#define TPS65224_MASK_BUCK_SLEW_RATE GENMASK(1, 0) + +/* TPS6594 BUCKX_PG_WINDOW register field definition */ #define TPS6594_MASK_BUCK_OV_THR GENMASK(2, 0) #define TPS6594_MASK_BUCK_UV_THR GENMASK(5, 3) -/* BUCKX VSET */ -#define TPS6594_MASK_BUCKS_VSET GENMASK(7, 0) +/* TPS65224 BUCKX_PG_WINDOW register field definition */ +#define TPS65224_MASK_BUCK_VMON_THR GENMASK(1, 0) + +/* TPS6594 BUCKX_VOUT register field definition */ +#define TPS6594_MASK_BUCKS_VSET GENMASK(7, 0) + +/* TPS65224 BUCKX_VOUT register field definition */ +#define TPS65224_MASK_BUCK1_VSET GENMASK(7, 0) +#define TPS65224_MASK_BUCKS_VSET GENMASK(6, 0) /* LDOX_CTRL register field definition */ #define TPS6594_BIT_LDO_EN BIT(0) @@ -258,6 +280,7 @@ enum pmic_id { #define TPS6594_BIT_LDO_VMON_EN BIT(4) #define TPS6594_MASK_LDO_PLDN GENMASK(6, 5) #define TPS6594_BIT_LDO_RV_SEL BIT(7) +#define TPS65224_BIT_LDO_DISCHARGE_EN BIT(5) /* LDORTC_CTRL register field definition */ #define TPS6594_BIT_LDORTC_DIS BIT(0) @@ -271,6 +294,9 @@ enum pmic_id { #define TPS6594_MASK_LDO_OV_THR GENMASK(2, 0) #define TPS6594_MASK_LDO_UV_THR GENMASK(5, 3) +/* LDOX_PG_WINDOW register field definition */ +#define TPS65224_MASK_LDO_VMON_THR GENMASK(1, 0) + /* VCCA_VMON_CTRL register field definition */ #define TPS6594_BIT_VMON_EN BIT(0) #define TPS6594_BIT_VMON1_EN BIT(1) @@ -278,10 +304,12 @@ enum pmic_id { #define TPS6594_BIT_VMON2_EN BIT(3) #define TPS6594_BIT_VMON2_RV_SEL BIT(4) #define TPS6594_BIT_VMON_DEGLITCH_SEL BIT(5) +#define TPS65224_BIT_VMON_DEGLITCH_SEL GENMASK(7, 5) /* 
VCCA_PG_WINDOW register field definition */ #define TPS6594_MASK_VCCA_OV_THR GENMASK(2, 0) #define TPS6594_MASK_VCCA_UV_THR GENMASK(5, 3) +#define TPS65224_MASK_VCCA_VMON_THR GENMASK(1, 0) #define TPS6594_BIT_VCCA_PG_SET BIT(6) /* VMONX_PG_WINDOW register field definition */ @@ -289,6 +317,9 @@ enum pmic_id { #define TPS6594_MASK_VMONX_UV_THR GENMASK(5, 3) #define TPS6594_BIT_VMONX_RANGE BIT(6) +/* VMONX_PG_WINDOW register field definition */ +#define TPS65224_MASK_VMONX_THR GENMASK(1, 0) + /* GPIOX_CONF register field definition */ #define TPS6594_BIT_GPIO_DIR BIT(0) #define TPS6594_BIT_GPIO_OD BIT(1) @@ -296,6 +327,8 @@ enum pmic_id { #define TPS6594_BIT_GPIO_PU_PD_EN BIT(3) #define TPS6594_BIT_GPIO_DEGLITCH_EN BIT(4) #define TPS6594_MASK_GPIO_SEL GENMASK(7, 5) +#define TPS65224_MASK_GPIO_SEL GENMASK(6, 5) +#define TPS65224_MASK_GPIO_SEL_GPIO6 GENMASK(7, 5) /* NPWRON_CONF register field definition */ #define TPS6594_BIT_NRSTOUT_OD BIT(0) @@ -305,6 +338,12 @@ enum pmic_id { #define TPS6594_BIT_ENABLE_POL BIT(5) #define TPS6594_MASK_NPWRON_SEL GENMASK(7, 6) +/* POWER_ON_CONFIG register field definition */ +#define TPS65224_BIT_NINT_ENDRV_PU_SEL BIT(0) +#define TPS65224_BIT_NINT_ENDRV_SEL BIT(1) +#define TPS65224_BIT_EN_PB_DEGL BIT(5) +#define TPS65224_MASK_EN_PB_VSENSE_CONFIG GENMASK(7, 6) + /* GPIO_OUT_X register field definition */ #define TPS6594_BIT_GPIOX_OUT(gpio_inst) BIT((gpio_inst) % 8) @@ -312,6 +351,12 @@ enum pmic_id { #define TPS6594_BIT_GPIOX_IN(gpio_inst) BIT((gpio_inst) % 8) #define TPS6594_BIT_NPWRON_IN BIT(3) +/* GPIO_OUT_X register field definition */ +#define TPS65224_BIT_GPIOX_OUT(gpio_inst) BIT((gpio_inst)) + +/* GPIO_IN_X register field definition */ +#define TPS65224_BIT_GPIOX_IN(gpio_inst) BIT((gpio_inst)) + /* RAIL_SEL_1 register field definition */ #define TPS6594_MASK_BUCK1_GRP_SEL GENMASK(1, 0) #define TPS6594_MASK_BUCK2_GRP_SEL GENMASK(3, 2) @@ -343,6 +388,9 @@ enum pmic_id { #define TPS6594_BIT_GPIOX_FSM_MASK(gpio_inst) BIT(((gpio_inst) << 1) % 8) #define TPS6594_BIT_GPIOX_FSM_MASK_POL(gpio_inst) BIT(((gpio_inst) << 1) % 8 + 1) +#define TPS65224_BIT_GPIOX_FSM_MASK(gpio_inst) BIT(((gpio_inst) << 1) % 6) +#define TPS65224_BIT_GPIOX_FSM_MASK_POL(gpio_inst) BIT(((gpio_inst) << 1) % 6 + 1) + /* MASK_BUCKX register field definition */ #define TPS6594_BIT_BUCKX_OV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8) #define TPS6594_BIT_BUCKX_UV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8 + 1) @@ -361,22 +409,46 @@ enum pmic_id { #define TPS6594_BIT_VMON2_OV_MASK BIT(5) #define TPS6594_BIT_VMON2_UV_MASK BIT(6) +/* MASK_BUCK Register field definition */ +#define TPS65224_BIT_BUCK1_UVOV_MASK BIT(0) +#define TPS65224_BIT_BUCK2_UVOV_MASK BIT(1) +#define TPS65224_BIT_BUCK3_UVOV_MASK BIT(2) +#define TPS65224_BIT_BUCK4_UVOV_MASK BIT(4) + +/* MASK_LDO_VMON register field definition */ +#define TPS65224_BIT_LDO1_UVOV_MASK BIT(0) +#define TPS65224_BIT_LDO2_UVOV_MASK BIT(1) +#define TPS65224_BIT_LDO3_UVOV_MASK BIT(2) +#define TPS65224_BIT_VCCA_UVOV_MASK BIT(4) +#define TPS65224_BIT_VMON1_UVOV_MASK BIT(5) +#define TPS65224_BIT_VMON2_UVOV_MASK BIT(6) + /* MASK_GPIOX register field definition */ #define TPS6594_BIT_GPIOX_FALL_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \ (gpio_inst) : (gpio_inst) % 8) #define TPS6594_BIT_GPIOX_RISE_MASK(gpio_inst) BIT((gpio_inst) < 8 ? 
\ (gpio_inst) : (gpio_inst) % 8 + 3) +/* MASK_GPIOX register field definition */ +#define TPS65224_BIT_GPIOX_FALL_MASK(gpio_inst) BIT((gpio_inst)) +#define TPS65224_BIT_GPIOX_RISE_MASK(gpio_inst) BIT((gpio_inst)) /* MASK_STARTUP register field definition */ #define TPS6594_BIT_NPWRON_START_MASK BIT(0) #define TPS6594_BIT_ENABLE_MASK BIT(1) #define TPS6594_BIT_FSD_MASK BIT(4) #define TPS6594_BIT_SOFT_REBOOT_MASK BIT(5) +#define TPS65224_BIT_VSENSE_MASK BIT(0) +#define TPS65224_BIT_PB_SHORT_MASK BIT(2) /* MASK_MISC register field definition */ #define TPS6594_BIT_BIST_PASS_MASK BIT(0) #define TPS6594_BIT_EXT_CLK_MASK BIT(1) +#define TPS65224_BIT_REG_UNLOCK_MASK BIT(2) #define TPS6594_BIT_TWARN_MASK BIT(3) +#define TPS65224_BIT_PB_LONG_MASK BIT(4) +#define TPS65224_BIT_PB_FALL_MASK BIT(5) +#define TPS65224_BIT_PB_RISE_MASK BIT(6) +#define TPS65224_BIT_ADC_CONV_READY_MASK BIT(7) /* MASK_MODERATE_ERR register field definition */ #define TPS6594_BIT_BIST_FAIL_MASK BIT(1) @@ -391,6 +463,8 @@ enum pmic_id { #define TPS6594_BIT_ORD_SHUTDOWN_MASK BIT(1) #define TPS6594_BIT_MCU_PWR_ERR_MASK BIT(2) #define TPS6594_BIT_SOC_PWR_ERR_MASK BIT(3) +#define TPS65224_BIT_COMM_ERR_MASK BIT(4) +#define TPS65224_BIT_I2C2_ERR_MASK BIT(5) /* MASK_COMM_ERR register field definition */ #define TPS6594_BIT_COMM_FRM_ERR_MASK BIT(0) @@ -426,6 +500,12 @@ enum pmic_id { #define TPS6594_BIT_BUCK3_4_INT BIT(1) #define TPS6594_BIT_BUCK5_INT BIT(2) +/* INT_BUCK register field definition */ +#define TPS65224_BIT_BUCK1_UVOV_INT BIT(0) +#define TPS65224_BIT_BUCK2_UVOV_INT BIT(1) +#define TPS65224_BIT_BUCK3_UVOV_INT BIT(2) +#define TPS65224_BIT_BUCK4_UVOV_INT BIT(3) + /* INT_BUCKX register field definition */ #define TPS6594_BIT_BUCKX_OV_INT(buck_inst) BIT(((buck_inst) << 2) % 8) #define TPS6594_BIT_BUCKX_UV_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 1) @@ -437,6 +517,14 @@ enum pmic_id { #define TPS6594_BIT_LDO3_4_INT BIT(1) #define TPS6594_BIT_VCCA_INT BIT(4) +/* INT_LDO_VMON register field definition */ +#define TPS65224_BIT_LDO1_UVOV_INT BIT(0) +#define TPS65224_BIT_LDO2_UVOV_INT BIT(1) +#define TPS65224_BIT_LDO3_UVOV_INT BIT(2) +#define TPS65224_BIT_VCCA_UVOV_INT BIT(4) +#define TPS65224_BIT_VMON1_UVOV_INT BIT(5) +#define TPS65224_BIT_VMON2_UVOV_INT BIT(6) + /* INT_LDOX register field definition */ #define TPS6594_BIT_LDOX_OV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8) #define TPS6594_BIT_LDOX_UV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1) @@ -462,17 +550,32 @@ enum pmic_id { /* INT_GPIOX register field definition */ #define TPS6594_BIT_GPIOX_INT(gpio_inst) BIT(gpio_inst) +/* INT_GPIO register field definition */ +#define TPS65224_BIT_GPIO1_INT BIT(0) +#define TPS65224_BIT_GPIO2_INT BIT(1) +#define TPS65224_BIT_GPIO3_INT BIT(2) +#define TPS65224_BIT_GPIO4_INT BIT(3) +#define TPS65224_BIT_GPIO5_INT BIT(4) +#define TPS65224_BIT_GPIO6_INT BIT(5) + /* INT_STARTUP register field definition */ #define TPS6594_BIT_NPWRON_START_INT BIT(0) +#define TPS65224_BIT_VSENSE_INT BIT(0) #define TPS6594_BIT_ENABLE_INT BIT(1) #define TPS6594_BIT_RTC_INT BIT(2) +#define TPS65224_BIT_PB_SHORT_INT BIT(2) #define TPS6594_BIT_FSD_INT BIT(4) #define TPS6594_BIT_SOFT_REBOOT_INT BIT(5) /* INT_MISC register field definition */ #define TPS6594_BIT_BIST_PASS_INT BIT(0) #define TPS6594_BIT_EXT_CLK_INT BIT(1) +#define TPS65224_BIT_REG_UNLOCK_INT BIT(2) #define TPS6594_BIT_TWARN_INT BIT(3) +#define TPS65224_BIT_PB_LONG_INT BIT(4) +#define TPS65224_BIT_PB_FALL_INT BIT(5) +#define TPS65224_BIT_PB_RISE_INT BIT(6) +#define TPS65224_BIT_ADC_CONV_READY_INT BIT(7) 
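/*
 * Illustrative sketch, not part of this diff: the MASK_* bits above are
 * plain BIT()/GENMASK() values intended for regmap updates. Unmasking the
 * new TPS65224 ADC-conversion-ready interrupt could look like the snippet
 * below; the TPS6594_REG_MASK_MISC offset macro is assumed from earlier in
 * this header, and the helper function itself is hypothetical.
 */
static int tps65224_unmask_adc_irq(struct regmap *regmap)
{
	/* Clearing the mask bit lets ADC_CONV_READY interrupts be delivered. */
	return regmap_clear_bits(regmap, TPS6594_REG_MASK_MISC,
				 TPS65224_BIT_ADC_CONV_READY_MASK);
}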
/* INT_MODERATE_ERR register field definition */ #define TPS6594_BIT_TSD_ORD_INT BIT(0) @@ -488,6 +591,7 @@ enum pmic_id { #define TPS6594_BIT_TSD_IMM_INT BIT(0) #define TPS6594_BIT_VCCA_OVP_INT BIT(1) #define TPS6594_BIT_PFSM_ERR_INT BIT(2) +#define TPS65224_BIT_BG_XMON_INT BIT(3) /* INT_FSM_ERR register field definition */ #define TPS6594_BIT_IMM_SHUTDOWN_INT BIT(0) @@ -496,6 +600,7 @@ enum pmic_id { #define TPS6594_BIT_SOC_PWR_ERR_INT BIT(3) #define TPS6594_BIT_COMM_ERR_INT BIT(4) #define TPS6594_BIT_READBACK_ERR_INT BIT(5) +#define TPS65224_BIT_I2C2_ERR_INT BIT(5) #define TPS6594_BIT_ESM_INT BIT(6) #define TPS6594_BIT_WD_INT BIT(7) @@ -536,8 +641,18 @@ enum pmic_id { #define TPS6594_BIT_VMON2_OV_STAT BIT(5) #define TPS6594_BIT_VMON2_UV_STAT BIT(6) +/* STAT_LDO_VMON register field definition */ +#define TPS65224_BIT_LDO1_UVOV_STAT BIT(0) +#define TPS65224_BIT_LDO2_UVOV_STAT BIT(1) +#define TPS65224_BIT_LDO3_UVOV_STAT BIT(2) +#define TPS65224_BIT_VCCA_UVOV_STAT BIT(4) +#define TPS65224_BIT_VMON1_UVOV_STAT BIT(5) +#define TPS65224_BIT_VMON2_UVOV_STAT BIT(6) + /* STAT_STARTUP register field definition */ +#define TPS65224_BIT_VSENSE_STAT BIT(0) #define TPS6594_BIT_ENABLE_STAT BIT(1) +#define TPS65224_BIT_PB_LEVEL_STAT BIT(2) /* STAT_MISC register field definition */ #define TPS6594_BIT_EXT_CLK_STAT BIT(1) @@ -549,6 +664,7 @@ enum pmic_id { /* STAT_SEVERE_ERR register field definition */ #define TPS6594_BIT_TSD_IMM_STAT BIT(0) #define TPS6594_BIT_VCCA_OVP_STAT BIT(1) +#define TPS65224_BIT_BG_XMON_STAT BIT(3) /* STAT_READBACK_ERR register field definition */ #define TPS6594_BIT_EN_DRV_READBACK_STAT BIT(0) @@ -597,6 +713,8 @@ enum pmic_id { #define TPS6594_BIT_BB_CHARGER_EN BIT(0) #define TPS6594_BIT_BB_ICHR BIT(1) #define TPS6594_MASK_BB_VEOC GENMASK(3, 2) +#define TPS65224_BIT_I2C1_SPI_CRC_EN BIT(4) +#define TPS65224_BIT_I2C2_CRC_EN BIT(5) #define TPS6594_BB_EOC_RDY BIT(7) /* ENABLE_DRV_REG register field definition */ @@ -617,6 +735,7 @@ enum pmic_id { #define TPS6594_BIT_NRSTOUT_SOC_IN BIT(2) #define TPS6594_BIT_FORCE_EN_DRV_LOW BIT(3) #define TPS6594_BIT_SPMI_LPM_EN BIT(4) +#define TPS65224_BIT_TSD_DISABLE BIT(5) /* RECOV_CNT_REG_1 register field definition */ #define TPS6594_MASK_RECOV_CNT GENMASK(3, 0) @@ -671,15 +790,27 @@ enum pmic_id { /* ESM_SOC_START_REG register field definition */ #define TPS6594_BIT_ESM_SOC_START BIT(0) +/* ESM_MCU_START_REG register field definition */ +#define TPS65224_BIT_ESM_MCU_START BIT(0) + /* ESM_SOC_MODE_CFG register field definition */ #define TPS6594_MASK_ESM_SOC_ERR_CNT_TH GENMASK(3, 0) #define TPS6594_BIT_ESM_SOC_ENDRV BIT(5) #define TPS6594_BIT_ESM_SOC_EN BIT(6) #define TPS6594_BIT_ESM_SOC_MODE BIT(7) +/* ESM_MCU_MODE_CFG register field definition */ +#define TPS65224_MASK_ESM_MCU_ERR_CNT_TH GENMASK(3, 0) +#define TPS65224_BIT_ESM_MCU_ENDRV BIT(5) +#define TPS65224_BIT_ESM_MCU_EN BIT(6) +#define TPS65224_BIT_ESM_MCU_MODE BIT(7) + /* ESM_SOC_ERR_CNT_REG register field definition */ #define TPS6594_MASK_ESM_SOC_ERR_CNT GENMASK(4, 0) +/* ESM_MCU_ERR_CNT_REG register field definition */ +#define TPS6594_MASK_ESM_MCU_ERR_CNT GENMASK(4, 0) + /* REGISTER_LOCK register field definition */ #define TPS6594_BIT_REGISTER_LOCK_STATUS BIT(0) @@ -687,6 +818,29 @@ enum pmic_id { #define TPS6594_MASK_VMON1_SLEW_RATE GENMASK(2, 0) #define TPS6594_MASK_VMON2_SLEW_RATE GENMASK(5, 3) +/* SRAM_ACCESS_1 Register field definition */ +#define TPS65224_MASk_SRAM_UNLOCK_SEQ GENMASK(7, 0) + +/* SRAM_ACCESS_2 Register field definition */ +#define TPS65224_BIT_SRAM_WRITE_MODE 
BIT(0) +#define TPS65224_BIT_OTP_PROG_USER BIT(1) +#define TPS65224_BIT_OTP_PROG_PFSM BIT(2) +#define TPS65224_BIT_OTP_PROG_STATUS BIT(3) +#define TPS65224_BIT_SRAM_UNLOCKED BIT(6) +#define TPS65224_USER_PROG_ALLOWED BIT(7) + +/* SRAM_ADDR_CTRL Register field definition */ +#define TPS65224_MASk_SRAM_SEL GENMASK(1, 0) + +/* RECOV_CNT_PFSM_INCR Register field definition */ +#define TPS65224_BIT_INCREMENT_RECOV_CNT BIT(0) + +/* MANUFACTURING_VER Register field definition */ +#define TPS65224_MASK_SILICON_REV GENMASK(7, 0) + +/* CUSTOMER_NVM_ID_REG Register field definition */ +#define TPS65224_MASK_CUSTOMER_NVM_ID GENMASK(7, 0) + /* SOFT_REBOOT_REG register field definition */ #define TPS6594_BIT_SOFT_REBOOT BIT(0) @@ -755,14 +909,83 @@ enum pmic_id { #define TPS6594_BIT_I2C2_CRC_EN BIT(2) #define TPS6594_MASK_T_CRC GENMASK(7, 3) +/* ADC_CTRL Register field definition */ +#define TPS65224_BIT_ADC_START BIT(0) +#define TPS65224_BIT_ADC_CONT_CONV BIT(1) +#define TPS65224_BIT_ADC_THERMAL_SEL BIT(2) +#define TPS65224_BIT_ADC_RDIV_EN BIT(3) +#define TPS65224_BIT_ADC_STATUS BIT(7) + +/* ADC_RESULT_REG_1 Register field definition */ +#define TPS65224_MASK_ADC_RESULT_11_4 GENMASK(7, 0) + +/* ADC_RESULT_REG_2 Register field definition */ +#define TPS65224_MASK_ADC_RESULT_3_0 GENMASK(7, 4) + +/* STARTUP_CTRL Register field definition */ +#define TPS65224_MASK_STARTUP_DEST GENMASK(6, 5) +#define TPS65224_BIT_FIRST_STARTUP_DONE BIT(7) + +/* SCRATCH_PAD_REG_1 Register field definition */ +#define TPS6594_MASK_SCRATCH_PAD_1 GENMASK(7, 0) + +/* SCRATCH_PAD_REG_2 Register field definition */ +#define TPS6594_MASK_SCRATCH_PAD_2 GENMASK(7, 0) + +/* SCRATCH_PAD_REG_3 Register field definition */ +#define TPS6594_MASK_SCRATCH_PAD_3 GENMASK(7, 0) + +/* SCRATCH_PAD_REG_4 Register field definition */ +#define TPS6594_MASK_SCRATCH_PAD_4 GENMASK(7, 0) + +/* PFSM_DELAY_REG_1 Register field definition */ +#define TPS6594_MASK_PFSM_DELAY1 GENMASK(7, 0) + +/* PFSM_DELAY_REG_2 Register field definition */ +#define TPS6594_MASK_PFSM_DELAY2 GENMASK(7, 0) + +/* PFSM_DELAY_REG_3 Register field definition */ +#define TPS6594_MASK_PFSM_DELAY3 GENMASK(7, 0) + +/* PFSM_DELAY_REG_4 Register field definition */ +#define TPS6594_MASK_PFSM_DELAY4 GENMASK(7, 0) + +/* CRC_CALC_CONTROL Register field definition */ +#define TPS65224_BIT_RUN_CRC_BIST BIT(0) +#define TPS65224_BIT_RUN_CRC_UPDATE BIT(1) + +/* ADC_GAIN_COMP_REG Register field definition */ +#define TPS65224_MASK_ADC_GAIN_COMP GENMASK(7, 0) + +/* REGMAP_USER_CRC_LOW Register field definition */ +#define TPS65224_MASK_REGMAP_USER_CRC16_LOW GENMASK(7, 0) + +/* REGMAP_USER_CRC_HIGH Register field definition */ +#define TPS65224_MASK_REGMAP_USER_CRC16_HIGH GENMASK(7, 0) + +/* WD_ANSWER_REG Register field definition */ +#define TPS6594_MASK_WD_ANSWER GENMASK(7, 0) + /* WD_QUESTION_ANSW_CNT register field definition */ #define TPS6594_MASK_WD_QUESTION GENMASK(3, 0) #define TPS6594_MASK_WD_ANSW_CNT GENMASK(5, 4) +#define TPS65224_BIT_INT_TOP_STATUS BIT(7) + +/* WD WIN1_CFG register field definition */ +#define TPS6594_MASK_WD_WIN1_CFG GENMASK(6, 0) + +/* WD WIN2_CFG register field definition */ +#define TPS6594_MASK_WD_WIN2_CFG GENMASK(6, 0) + +/* WD LongWin register field definition */ +#define TPS6594_MASK_WD_LONGWIN_CFG GENMASK(7, 0) /* WD_MODE_REG register field definition */ #define TPS6594_BIT_WD_RETURN_LONGWIN BIT(0) #define TPS6594_BIT_WD_MODE_SELECT BIT(1) #define TPS6594_BIT_WD_PWRHOLD BIT(2) +#define TPS65224_BIT_WD_ENDRV_SEL BIT(6) +#define TPS65224_BIT_WD_CNT_SEL BIT(7) 
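/*
 * Illustrative sketch, not part of this diff: the new watchdog window masks
 * are meant to be consumed with FIELD_PREP() from <linux/bitfield.h>.
 * Programming Window-1 might look like this, using the page-4 register
 * TPS6594_REG_WD_WIN1_CFG defined above; the helper is hypothetical.
 */
static int tps6594_wd_set_win1(struct regmap *regmap, u8 duration)
{
	return regmap_update_bits(regmap, TPS6594_REG_WD_WIN1_CFG,
				  TPS6594_MASK_WD_WIN1_CFG,
				  FIELD_PREP(TPS6594_MASK_WD_WIN1_CFG, duration));
}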
/* WD_QA_CFG register field definition */ #define TPS6594_MASK_WD_QUESTION_SEED GENMASK(3, 0) @@ -993,6 +1216,106 @@ enum tps6594_irqs { #define TPS6594_IRQ_NAME_ALARM "alarm" #define TPS6594_IRQ_NAME_POWERUP "powerup" +/* IRQs */ +enum tps65224_irqs { + /* INT_BUCK register */ + TPS65224_IRQ_BUCK1_UVOV, + TPS65224_IRQ_BUCK2_UVOV, + TPS65224_IRQ_BUCK3_UVOV, + TPS65224_IRQ_BUCK4_UVOV, + /* INT_LDO_VMON register */ + TPS65224_IRQ_LDO1_UVOV, + TPS65224_IRQ_LDO2_UVOV, + TPS65224_IRQ_LDO3_UVOV, + TPS65224_IRQ_VCCA_UVOV, + TPS65224_IRQ_VMON1_UVOV, + TPS65224_IRQ_VMON2_UVOV, + /* INT_GPIO register */ + TPS65224_IRQ_GPIO1, + TPS65224_IRQ_GPIO2, + TPS65224_IRQ_GPIO3, + TPS65224_IRQ_GPIO4, + TPS65224_IRQ_GPIO5, + TPS65224_IRQ_GPIO6, + /* INT_STARTUP register */ + TPS65224_IRQ_VSENSE, + TPS65224_IRQ_ENABLE, + TPS65224_IRQ_PB_SHORT, + TPS65224_IRQ_FSD, + TPS65224_IRQ_SOFT_REBOOT, + /* INT_MISC register */ + TPS65224_IRQ_BIST_PASS, + TPS65224_IRQ_EXT_CLK, + TPS65224_IRQ_REG_UNLOCK, + TPS65224_IRQ_TWARN, + TPS65224_IRQ_PB_LONG, + TPS65224_IRQ_PB_FALL, + TPS65224_IRQ_PB_RISE, + TPS65224_IRQ_ADC_CONV_READY, + /* INT_MODERATE_ERR register */ + TPS65224_IRQ_TSD_ORD, + TPS65224_IRQ_BIST_FAIL, + TPS65224_IRQ_REG_CRC_ERR, + TPS65224_IRQ_RECOV_CNT, + /* INT_SEVERE_ERR register */ + TPS65224_IRQ_TSD_IMM, + TPS65224_IRQ_VCCA_OVP, + TPS65224_IRQ_PFSM_ERR, + TPS65224_IRQ_BG_XMON, + /* INT_FSM_ERR register */ + TPS65224_IRQ_IMM_SHUTDOWN, + TPS65224_IRQ_ORD_SHUTDOWN, + TPS65224_IRQ_MCU_PWR_ERR, + TPS65224_IRQ_SOC_PWR_ERR, + TPS65224_IRQ_COMM_ERR, + TPS65224_IRQ_I2C2_ERR, +}; + +#define TPS65224_IRQ_NAME_BUCK1_UVOV "buck1_uvov" +#define TPS65224_IRQ_NAME_BUCK2_UVOV "buck2_uvov" +#define TPS65224_IRQ_NAME_BUCK3_UVOV "buck3_uvov" +#define TPS65224_IRQ_NAME_BUCK4_UVOV "buck4_uvov" +#define TPS65224_IRQ_NAME_LDO1_UVOV "ldo1_uvov" +#define TPS65224_IRQ_NAME_LDO2_UVOV "ldo2_uvov" +#define TPS65224_IRQ_NAME_LDO3_UVOV "ldo3_uvov" +#define TPS65224_IRQ_NAME_VCCA_UVOV "vcca_uvov" +#define TPS65224_IRQ_NAME_VMON1_UVOV "vmon1_uvov" +#define TPS65224_IRQ_NAME_VMON2_UVOV "vmon2_uvov" +#define TPS65224_IRQ_NAME_GPIO1 "gpio1" +#define TPS65224_IRQ_NAME_GPIO2 "gpio2" +#define TPS65224_IRQ_NAME_GPIO3 "gpio3" +#define TPS65224_IRQ_NAME_GPIO4 "gpio4" +#define TPS65224_IRQ_NAME_GPIO5 "gpio5" +#define TPS65224_IRQ_NAME_GPIO6 "gpio6" +#define TPS65224_IRQ_NAME_VSENSE "vsense" +#define TPS65224_IRQ_NAME_ENABLE "enable" +#define TPS65224_IRQ_NAME_PB_SHORT "pb_short" +#define TPS65224_IRQ_NAME_FSD "fsd" +#define TPS65224_IRQ_NAME_SOFT_REBOOT "soft_reboot" +#define TPS65224_IRQ_NAME_BIST_PASS "bist_pass" +#define TPS65224_IRQ_NAME_EXT_CLK "ext_clk" +#define TPS65224_IRQ_NAME_REG_UNLOCK "reg_unlock" +#define TPS65224_IRQ_NAME_TWARN "twarn" +#define TPS65224_IRQ_NAME_PB_LONG "pb_long" +#define TPS65224_IRQ_NAME_PB_FALL "pb_fall" +#define TPS65224_IRQ_NAME_PB_RISE "pb_rise" +#define TPS65224_IRQ_NAME_ADC_CONV_READY "adc_conv_ready" +#define TPS65224_IRQ_NAME_TSD_ORD "tsd_ord" +#define TPS65224_IRQ_NAME_BIST_FAIL "bist_fail" +#define TPS65224_IRQ_NAME_REG_CRC_ERR "reg_crc_err" +#define TPS65224_IRQ_NAME_RECOV_CNT "recov_cnt" +#define TPS65224_IRQ_NAME_TSD_IMM "tsd_imm" +#define TPS65224_IRQ_NAME_VCCA_OVP "vcca_ovp" +#define TPS65224_IRQ_NAME_PFSM_ERR "pfsm_err" +#define TPS65224_IRQ_NAME_BG_XMON "bg_xmon" +#define TPS65224_IRQ_NAME_IMM_SHUTDOWN "imm_shutdown" +#define TPS65224_IRQ_NAME_ORD_SHUTDOWN "ord_shutdown" +#define TPS65224_IRQ_NAME_MCU_PWR_ERR "mcu_pwr_err" +#define TPS65224_IRQ_NAME_SOC_PWR_ERR "soc_pwr_err" +#define 
TPS65224_IRQ_NAME_COMM_ERR "comm_err" +#define TPS65224_IRQ_NAME_I2C2_ERR "i2c2_err" +#define TPS65224_IRQ_NAME_POWERUP "powerup" + /** * struct tps6594 - device private data structure * @@ -1014,7 +1337,9 @@ struct tps6594 { struct regmap_irq_chip_data *irq_data; }; -bool tps6594_is_volatile_reg(struct device *dev, unsigned int reg); +extern const struct regmap_access_table tps6594_volatile_table; +extern const struct regmap_access_table tps65224_volatile_table; + int tps6594_device_init(struct tps6594 *tps, bool enable_crc); #endif /* __LINUX_MFD_TPS6594_H */ diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 77b8c0a266..b573f15762 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -353,6 +353,7 @@ struct mhi_controller_config { * @read_reg: Read a MHI register via the physical link (required) * @write_reg: Write a MHI register via the physical link (required) * @reset: Controller specific reset function (optional) + * @edl_trigger: CB function to trigger EDL mode (optional) * @buffer_len: Bounce buffer length * @index: Index of the MHI controller instance * @bounce_buf: Use of bounce buffer @@ -435,6 +436,7 @@ struct mhi_controller { void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 val); void (*reset)(struct mhi_controller *mhi_cntrl); + int (*edl_trigger)(struct mhi_controller *mhi_cntrl); size_t buffer_len; int index; @@ -630,13 +632,29 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl); int mhi_sync_power_up(struct mhi_controller *mhi_cntrl); /** - * mhi_power_down - Start MHI power down sequence + * mhi_power_down - Power down the MHI device and also destroy the + * 'struct device' for the channels associated with it. + * See also mhi_power_down_keep_dev() which is a variant + * of this API that keeps the 'struct device' for channels + * (useful during suspend/hibernation). * @mhi_cntrl: MHI controller * @graceful: Link is still accessible, so do a graceful shutdown process */ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); /** + * mhi_power_down_keep_dev - Power down the MHI device but keep the 'struct + * device' for the channels associated with it. + * This is a variant of 'mhi_power_down()' and is + * useful in scenarios such as suspend/hibernation + * where destroying the 'struct device' is not + * needed. 
+ * @mhi_cntrl: MHI controller + * @graceful: Link is still accessible, so do a graceful shutdown process + */ +void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl, bool graceful); + +/** * mhi_unprepare_after_power_down - Free any allocated memory after power down * @mhi_cntrl: MHI controller */ @@ -798,4 +816,13 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, */ bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir); +/** + * mhi_get_channel_doorbell_offset - Get the channel doorbell offset + * @mhi_cntrl: MHI controller + * @chdb_offset: Read channel doorbell offset + * + * Return: 0 if the read succeeds, a negative error code otherwise + */ +int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset); + #endif /* _MHI_H_ */ diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index cb15308b5c..991526039c 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -95,9 +95,10 @@ enum { }; enum { - MLX5_CQ_MODIFY_PERIOD = 1 << 0, - MLX5_CQ_MODIFY_COUNT = 1 << 1, - MLX5_CQ_MODIFY_OVERRUN = 1 << 2, + MLX5_CQ_MODIFY_PERIOD = BIT(0), + MLX5_CQ_MODIFY_COUNT = BIT(1), + MLX5_CQ_MODIFY_OVERRUN = BIT(2), + MLX5_CQ_MODIFY_PERIOD_MODE = BIT(4), }; enum { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 01275c6e84..d7bb31d9a4 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -68,7 +68,7 @@ #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) -#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld))) +#define MLX5_ADDR_OF(typ, p, fld) ((void *)((u8 *)(p) + MLX5_BYTE_OFF(typ, fld))) /* insert a value to a struct */ #define MLX5_SET(typ, p, fld, v) do { \ @@ -1336,6 +1336,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap) +#define MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, ft_field_support_2_nic_receive.cap) + #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap) @@ -1359,6 +1362,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap) +#define MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(mdev, cap) \ + MLX5_CAP_PORT_SELECTION(mdev, ft_field_support_2_port_selection.cap) + #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 80452bd982..779cfdf2e9 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -85,7 +85,7 @@ enum mlx5_sqp_t { }; enum { - MLX5_MAX_PORTS = 4, + MLX5_MAX_PORTS = 8, }; enum { @@ -1375,11 +1375,4 @@ static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev) enum { MLX5_OCTWORD = 16, }; - -struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev, - irqreturn_t (*handler)(int, void *), - const struct irq_affinity_desc *affdesc, - const char *name); -void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map); - #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 5e5a9c6774..d45bfb7cf8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ 
b/include/linux/mlx5/mlx5_ifc.h @@ -416,7 +416,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits { /* Table 2170 - Flow Table Fields Supported 2 Format */ struct mlx5_ifc_flow_table_fields_supported_2_bits { - u8 reserved_at_0[0xe]; + u8 reserved_at_0[0x2]; + u8 inner_l4_type[0x1]; + u8 outer_l4_type[0x1]; + u8 reserved_at_4[0xa]; u8 bth_opcode[0x1]; u8 reserved_at_f[0x1]; u8 tunnel_header_0_1[0x1]; @@ -525,6 +528,12 @@ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { u8 reserved_at_0[0x80]; }; +enum { + MLX5_PACKET_L4_TYPE_NONE, + MLX5_PACKET_L4_TYPE_TCP, + MLX5_PACKET_L4_TYPE_UDP, +}; + struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 smac_47_16[0x20]; @@ -550,7 +559,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; - u8 reserved_at_c0[0x10]; + u8 l4_type[0x2]; + u8 reserved_at_c2[0xe]; u8 ipv4_ihl[0x4]; u8 reserved_at_c4[0x4]; @@ -846,7 +856,11 @@ struct mlx5_ifc_flow_table_nic_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; - u8 reserved_at_e00[0x700]; + u8 reserved_at_e00[0x600]; + + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive; + + u8 reserved_at_1480[0x80]; struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma; @@ -876,7 +890,9 @@ struct mlx5_ifc_port_selection_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection; - u8 reserved_at_400[0x7c00]; + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_port_selection; + + u8 reserved_at_480[0x7b80]; }; enum { @@ -1469,7 +1485,9 @@ enum { }; struct mlx5_ifc_cmd_hca_cap_bits { - u8 reserved_at_0[0x10]; + u8 reserved_at_0[0x6]; + u8 page_request_disable[0x1]; + u8 reserved_at_7[0x9]; u8 shared_object_to_user_object_allowed[0x1]; u8 reserved_at_13[0xe]; u8 vhca_resource_manager[0x1]; @@ -1668,7 +1686,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 cq_moderation[0x1]; - u8 reserved_at_223[0x3]; + u8 cq_period_mode_modify[0x1]; + u8 reserved_at_224[0x2]; u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; @@ -2004,7 +2023,17 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_3a0[0x10]; u8 max_rqt_vhca_id[0x10]; - u8 reserved_at_3c0[0x440]; + u8 reserved_at_3c0[0x20]; + + u8 reserved_at_3e0[0x10]; + u8 pcc_ifa2[0x1]; + u8 reserved_at_3f1[0xf]; + + u8 reserved_at_400[0x40]; + + u8 reserved_at_440[0x8]; + u8 max_num_eqs_24b[0x18]; + u8 reserved_at_460[0x3a0]; }; enum mlx5_ifc_flow_destination_type { @@ -4361,10 +4390,10 @@ enum { MLX5_CQC_ST_FIRED = 0xa, }; -enum { +enum mlx5_cq_period_mode { MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, - MLX5_CQ_PERIOD_NUM_MODES + MLX5_CQ_PERIOD_NUM_MODES, }; struct mlx5_ifc_cqc_bits { @@ -9793,7 +9822,21 @@ struct mlx5_ifc_pplm_reg_bits { u8 fec_override_admin_100g_2x[0x10]; u8 fec_override_admin_50g_1x[0x10]; - u8 reserved_at_140[0x140]; + u8 fec_override_cap_800g_8x[0x10]; + u8 fec_override_cap_400g_4x[0x10]; + + u8 fec_override_cap_200g_2x[0x10]; + u8 fec_override_cap_100g_1x[0x10]; + + u8 reserved_at_180[0xa0]; + + u8 fec_override_admin_800g_8x[0x10]; + u8 fec_override_admin_400g_4x[0x10]; + + u8 fec_override_admin_200g_2x[0x10]; + u8 fec_override_admin_100g_1x[0x10]; + + u8 reserved_at_260[0x20]; }; struct mlx5_ifc_ppcnt_reg_bits { @@ -10165,7 +10208,9 @@ struct mlx5_ifc_mtutc_reg_bits { }; struct mlx5_ifc_pcam_enhanced_features_bits { - u8 reserved_at_0[0x68]; + u8 reserved_at_0[0x48]; + u8 
fec_100G_per_lane_in_pplm[0x1]; + u8 reserved_at_49[0x1f]; u8 fec_50G_per_lane_in_pplm[0x1]; u8 reserved_at_69[0x4]; u8 rx_icrc_encapsulated_counter[0x1]; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index f0e55bf3ec..ad1ce65014 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -576,9 +576,12 @@ static inline const char *mlx5_qp_state_str(int state) static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev) { - return !MLX5_CAP_ROCE(dev, qp_ts_format) ? - MLX5_TIMESTAMP_FORMAT_FREE_RUNNING : - MLX5_TIMESTAMP_FORMAT_DEFAULT; + u8 supported_ts_cap = mlx5_get_roce_state(dev) ? + MLX5_CAP_ROCE(dev, qp_ts_format) : + MLX5_CAP_GEN(dev, sq_ts_format); + + return supported_ts_cap ? MLX5_TIMESTAMP_FORMAT_DEFAULT : + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; } #endif /* MLX5_QP_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index b6bdaa18b9..b58bad248e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -5,6 +5,7 @@ #include <linux/errno.h> #include <linux/mmdebug.h> #include <linux/gfp.h> +#include <linux/pgalloc_tag.h> #include <linux/bug.h> #include <linux/list.h> #include <linux/mmzone.h> @@ -405,6 +406,11 @@ extern unsigned int kobjsize(const void *objp); #define VM_ALLOW_ANY_UNCACHED VM_NONE #endif +#ifdef CONFIG_64BIT +/* VM is sealed, in vm_flags */ +#define VM_SEALED _BITUL(63) +#endif + /* Bits set in the VMA until the stack is in its final location */ #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY) @@ -1199,7 +1205,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) * debugging purposes - it does not include PTE-mapped sub-pages; look * at folio_mapcount() or page_mapcount() instead. */ -static inline int folio_entire_mapcount(struct folio *folio) +static inline int folio_entire_mapcount(const struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); return atomic_read(&folio->_entire_mapcount) + 1; @@ -1231,7 +1237,7 @@ static inline int page_mapcount(struct page *page) int mapcount = atomic_read(&page->_mapcount) + 1; /* Handle page_has_type() pages */ - if (mapcount < 0) + if (mapcount < PAGE_MAPCOUNT_RESERVE + 1) mapcount = 0; if (unlikely(PageCompound(page))) mapcount += folio_entire_mapcount(page_folio(page)); @@ -1239,34 +1245,44 @@ static inline int page_mapcount(struct page *page) return mapcount; } -int folio_total_mapcount(struct folio *folio); +static inline int folio_large_mapcount(const struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_test_large(folio), folio); + return atomic_read(&folio->_large_mapcount) + 1; +} /** - * folio_mapcount() - Calculate the number of mappings of this folio. + * folio_mapcount() - Number of mappings of this folio. * @folio: The folio. * - * A large folio tracks both how many times the entire folio is mapped, - * and how many times each individual page in the folio is mapped. - * This function calculates the total number of times the folio is - * mapped. + * The folio mapcount corresponds to the number of present user page table + * entries that reference any part of a folio. Each such present user page + * table entry must be paired with exactly one folio reference. + * + * For ordinary folios, each user page table entry (PTE/PMD/PUD/...) counts + * exactly once. + * + * For hugetlb folios, each abstracted "hugetlb" user page table entry that + * references the entire folio counts exactly once, even when such special + * page table entries are comprised of multiple ordinary page table entries. 
+ * + * Will report 0 for pages which cannot be mapped into userspace, such as + * slab, page tables and similar. * * Return: The number of times this folio is mapped. */ -static inline int folio_mapcount(struct folio *folio) +static inline int folio_mapcount(const struct folio *folio) { - if (likely(!folio_test_large(folio))) - return atomic_read(&folio->_mapcount) + 1; - return folio_total_mapcount(folio); -} + int mapcount; -static inline bool folio_large_is_mapped(struct folio *folio) -{ - /* - * Reading _entire_mapcount below could be omitted if hugetlb - * participated in incrementing nr_pages_mapped when compound mapped. - */ - return atomic_read(&folio->_nr_pages_mapped) > 0 || - atomic_read(&folio->_entire_mapcount) >= 0; + if (likely(!folio_test_large(folio))) { + mapcount = atomic_read(&folio->_mapcount) + 1; + /* Handle page_has_type() pages */ + if (mapcount < PAGE_MAPCOUNT_RESERVE + 1) + mapcount = 0; + return mapcount; + } + return folio_large_mapcount(folio); } /** @@ -1275,11 +1291,9 @@ static inline bool folio_large_is_mapped(struct folio *folio) * * Return: True if any page in this folio is referenced by user page tables. */ -static inline bool folio_mapped(struct folio *folio) +static inline bool folio_mapped(const struct folio *folio) { - if (likely(!folio_test_large(folio))) - return atomic_read(&folio->_mapcount) >= 0; - return folio_large_is_mapped(folio); + return folio_mapcount(folio) >= 1; } /* @@ -1287,11 +1301,9 @@ static inline bool folio_mapped(struct folio *folio) * For compound page it returns true if any sub-page of compound page is mapped, * even if this particular sub-page is not itself mapped by any PTE or PMD. */ -static inline bool page_mapped(struct page *page) +static inline bool page_mapped(const struct page *page) { - if (likely(!PageCompound(page))) - return atomic_read(&page->_mapcount) >= 0; - return folio_large_is_mapped(page_folio(page)); + return folio_mapped(page_folio(page)); } static inline struct page *virt_to_head_page(const void *x) @@ -1317,8 +1329,6 @@ void folio_copy(struct folio *dst, struct folio *src); unsigned long nr_free_buffer_pages(void); -void destroy_large_folio(struct folio *folio); - /* Returns the number of bytes in this potentially compound page. 
*/ static inline unsigned long page_size(struct page *page) { @@ -1436,27 +1446,22 @@ vm_fault_t finish_fault(struct vm_fault *vmf); #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) DECLARE_STATIC_KEY_FALSE(devmap_managed_key); -bool __put_devmap_managed_page_refs(struct page *page, int refs); -static inline bool put_devmap_managed_page_refs(struct page *page, int refs) +bool __put_devmap_managed_folio_refs(struct folio *folio, int refs); +static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs) { if (!static_branch_unlikely(&devmap_managed_key)) return false; - if (!is_zone_device_page(page)) + if (!folio_is_zone_device(folio)) return false; - return __put_devmap_managed_page_refs(page, refs); + return __put_devmap_managed_folio_refs(folio, refs); } #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ -static inline bool put_devmap_managed_page_refs(struct page *page, int refs) +static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs) { return false; } #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ -static inline bool put_devmap_managed_page(struct page *page) -{ - return put_devmap_managed_page_refs(page, 1); -} - /* 127: arbitrary random number, small enough to assemble well */ #define folio_ref_zero_or_close_to_overflow(folio) \ ((unsigned int) folio_ref_count(folio) + 127u <= 127u) @@ -1575,7 +1580,7 @@ static inline void put_page(struct page *page) * For some devmap managed pages we need to catch refcount transition * from 2 to 1: */ - if (put_devmap_managed_page(&folio->page)) + if (put_devmap_managed_folio_refs(folio, 1)) return; folio_put(folio); } @@ -2069,7 +2074,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone, * * Return: A positive power of two. */ -static inline long folio_nr_pages(struct folio *folio) +static inline long folio_nr_pages(const struct folio *folio) { if (!folio_test_large(folio)) return 1; @@ -2164,21 +2169,64 @@ static inline size_t folio_size(struct folio *folio) } /** - * folio_estimated_sharers - Estimate the number of sharers of a folio. + * folio_likely_mapped_shared - Estimate if the folio is mapped into the page + * tables of more than one MM * @folio: The folio. * - * folio_estimated_sharers() aims to serve as a function to efficiently - * estimate the number of processes sharing a folio. This is done by - * looking at the precise mapcount of the first subpage in the folio, and - * assuming the other subpages are the same. This may not be true for large - * folios. If you want exact mapcounts for exact calculations, look at - * page_mapcount() or folio_total_mapcount(). + * This function checks if the folio is currently mapped into more than one + * MM ("mapped shared"), or if the folio is only mapped into a single MM + * ("mapped exclusively"). + * + * As precise information is not easily available for all folios, this function + * estimates the number of MMs ("sharers") that are currently mapping a folio + * using the number of times the first page of the folio is currently mapped + * into page tables. * - * Return: The estimated number of processes sharing a folio. + * For small anonymous folios (except KSM folios) and anonymous hugetlb folios, + * the return value will be exactly correct, because they can only be mapped + * at most once into an MM, and they cannot be partially mapped. + * + * For other folios, the result can be fuzzy: + * #. 
For partially-mappable large folios (THP), the return value can wrongly + * indicate "mapped exclusively" (false negative) when the folio is + * only partially mapped into at least one MM. + * #. For pagecache folios (including hugetlb), the return value can wrongly + * indicate "mapped shared" (false positive) when two VMAs in the same MM + * cover the same file range. + * #. For (small) KSM folios, the return value can wrongly indicate "mapped + * shared" (false positive), when the folio is mapped multiple times into + * the same MM. + * + * Further, this function only considers current page table mappings that + * are tracked using the folio mapcount(s). + * + * This function does not consider: + * #. If the folio might get mapped in the (near) future (e.g., swapcache, + * pagecache, temporary unmapping for migration). + * #. If the folio is mapped differently (VM_PFNMAP). + * #. If hugetlb page table sharing applies. Callers might want to check + * hugetlb_pmd_shared(). + * + * Return: Whether the folio is estimated to be mapped into more than one MM. */ -static inline int folio_estimated_sharers(struct folio *folio) +static inline bool folio_likely_mapped_shared(struct folio *folio) { - return page_mapcount(folio_page(folio, 0)); + int mapcount = folio_mapcount(folio); + + /* Only partially-mappable folios require more care. */ + if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio))) + return mapcount > 1; + + /* A single mapping implies "mapped exclusively". */ + if (mapcount <= 1) + return false; + + /* If any page is mapped more than once we treat it "mapped shared". */ + if (folio_entire_mapcount(folio) || mapcount > folio_nr_pages(folio)) + return true; + + /* Let's guess based on the first subpage. */ + return atomic_read(&folio->_mapcount) > 0; } #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE @@ -2393,12 +2441,8 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); -int follow_pte(struct mm_struct *mm, unsigned long address, +int follow_pte(struct vm_area_struct *vma, unsigned long address, pte_t **ptepp, spinlock_t **ptlp); -int follow_pfn(struct vm_area_struct *vma, unsigned long address, - unsigned long *pfn); -int follow_phys(struct vm_area_struct *vma, unsigned long address, - unsigned int flags, unsigned long *prot, resource_size_t *phys); int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); @@ -2552,7 +2596,7 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma, MM_CP_UFFD_WP_RESOLVE) bool vma_needs_dirty_tracking(struct vm_area_struct *vma); -int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); +bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma) { /* @@ -2859,12 +2903,13 @@ static inline bool pagetable_is_reserved(struct ptdesc *pt) * * Return: The ptdesc describing the allocated page tables. */ -static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order) +static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order) { - struct page *page = alloc_pages(gfp | __GFP_COMP, order); + struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order); return page_ptdesc(page); } +#define pagetable_alloc(...) 
alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__)) /** * pagetable_free - Free pagetables @@ -3132,13 +3177,7 @@ extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid); /* Free the reserved page into the buddy system, so it gets managed. */ -static inline void free_reserved_page(struct page *page) -{ - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - adjust_managed_page_count(page, 1); -} +void free_reserved_page(struct page *page); #define free_highmem_page(page) free_reserved_page(page) static inline void mark_page_reserved(struct page *page) @@ -3195,8 +3234,6 @@ static inline unsigned long get_num_physpages(void) */ void free_area_init(unsigned long *max_zone_pfn); unsigned long node_map_pfn_alignment(void); -unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, - unsigned long end_pfn); extern unsigned long absent_pages_in_range(unsigned long start_pfn, unsigned long end_pfn); extern void get_pfn_range_for_nid(unsigned int nid, @@ -3212,7 +3249,6 @@ static inline int early_pfn_to_nid(unsigned long pfn) extern int __meminit early_pfn_to_nid(unsigned long pfn); #endif -extern void set_dma_reserve(unsigned long new_dma_reserve); extern void mem_init(void); extern void __init mmap_init(void); @@ -3224,9 +3260,6 @@ static inline void show_mem(void) extern long si_mem_available(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); -#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES -extern unsigned long arch_reserved_kernel_pages(void); -#endif extern __printf(3, 4) void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); @@ -3385,7 +3418,16 @@ extern int install_special_mapping(struct mm_struct *mm, unsigned long randomize_stack_top(unsigned long stack_top); unsigned long randomize_page(unsigned long start, unsigned long range); -extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); +unsigned long +__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags); + +static inline unsigned long +get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + return __get_unmapped_area(file, addr, len, pgoff, flags, 0); +} extern unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, @@ -3431,6 +3473,7 @@ struct vm_unmapped_area_info { unsigned long high_limit; unsigned long align_mask; unsigned long align_offset; + unsigned long start_gap; }; extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info); @@ -3787,24 +3830,22 @@ static inline bool page_is_guard(struct page *page) return PageGuard(page); } -bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order, - int migratetype); +bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order); static inline bool set_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) + unsigned int order) { if (!debug_guardpage_enabled()) return false; - return __set_page_guard(zone, page, order, migratetype); + return __set_page_guard(zone, page, order); } -void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order, - int migratetype); +void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order); static inline void 
clear_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) + unsigned int order) { if (!debug_guardpage_enabled()) return; - __clear_page_guard(zone, page, order, migratetype); + __clear_page_guard(zone, page, order); } #else /* CONFIG_DEBUG_PAGEALLOC */ @@ -3814,9 +3855,9 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; } static inline bool debug_guardpage_enabled(void) { return false; } static inline bool page_is_guard(struct page *page) { return false; } static inline bool set_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) { return false; } + unsigned int order) { return false; } static inline void clear_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) {} + unsigned int order) {} #endif /* CONFIG_DEBUG_PAGEALLOC */ #ifdef __HAVE_ARCH_GATE_AREA @@ -3971,7 +4012,6 @@ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, extern int memory_failure(unsigned long pfn, int flags); extern void memory_failure_queue_kick(int cpu); extern int unpoison_memory(unsigned long pfn); -extern void shake_page(struct page *p); extern atomic_long_t num_poisoned_pages __read_mostly; extern int soft_offline_page(unsigned long pfn, int flags); #ifdef CONFIG_MEMORY_FAILURE @@ -4204,4 +4244,7 @@ static inline bool pfn_is_unaccepted_memory(unsigned long pfn) return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE); } +void vma_pgtable_walk_begin(struct vm_area_struct *vma); +void vma_pgtable_walk_end(struct vm_area_struct *vma); + #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 962e7e05e4..af3a0256fa 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -169,7 +169,7 @@ struct page { /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */ atomic_t _refcount; -#ifdef CONFIG_MEMCG +#ifdef CONFIG_SLAB_OBJ_EXT unsigned long memcg_data; #endif @@ -289,7 +289,8 @@ typedef struct { * @virtual: Virtual address in the kernel direct map. * @_last_cpupid: IDs of last CPU and last process that accessed the folio. * @_entire_mapcount: Do not use directly, call folio_entire_mapcount(). - * @_nr_pages_mapped: Do not use directly, call folio_mapcount(). + * @_large_mapcount: Do not use directly, call folio_mapcount(). + * @_nr_pages_mapped: Do not use outside of rmap and debug code. * @_pincount: Do not use directly, call folio_maybe_dma_pinned(). * @_folio_nr_pages: Do not use directly, call folio_nr_pages(). * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h. 
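/*
 * Worked example, not part of this diff: with the @_large_mapcount field
 * documented above, the total mapcount of a large folio is a single atomic
 * read instead of a walk over subpages. For a PMD-mapped THP mapped once
 * into each of two processes (assumed scenario), folio_mapcount() reads 2
 * and folio_entire_mapcount() reads 2. The helper below is hypothetical
 * and only demonstrates the invariant between the two counters.
 */
static void folio_mapcount_example(struct folio *folio)
{
	int total = folio_mapcount(folio);

	if (folio_test_large(folio))
		/* Every entire (PMD) mapping is also included in the total. */
		WARN_ON(total < folio_entire_mapcount(folio));
}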
@@ -331,7 +332,7 @@ struct folio { }; atomic_t _mapcount; atomic_t _refcount; -#ifdef CONFIG_MEMCG +#ifdef CONFIG_SLAB_OBJ_EXT unsigned long memcg_data; #endif #if defined(WANT_PAGE_VIRTUAL) @@ -348,8 +349,8 @@ struct folio { struct { unsigned long _flags_1; unsigned long _head_1; - unsigned long _folio_avail; /* public: */ + atomic_t _large_mapcount; atomic_t _entire_mapcount; atomic_t _nr_pages_mapped; atomic_t _pincount; @@ -671,6 +672,9 @@ struct vm_area_struct { }; #ifdef CONFIG_PER_VMA_LOCK + /* Flag to indicate areas detached from the mm->mm_mt tree */ + bool detached; + /* * Can only be written (using WRITE_ONCE()) while holding both: * - mmap_lock (in write mode) @@ -687,9 +691,6 @@ struct vm_area_struct { */ int vm_lock_seq; struct vma_lock *vm_lock; - - /* Flag to indicate areas detached from the mm->mm_mt tree */ - bool detached; #endif /* @@ -777,11 +778,7 @@ struct mm_struct { } ____cacheline_aligned_in_smp; struct maple_tree mm_mt; -#ifdef CONFIG_MMU - unsigned long (*get_unmapped_area) (struct file *filp, - unsigned long addr, unsigned long len, - unsigned long pgoff, unsigned long flags); -#endif + unsigned long mmap_base; /* base of mmap area */ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES @@ -1170,14 +1167,15 @@ static inline void mm_init_cid(struct mm_struct *mm) cpumask_clear(mm_cidmask(mm)); } -static inline int mm_alloc_cid(struct mm_struct *mm) +static inline int mm_alloc_cid_noprof(struct mm_struct *mm) { - mm->pcpu_cid = alloc_percpu(struct mm_cid); + mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid); if (!mm->pcpu_cid) return -ENOMEM; mm_init_cid(mm); return 0; } +#define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__)) static inline void mm_destroy_cid(struct mm_struct *mm) { @@ -1370,6 +1368,15 @@ enum fault_flag { typedef unsigned int __bitwise zap_flags_t; +/* Flags for clear_young_dirty_ptes(). */ +typedef int __bitwise cydp_t; + +/* Clear the access bit */ +#define CYDP_CLEAR_YOUNG ((__force cydp_t)BIT(0)) + +/* Clear the dirty bit */ +#define CYDP_CLEAR_DIRTY ((__force cydp_t)BIT(1)) + /* * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each * other. 
Here is what they mean, and how to use them: diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 8d38dcb6d0..de9dc20b01 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -60,16 +60,14 @@ static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) #endif /* CONFIG_TRACING */ -static inline void mmap_assert_locked(struct mm_struct *mm) +static inline void mmap_assert_locked(const struct mm_struct *mm) { - lockdep_assert_held(&mm->mmap_lock); - VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); + rwsem_assert_held(&mm->mmap_lock); } -static inline void mmap_assert_write_locked(struct mm_struct *mm) +static inline void mmap_assert_write_locked(const struct mm_struct *mm) { - lockdep_assert_held_write(&mm->mmap_lock); - VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); + rwsem_assert_held_write(&mm->mmap_lock); } #ifdef CONFIG_PER_VMA_LOCK diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 5894bf912f..88c6a76042 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -433,8 +433,8 @@ struct mmc_host { mmc_pm_flag_t pm_caps; /* supported pm features */ /* host specific block data */ - unsigned int max_seg_size; /* see blk_queue_max_segment_size */ - unsigned short max_segs; /* see blk_queue_max_segments */ + unsigned int max_seg_size; /* lim->max_segment_size */ + unsigned short max_segs; /* lim->max_segments */ unsigned short unused; unsigned int max_req_size; /* maximum number of bytes in one req */ unsigned int max_blk_size; /* maximum size of one mmc block */ diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 478855b8e4..fed1f5f4a8 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -106,7 +106,10 @@ struct sdio_driver { .class = (dev_class), \ .vendor = SDIO_ANY_ID, .device = SDIO_ANY_ID -extern int sdio_register_driver(struct sdio_driver *); +/* use a macro to avoid include chaining to get THIS_MODULE */ +#define sdio_register_driver(drv) \ + __sdio_register_driver(drv, THIS_MODULE) +extern int __sdio_register_driver(struct sdio_driver *, struct module *); extern void sdio_unregister_driver(struct sdio_driver *); /** diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 7fada7a714..7cddfdac2f 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -124,6 +124,7 @@ #define SDIO_DEVICE_ID_REALTEK_RTW8723DS_2ANT 0xd723 #define SDIO_DEVICE_ID_REALTEK_RTW8723DS_1ANT 0xd724 #define SDIO_DEVICE_ID_REALTEK_RTW8821DS 0xd821 +#define SDIO_DEVICE_ID_REALTEK_RTW8723CS 0xb703 #define SDIO_VENDOR_ID_SIANO 0x039a #define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 66272fdce4..274a2767ea 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -8,8 +8,8 @@ #ifndef MMC_SLOT_GPIO_H #define MMC_SLOT_GPIO_H +#include <linux/interrupt.h> #include <linux/types.h> -#include <linux/irqreturn.h> struct mmc_host; @@ -22,8 +22,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, unsigned int idx, unsigned int debounce); int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config); -void mmc_gpio_set_cd_isr(struct mmc_host *host, - irqreturn_t (*isr)(int irq, void *dev_id)); +void mmc_gpio_set_cd_isr(struct mmc_host *host, irq_handler_t isr); int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on); void 
mmc_gpiod_request_cd_irq(struct mmc_host *host); bool mmc_can_gpio_cd(struct mmc_host *host); diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index f349e08a9d..d39ebb10ca 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -123,15 +123,6 @@ struct mmu_notifier_ops { unsigned long address); /* - * change_pte is called in cases that pte mapping to page is changed: - * for example, when ksm remaps pte to point to a new shared page. - */ - void (*change_pte)(struct mmu_notifier *subscription, - struct mm_struct *mm, - unsigned long address, - pte_t pte); - - /* * invalidate_range_start() and invalidate_range_end() must be * paired and are called only when the mmap_lock and/or the * locks protecting the reverse maps are held. If the subsystem @@ -392,8 +383,6 @@ extern int __mmu_notifier_clear_young(struct mm_struct *mm, unsigned long end); extern int __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address); -extern void __mmu_notifier_change_pte(struct mm_struct *mm, - unsigned long address, pte_t pte); extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r); extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r); extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm, @@ -439,13 +428,6 @@ static inline int mmu_notifier_test_young(struct mm_struct *mm, return 0; } -static inline void mmu_notifier_change_pte(struct mm_struct *mm, - unsigned long address, pte_t pte) -{ - if (mm_has_notifiers(mm)) - __mmu_notifier_change_pte(mm, address, pte); -} - static inline void mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) { @@ -581,26 +563,6 @@ static inline void mmu_notifier_range_init_owner( __young; \ }) -/* - * set_pte_at_notify() sets the pte _after_ running the notifier. - * This is safe to start by updating the secondary MMUs, because the primary MMU - * pte invalidate must have already happened with a ptep_clear_flush() before - * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is - * required when we change both the protection of the mapping from read-only to - * read-write and the pfn (like during copy on write page faults). Otherwise the - * old page would remain mapped readonly in the secondary MMUs after the new - * page is already writable by some CPU through the primary MMU. 
- */ -#define set_pte_at_notify(__mm, __address, __ptep, __pte) \ -({ \ - struct mm_struct *___mm = __mm; \ - unsigned long ___address = __address; \ - pte_t ___pte = __pte; \ - \ - mmu_notifier_change_pte(___mm, ___address, ___pte); \ - set_pte_at(___mm, ___address, __ptep, ___pte); \ -}) - #else /* CONFIG_MMU_NOTIFIER */ struct mmu_notifier_range { @@ -650,11 +612,6 @@ static inline int mmu_notifier_test_young(struct mm_struct *mm, return 0; } -static inline void mmu_notifier_change_pte(struct mm_struct *mm, - unsigned long address, pte_t pte) -{ -} - static inline void mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) { @@ -693,7 +650,6 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm) #define ptep_clear_flush_notify ptep_clear_flush #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush #define pudp_huge_clear_flush_notify pudp_huge_clear_flush -#define set_pte_at_notify set_pte_at static inline void mmu_notifier_synchronize(void) { diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f8d89a021a..1dc6248feb 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -205,7 +205,10 @@ enum node_stat_item { NR_KERNEL_SCS_KB, /* measured in KiB */ #endif NR_PAGETABLE, /* used for pagetables */ - NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */ + NR_SECONDARY_PAGETABLE, /* secondary pagetables, KVM & IOMMU */ +#ifdef CONFIG_IOMMU_SUPPORT + NR_IOMMU_PAGES, /* # of pages allocated by IOMMU */ +#endif #ifdef CONFIG_SWAP NR_SWAPCACHE, #endif diff --git a/include/linux/module.h b/include/linux/module.h index 1153b0d99a..330ffb59ef 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -605,6 +605,11 @@ static inline bool module_is_live(struct module *mod) return mod->state != MODULE_STATE_GOING; } +static inline bool module_is_coming(struct module *mod) +{ + return mod->state == MODULE_STATE_COMING; +} + struct module *__module_text_address(unsigned long addr); struct module *__module_address(unsigned long addr); bool is_module_address(unsigned long addr); @@ -857,6 +862,10 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr) return ptr; } +static inline bool module_is_coming(struct module *mod) +{ + return false; +} #endif /* CONFIG_MODULES */ #ifdef CONFIG_SYSFS @@ -922,11 +931,11 @@ int module_kallsyms_on_each_symbol(const char *modname, * least KSYM_NAME_LEN long: a pointer to namebuf is returned if * found, otherwise NULL. */ -const char *module_address_lookup(unsigned long addr, - unsigned long *symbolsize, - unsigned long *offset, - char **modname, const unsigned char **modbuildid, - char *namebuf); +int module_address_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, const unsigned char **modbuildid, + char *namebuf); int lookup_module_symbol_name(unsigned long addr, char *symname); int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, @@ -955,14 +964,14 @@ static inline int module_kallsyms_on_each_symbol(const char *modname, } /* For kallsyms to ask for address resolution. NULL means not found. 
*/ -static inline const char *module_address_lookup(unsigned long addr, +static inline int module_address_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, const unsigned char **modbuildid, char *namebuf) { - return NULL; + return 0; } static inline int lookup_module_symbol_name(unsigned long addr, char *symname) diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index 89b1e0ed98..e395461d59 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -25,13 +25,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, /* Additional bytes needed by arch in front of individual sections */ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section); -/* Allocator used for allocating struct module, core sections and init - sections. Returns NULL on failure. */ -void *module_alloc(unsigned long size); - -/* Free memory returned from module_alloc. */ -void module_memfree(void *module_region); - /* Determines if the section name is an init section (that is only used during * module loading). */ @@ -129,12 +122,4 @@ void module_arch_cleanup(struct module *mod); /* Any cleanup before freeing mod->module_init */ void module_arch_freeing_init(struct module *mod); -#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ - !defined(CONFIG_KASAN_VMALLOC) -#include <linux/kasan.h> -#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) -#else -#define MODULE_ALIGN PAGE_SIZE -#endif - #endif diff --git a/include/linux/msi.h b/include/linux/msi.h index 26d07e2305..dc27cf3903 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -573,8 +573,6 @@ enum { MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19), /* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */ MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20), - /* Support for PCI/IMS */ - MSI_FLAG_PCI_IMS = (1 << 21), }; /** @@ -676,6 +674,12 @@ int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nve void platform_device_msi_free_irqs_all(struct device *dev); bool msi_device_has_isolated_msi(struct device *dev); + +static inline int msi_domain_alloc_irqs(struct device *dev, unsigned int domid, int nirqs) +{ + return msi_domain_alloc_irqs_range(dev, domid, 0, nirqs - 1); +} + #else /* CONFIG_GENERIC_MSI_IRQ */ static inline bool msi_device_has_isolated_msi(struct device *dev) { diff --git a/include/linux/msi_api.h b/include/linux/msi_api.h index 391087ad99..5ae72d1912 100644 --- a/include/linux/msi_api.h +++ b/include/linux/msi_api.h @@ -15,7 +15,6 @@ struct device; */ enum msi_domain_ids { MSI_DEFAULT_DOMAIN, - MSI_SECONDARY_DOMAIN, MSI_MAX_DEVICE_IRQDOMAINS, }; diff --git a/include/linux/namei.h b/include/linux/namei.h index 74e0cc14eb..967aa9ea9f 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -44,6 +44,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; #define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */ #define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */ #define LOOKUP_CACHED 0x200000 /* Only do cached lookup */ +#define LOOKUP_LINKAT_EMPTY 0x400000 /* Linkat request with empty path. */ /* LOOKUP_* flags which do scope-related checks based on the dirfd. 
*/ #define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT) diff --git a/include/linux/net.h b/include/linux/net.h index 15df6d5f27..688320b79f 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -153,6 +153,7 @@ struct sockaddr; struct msghdr; struct module; struct sk_buff; +struct proto_accept_arg; typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *); @@ -171,7 +172,8 @@ struct proto_ops { int (*socketpair)(struct socket *sock1, struct socket *sock2); int (*accept) (struct socket *sock, - struct socket *newsock, int flags, bool kern); + struct socket *newsock, + struct proto_accept_arg *arg); int (*getname) (struct socket *sock, struct sockaddr *addr, int peer); diff --git a/include/linux/net/intel/libie/rx.h b/include/linux/net/intel/libie/rx.h new file mode 100644 index 0000000000..8e97775f1d --- /dev/null +++ b/include/linux/net/intel/libie/rx.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef __LIBIE_RX_H +#define __LIBIE_RX_H + +#include <net/libeth/rx.h> + +/* Rx buffer management */ + +/* The largest size for a single descriptor as per HW */ +#define LIBIE_MAX_RX_BUF_LEN 9728U +/* "True" HW-writeable space: minimum from SW and HW values */ +#define LIBIE_RX_BUF_LEN(hr) min_t(u32, LIBETH_RX_PAGE_LEN(hr), \ + LIBIE_MAX_RX_BUF_LEN) + +/* The maximum frame size as per HW (S/G) */ +#define __LIBIE_MAX_RX_FRM_LEN 16382U +/* ATST, HW can chain up to 5 Rx descriptors */ +#define LIBIE_MAX_RX_FRM_LEN(hr) \ + min_t(u32, __LIBIE_MAX_RX_FRM_LEN, LIBIE_RX_BUF_LEN(hr) * 5) +/* Maximum frame size minus LL overhead */ +#define LIBIE_MAX_MTU \ + (LIBIE_MAX_RX_FRM_LEN(LIBETH_MAX_HEADROOM) - LIBETH_RX_LL_LEN) + +/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed + * bitfield struct. + */ + +#define LIBIE_RX_PT_NUM 154 + +extern const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM]; + +/** + * libie_rx_pt_parse - convert HW packet type to software bitfield structure + * @pt: 10-bit hardware packet type value from the descriptor + * + * ```libie_rx_pt_lut``` must be accessed only using this wrapper. + * + * Return: parsed bitfield struct corresponding to the provided ptype. + */ +static inline struct libeth_rx_pt libie_rx_pt_parse(u32 pt) +{ + if (unlikely(pt >= LIBIE_RX_PT_NUM)) + pt = 0; + + return libie_rx_pt_lut[pt]; +} + +#endif /* __LIBIE_RX_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cb37817d63..d20c6c99eb 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -59,7 +59,7 @@ struct ethtool_ops; struct kernel_hwtstamp_config; struct phy_device; struct dsa_port; -struct ip_tunnel_parm; +struct ip_tunnel_parm_kern; struct macsec_context; struct macsec_ops; struct netdev_name_node; @@ -1327,7 +1327,7 @@ struct netdev_net_notifier { * queue id bound to an AF_XDP socket. The flags field specifies if * only RX, only Tx, or both should be woken up using the flags * XDP_WAKEUP_RX and XDP_WAKEUP_TX. - * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, + * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p, * int cmd); * Add, change, delete or get information on an IPv4 tunnel. 
* struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); @@ -1583,7 +1583,8 @@ struct net_device_ops { int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); int (*ndo_tunnel_ctl)(struct net_device *dev, - struct ip_tunnel_parm *p, int cmd); + struct ip_tunnel_parm_kern *p, + int cmd); struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path); @@ -1956,6 +1957,7 @@ enum netdev_reg_state { * @sysfs_rx_queue_group: Space for optional per-rx queue attributes * @rtnl_link_ops: Rtnl_link_ops * @stat_ops: Optional ops for queue-aware statistics + * @queue_mgmt_ops: Optional ops for queue management * * @gso_max_size: Maximum size of generic segmentation offload * @tso_max_size: Device (as in HW) limit on the max TSO request size @@ -2338,6 +2340,8 @@ struct net_device { const struct netdev_stat_ops *stat_ops; + const struct netdev_queue_mgmt_ops *queue_mgmt_ops; + /* for setting kernel sock attribute on TCP connection setup */ #define GSO_MAX_SEGS 65535u #define GSO_LEGACY_MAX_SIZE 65536u @@ -2367,8 +2371,8 @@ struct net_device { struct sfp_bus *sfp_bus; struct lock_class_key *qdisc_tx_busylock; bool proto_down; + bool threaded; unsigned wol_enabled:1; - unsigned threaded:1; struct list_head net_notifier_list; @@ -3133,6 +3137,7 @@ struct net_device *netdev_get_by_name(struct net *net, const char *name, netdevice_tracker *tracker, gfp_t gfp); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); struct net_device *dev_get_by_napi_id(unsigned int napi_id); +void netdev_copy_name(struct net_device *dev, char *name); static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, @@ -3203,6 +3208,7 @@ struct softnet_data { struct softnet_data *rps_ipi_list; #endif + unsigned int received_rps; bool in_net_rx_action; bool in_napi_threaded_poll; @@ -3235,11 +3241,11 @@ struct softnet_data { unsigned int cpu; unsigned int input_queue_tail; #endif - unsigned int received_rps; - unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; + atomic_t dropped ____cacheline_aligned_in_smp; + /* Another possibly contended cache line */ spinlock_t defer_lock ____cacheline_aligned_in_smp; int defer_count; @@ -3248,21 +3254,6 @@ struct softnet_data { call_single_data_t defer_csd; }; -static inline void input_queue_head_incr(struct softnet_data *sd) -{ -#ifdef CONFIG_RPS - sd->input_queue_head++; -#endif -} - -static inline void input_queue_tail_incr_save(struct softnet_data *sd, - unsigned int *qtail) -{ -#ifdef CONFIG_RPS - *qtail = ++sd->input_queue_tail; -#endif -} - DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); static inline int dev_recursion_level(void) @@ -3270,23 +3261,6 @@ static inline int dev_recursion_level(void) return this_cpu_read(softnet_data.xmit.recursion); } -#define XMIT_RECURSION_LIMIT 8 -static inline bool dev_xmit_recursion(void) -{ - return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > - XMIT_RECURSION_LIMIT); -} - -static inline void dev_xmit_recursion_inc(void) -{ - __this_cpu_inc(softnet_data.xmit.recursion); -} - -static inline void dev_xmit_recursion_dec(void) -{ - __this_cpu_dec(softnet_data.xmit.recursion); -} - void __netif_schedule(struct Qdisc *q); void netif_schedule_queue(struct netdev_queue *txq); @@ -4127,6 +4101,8 @@ static inline void dev_put(struct net_device *dev) netdev_put(dev, NULL); } +DEFINE_FREE(dev_put, struct net_device *, if (_T) 
dev_put(_T)) + static inline void netdev_ref_replace(struct net_device *odev, struct net_device *ndev, netdevice_tracker *tracker, @@ -4545,6 +4521,9 @@ static inline void netif_addr_unlock_bh(struct net_device *dev) void ether_setup(struct net_device *dev); +/* Allocate dummy net_device */ +struct net_device *alloc_netdev_dummy(int sizeof_priv); + /* Support for loadable net-drivers */ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, unsigned char name_assign_type, diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 100cbb2612..5d0288938c 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -20,95 +20,24 @@ #include <linux/uio.h> enum netfs_sreq_ref_trace; - -/* - * Overload PG_private_2 to give us PG_fscache - this is used to indicate that - * a page is currently backed by a local disk cache - */ -#define folio_test_fscache(folio) folio_test_private_2(folio) -#define PageFsCache(page) PagePrivate2((page)) -#define SetPageFsCache(page) SetPagePrivate2((page)) -#define ClearPageFsCache(page) ClearPagePrivate2((page)) -#define TestSetPageFsCache(page) TestSetPagePrivate2((page)) -#define TestClearPageFsCache(page) TestClearPagePrivate2((page)) +typedef struct mempool_s mempool_t; /** - * folio_start_fscache - Start an fscache write on a folio. + * folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED] * @folio: The folio. * * Call this function before writing a folio to a local cache. Starting a * second write before the first one finishes is not allowed. + * + * Note that this should no longer be used. */ -static inline void folio_start_fscache(struct folio *folio) +static inline void folio_start_private_2(struct folio *folio) { VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio); folio_get(folio); folio_set_private_2(folio); } -/** - * folio_end_fscache - End an fscache write on a folio. - * @folio: The folio. - * - * Call this function after the folio has been written to the local cache. - * This will wake any sleepers waiting on this folio. - */ -static inline void folio_end_fscache(struct folio *folio) -{ - folio_end_private_2(folio); -} - -/** - * folio_wait_fscache - Wait for an fscache write on this folio to end. - * @folio: The folio. - * - * If this folio is currently being written to a local cache, wait for - * the write to finish. Another write may start after this one finishes, - * unless the caller holds the folio lock. - */ -static inline void folio_wait_fscache(struct folio *folio) -{ - folio_wait_private_2(folio); -} - -/** - * folio_wait_fscache_killable - Wait for an fscache write on this folio to end. - * @folio: The folio. - * - * If this folio is currently being written to a local cache, wait - * for the write to finish or for a fatal signal to be received. - * Another write may start after this one finishes, unless the caller - * holds the folio lock. - * - * Return: - * - 0 if successful. - * - -EINTR if a fatal signal was encountered. 
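Back in the netdevice.h hunk above, the new DEFINE_FREE(dev_put, ...) entry hooks struct net_device references into the scope-based cleanup helpers from <linux/cleanup.h>. A minimal sketch with a hypothetical caller that looks a device up by ifindex:

	static int example_get_mtu(struct net *net, int ifindex)
	{
		/* reference dropped via dev_put() on every return path */
		struct net_device *dev __free(dev_put) =
			dev_get_by_index(net, ifindex);

		if (!dev)
			return -ENODEV;

		return dev->mtu;	/* dev_put(dev) runs here automatically */
	}

The "if (_T)" guard in the macro body makes the early NULL return (lookup failed) safe.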
- */ -static inline int folio_wait_fscache_killable(struct folio *folio) -{ - return folio_wait_private_2_killable(folio); -} - -static inline void set_page_fscache(struct page *page) -{ - folio_start_fscache(page_folio(page)); -} - -static inline void end_page_fscache(struct page *page) -{ - folio_end_private_2(page_folio(page)); -} - -static inline void wait_on_page_fscache(struct page *page) -{ - folio_wait_private_2(page_folio(page)); -} - -static inline int wait_on_page_fscache_killable(struct page *page) -{ - return folio_wait_private_2_killable(page_folio(page)); -} - /* Marks used on xarray-based buffers */ #define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */ #define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */ @@ -135,14 +64,17 @@ struct netfs_inode { #if IS_ENABLED(CONFIG_FSCACHE) struct fscache_cookie *cache; #endif + struct mutex wb_lock; /* Writeback serialisation */ loff_t remote_i_size; /* Size of the remote file */ loff_t zero_point; /* Size after which we assume there's no data * on the server */ + atomic_t io_count; /* Number of outstanding reqs */ unsigned long flags; #define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */ #define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */ #define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */ -#define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */ +#define NETFS_ICTX_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark + * write to cache on read */ }; /* @@ -165,16 +97,25 @@ struct netfs_folio { unsigned int dirty_len; /* Write-streaming dirty data length */ }; #define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */ +#define NETFS_FOLIO_COPY_TO_CACHE ((struct netfs_group *)0x356UL) /* Write to the cache only */ -static inline struct netfs_folio *netfs_folio_info(struct folio *folio) +static inline bool netfs_is_folio_info(const void *priv) { - void *priv = folio_get_private(folio); + return (unsigned long)priv & NETFS_FOLIO_INFO; +} - if ((unsigned long)priv & NETFS_FOLIO_INFO) +static inline struct netfs_folio *__netfs_folio_info(const void *priv) +{ + if (netfs_is_folio_info(priv)) return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO); return NULL; } +static inline struct netfs_folio *netfs_folio_info(struct folio *folio) +{ + return __netfs_folio_info(folio_get_private(folio)); +} + static inline struct netfs_group *netfs_folio_group(struct folio *folio) { struct netfs_folio *finfo; @@ -187,6 +128,33 @@ static inline struct netfs_group *netfs_folio_group(struct folio *folio) } /* + * Stream of I/O subrequests going to a particular destination, such as the + * server or the local cache. This is mainly intended for writing where we may + * have to write to multiple destinations concurrently. 
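	/*
	 * (Illustrative note on the structure that follows; the stream roles
	 * are inferred from the 6.10 netfs write helpers, not stated in this
	 * header.)  A write request carries NR_IO_STREAMS == 2 of these:
	 * stream 0 feeds the upload to the server and stream 1 the copy to
	 * the local cache, each with its own subrequest list, error state
	 * and retry tracking.
	 */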
+ */ +struct netfs_io_stream { + /* Submission tracking */ + struct netfs_io_subrequest *construct; /* Op being constructed */ + unsigned int submit_off; /* Folio offset we're submitting from */ + unsigned int submit_len; /* Amount of data left to submit */ + unsigned int submit_max_len; /* Amount I/O can be rounded up to */ + void (*prepare_write)(struct netfs_io_subrequest *subreq); + void (*issue_write)(struct netfs_io_subrequest *subreq); + /* Collection tracking */ + struct list_head subrequests; /* Contributory I/O operations */ + struct netfs_io_subrequest *front; /* Op being collected */ + unsigned long long collected_to; /* Position we've collected results to */ + size_t transferred; /* The amount transferred from this stream */ + enum netfs_io_source source; /* Where to read from/write to */ + unsigned short error; /* Aggregate error for the stream */ + unsigned char stream_nr; /* Index of stream in parent table */ + bool avail; /* T if stream is available */ + bool active; /* T if stream is active */ + bool need_retry; /* T if this stream needs retrying */ + bool failed; /* T if this stream failed */ +}; + +/* * Resources required to do operations on a cache. */ struct netfs_cache_resources { @@ -209,14 +177,17 @@ struct netfs_io_subrequest { struct work_struct work; struct list_head rreq_link; /* Link in rreq->subrequests */ struct iov_iter io_iter; /* Iterator for this subrequest */ - loff_t start; /* Where to start the I/O */ + unsigned long long start; /* Where to start the I/O */ + size_t max_len; /* Maximum size of the I/O */ size_t len; /* Size of the I/O */ size_t transferred; /* Amount of data transferred */ refcount_t ref; short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ + unsigned int nr_segs; /* Number of segs in io_iter */ unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */ enum netfs_io_source source; /* Where to read from/write to */ + unsigned char stream_nr; /* I/O stream this belongs to */ unsigned long flags; #define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */ #define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ @@ -224,15 +195,20 @@ struct netfs_io_subrequest { #define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */ #define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ #define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */ +#define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. 
ceph object) */ +#define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */ +#define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */ +#define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */ +#define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */ }; enum netfs_io_origin { NETFS_READAHEAD, /* This read was triggered by readahead */ NETFS_READPAGE, /* This read is a synchronous read */ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */ + NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */ NETFS_WRITEBACK, /* This write was triggered by writepages */ NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */ - NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */ NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */ NETFS_DIO_READ, /* This is a direct I/O read */ NETFS_DIO_WRITE, /* This is a direct I/O write */ @@ -254,26 +230,36 @@ struct netfs_io_request { struct netfs_cache_resources cache_resources; struct list_head proc_link; /* Link in netfs_iorequests */ struct list_head subrequests; /* Contributory I/O operations */ + struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */ +#define NR_IO_STREAMS 2 //wreq->nr_io_streams + struct netfs_group *group; /* Writeback group being written back */ struct iov_iter iter; /* Unencrypted-side iterator */ struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */ void *netfs_priv; /* Private data for the netfs */ + void *netfs_priv2; /* Private data for the netfs */ struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */ unsigned int direct_bv_count; /* Number of elements in direct_bv[] */ unsigned int debug_id; unsigned int rsize; /* Maximum read size (0 for none) */ unsigned int wsize; /* Maximum write size (0 for none) */ - unsigned int subreq_counter; /* Next subreq->debug_index */ + atomic_t subreq_counter; /* Next subreq->debug_index */ + unsigned int nr_group_rel; /* Number of refs to release on ->group */ + spinlock_t lock; /* Lock for queuing subreqs */ atomic_t nr_outstanding; /* Number of ops in progress */ atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */ - size_t submitted; /* Amount submitted for I/O so far */ - size_t len; /* Length of the request */ size_t upper_len; /* Length can be extended to here */ + unsigned long long submitted; /* Amount submitted for I/O so far */ + unsigned long long len; /* Length of the request */ size_t transferred; /* Amount to be indicated as transferred */ short error; /* 0 or error that occurred */ enum netfs_io_origin origin; /* Origin of the request */ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */ - loff_t i_size; /* Size of the file */ - loff_t start; /* Start position */ + unsigned long long i_size; /* Size of the file */ + unsigned long long start; /* Start position */ + atomic64_t issued_to; /* Write issuer folio cursor */ + unsigned long long contiguity; /* Tracking for gaps in the writeback sequence */ + unsigned long long collected_to; /* Point we've collected to */ + unsigned long long cleaned_to; /* Position we've cleaned folios to */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ refcount_t ref; unsigned long flags; @@ -287,6 +273,11 @@ struct netfs_io_request { #define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */ #define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */ #define NETFS_RREQ_BLOCKED 10 /* We blocked */ +#define NETFS_RREQ_PAUSE 11 /* 
Pause subrequest generation */ +#define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */ +#define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */ +#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark + * write to cache on read */ const struct netfs_request_ops *netfs_ops; void (*cleanup)(struct netfs_io_request *req); }; @@ -295,8 +286,8 @@ struct netfs_io_request { * Operations the network filesystem can/must provide to the helpers. */ struct netfs_request_ops { - unsigned int io_request_size; /* Alloc size for netfs_io_request struct */ - unsigned int io_subrequest_size; /* Alloc size for netfs_io_subrequest struct */ + mempool_t *request_pool; + mempool_t *subrequest_pool; int (*init_request)(struct netfs_io_request *rreq, struct file *file); void (*free_request)(struct netfs_io_request *rreq); void (*free_subrequest)(struct netfs_io_subrequest *rreq); @@ -312,10 +303,13 @@ struct netfs_request_ops { /* Modification handling */ void (*update_i_size)(struct inode *inode, loff_t i_size); + void (*post_modify)(struct inode *inode); /* Write request handling */ - void (*create_write_requests)(struct netfs_io_request *wreq, - loff_t start, size_t len); + void (*begin_writeback)(struct netfs_io_request *wreq); + void (*prepare_write)(struct netfs_io_subrequest *subreq); + void (*issue_write)(struct netfs_io_subrequest *subreq); + void (*retry_request)(struct netfs_io_request *wreq, struct netfs_io_stream *stream); void (*invalidate_cache)(struct netfs_io_request *wreq); }; @@ -350,15 +344,27 @@ struct netfs_cache_ops { netfs_io_terminated_t term_func, void *term_func_priv); + /* Write data to the cache from a netfs subrequest. */ + void (*issue_write)(struct netfs_io_subrequest *subreq); + /* Expand readahead request */ void (*expand_readahead)(struct netfs_cache_resources *cres, - loff_t *_start, size_t *_len, loff_t i_size); + unsigned long long *_start, + unsigned long long *_len, + unsigned long long i_size); /* Prepare a read operation, shortening it to a cached/uncached * boundary as appropriate. */ enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq, - loff_t i_size); + unsigned long long i_size); + + /* Prepare a write subrequest, working out if we're allowed to do it + * and finding out the maximum amount of data to gather before + * attempting to submit. If we're not permitted to do it, the + * subrequest should be marked failed. + */ + void (*prepare_write_subreq)(struct netfs_io_subrequest *subreq); /* Prepare a write operation, working out what part of the write we can * actually do. @@ -384,6 +390,7 @@ struct netfs_cache_ops { }; /* High-level read API. 
*/ +ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter); ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter); ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter); ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter); @@ -394,6 +401,8 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from, struct netfs_group *netfs_group); ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from); +ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter, + struct netfs_group *netfs_group); ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from); /* Address operations API */ @@ -410,7 +419,6 @@ int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc); void netfs_clear_inode_writeback(struct inode *inode, const void *aux); void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length); bool netfs_release_folio(struct folio *folio, gfp_t gfp); -int netfs_launder_folio(struct folio *folio); /* VMA operations API. */ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group); @@ -426,9 +434,7 @@ ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len, iov_iter_extraction_t extraction_flags); size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset, size_t max_size, size_t max_segs); -struct netfs_io_subrequest *netfs_create_write_request( - struct netfs_io_request *wreq, enum netfs_io_source dest, - loff_t start, size_t len, work_func_t worker); +void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq); void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, bool was_async); void netfs_queue_write_request(struct netfs_io_subrequest *subreq); @@ -469,9 +475,11 @@ static inline void netfs_inode_init(struct netfs_inode *ctx, ctx->remote_i_size = i_size_read(&ctx->inode); ctx->zero_point = LLONG_MAX; ctx->flags = 0; + atomic_set(&ctx->io_count, 0); #if IS_ENABLED(CONFIG_FSCACHE) ctx->cache = NULL; #endif + mutex_init(&ctx->wb_lock); /* ->releasepage() drives zero_point */ if (use_zero_point) { ctx->zero_point = ctx->remote_i_size; @@ -511,4 +519,20 @@ static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx) #endif } +/** + * netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete + * @inode: The netfs inode to wait on + * + * Wait for outstanding I/O requests of any type to complete. This is intended + * to be called from inode eviction routines. This makes sure that any + * resources held by those requests are cleaned up before we let the inode get + * cleaned up. 
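A minimal sketch of the intended call site for the helper defined just below, assuming a hypothetical netfs-backed filesystem ("examplefs" is made up); only netfs_wait_for_outstanding_io() comes from this header, and the exact ordering against truncation varies by filesystem:

	static void examplefs_evict_inode(struct inode *inode)
	{
		/* drain outstanding reads/writes so their resources go first */
		netfs_wait_for_outstanding_io(inode);
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}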
+ */ +static inline void netfs_wait_for_outstanding_io(struct inode *inode) +{ + struct netfs_inode *ictx = netfs_inode(inode); + + wait_var_event(&ictx->io_count, atomic_read(&ictx->io_count) == 0); +} + #endif /* _LINUX_NETFS_H */ diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index ef8d2d618d..0d896ce296 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -701,6 +701,12 @@ enum state_protect_how4 { SP4_SSV = 2 }; +/* GET_DIR_DELEGATION non-fatal status codes */ +enum gddrnf4_status { + GDD4_OK = 0, + GDD4_UNAVAIL = 1 +}; + enum pnfs_layouttype { LAYOUT_NFSV4_1_FILES = 1, LAYOUT_OSD2_OBJECTS = 2, diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index d59116ac82..039898d709 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -561,6 +561,9 @@ extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openfl extern void nfs_access_zap_cache(struct inode *inode); extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block); +extern int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry, + struct file *file, unsigned int open_flags, + umode_t mode); /* * linux/fs/nfs/symlink.c diff --git a/include/linux/nmi.h b/include/linux/nmi.h index f53438eae8..a8dfb38c9b 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -105,10 +105,12 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs); extern void hardlockup_detector_perf_stop(void); extern void hardlockup_detector_perf_restart(void); extern void hardlockup_detector_perf_cleanup(void); +extern void hardlockup_config_perf_event(const char *str); #else static inline void hardlockup_detector_perf_stop(void) { } static inline void hardlockup_detector_perf_restart(void) { } static inline void hardlockup_detector_perf_cleanup(void) { } +static inline void hardlockup_config_perf_event(const char *str) { } #endif void watchdog_hardlockup_stop(void); diff --git a/include/linux/numa.h b/include/linux/numa.h index 1d43371faf..eb19503604 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -15,6 +15,11 @@ #define NUMA_NO_NODE (-1) #define NUMA_NO_MEMBLK (-1) +static inline bool numa_valid_node(int nid) +{ + return nid >= 0 && nid < MAX_NUMNODES; +} + /* optionally keep NUMA memory info available post init */ #ifdef CONFIG_NUMA_KEEP_MEMINFO #define __initdata_or_meminfo diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index f0ba0e0321..3ebeaa0ded 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -199,7 +199,10 @@ int nvmem_add_one_cell(struct nvmem_device *nvmem, int nvmem_layout_register(struct nvmem_layout *layout); void nvmem_layout_unregister(struct nvmem_layout *layout); -int nvmem_layout_driver_register(struct nvmem_layout_driver *drv); +#define nvmem_layout_driver_register(drv) \ + __nvmem_layout_driver_register(drv, THIS_MODULE) +int __nvmem_layout_driver_register(struct nvmem_layout_driver *drv, + struct module *owner); void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv); #define module_nvmem_layout_driver(__nvmem_layout_driver) \ module_driver(__nvmem_layout_driver, nvmem_layout_driver_register, \ diff --git a/include/linux/objagg.h b/include/linux/objagg.h index 78021777df..6df5b887dc 100644 --- a/include/linux/objagg.h +++ b/include/linux/objagg.h @@ -8,7 +8,6 @@ struct objagg_ops { size_t obj_size; bool (*delta_check)(void *priv, const void *parent_obj, const void *obj); - int (*hints_obj_cmp)(const void *obj1, 
const void *obj2); void * (*delta_create)(void *priv, void *parent_obj, void *obj); void (*delta_destroy)(void *priv, void *delta_priv); void * (*root_create)(void *priv, void *obj, unsigned int root_id); diff --git a/include/linux/objpool.h b/include/linux/objpool.h index 15aff4a17f..cb1758eaa2 100644 --- a/include/linux/objpool.h +++ b/include/linux/objpool.h @@ -5,6 +5,10 @@ #include <linux/types.h> #include <linux/refcount.h> +#include <linux/atomic.h> +#include <linux/cpumask.h> +#include <linux/irqflags.h> +#include <linux/smp.h> /* * objpool: ring-array based lockless MPMC queue @@ -69,7 +73,7 @@ typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context); * struct objpool_head - object pooling metadata * @obj_size: object size, aligned to sizeof(void *) * @nr_objs: total objs (to be pre-allocated with objpool) - * @nr_cpus: local copy of nr_cpu_ids + * @nr_possible_cpus: cached value of num_possible_cpus() * @capacity: max objs can be managed by one objpool_slot * @gfp: gfp flags for kmalloc & vmalloc * @ref: refcount of objpool @@ -81,7 +85,7 @@ typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context); struct objpool_head { int obj_size; int nr_objs; - int nr_cpus; + int nr_possible_cpus; int capacity; gfp_t gfp; refcount_t ref; @@ -118,13 +122,94 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size, gfp_t gfp, void *context, objpool_init_obj_cb objinit, objpool_fini_cb release); +/* try to retrieve object from slot */ +static inline void *__objpool_try_get_slot(struct objpool_head *pool, int cpu) +{ + struct objpool_slot *slot = pool->cpu_slots[cpu]; + /* load head snapshot, other cpus may change it */ + uint32_t head = smp_load_acquire(&slot->head); + + while (head != READ_ONCE(slot->last)) { + void *obj; + + /* + * data visibility of 'last' and 'head' could be out of + * order since memory updating of 'last' and 'head' are + * performed in push() and pop() independently + * + * before any retrieving attempts, pop() must guarantee + * 'last' is behind 'head', that is to say, there must + * be available objects in slot, which could be ensured + * by condition 'last != head && last - head <= nr_objs' + * that is equivalent to 'last - head - 1 < nr_objs' as + * 'last' and 'head' are both unsigned int32 + */ + if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) { + head = READ_ONCE(slot->head); + continue; + } + + /* obj must be retrieved before moving forward head */ + obj = READ_ONCE(slot->entries[head & slot->mask]); + + /* move head forward to mark it's consumption */ + if (try_cmpxchg_release(&slot->head, &head, head + 1)) + return obj; + } + + return NULL; +} + /** * objpool_pop() - allocate an object from objpool * @pool: object pool * * return value: object ptr or NULL if failed */ -void *objpool_pop(struct objpool_head *pool); +static inline void *objpool_pop(struct objpool_head *pool) +{ + void *obj = NULL; + unsigned long flags; + int i, cpu; + + /* disable local irq to avoid preemption & interruption */ + raw_local_irq_save(flags); + + cpu = raw_smp_processor_id(); + for (i = 0; i < pool->nr_possible_cpus; i++) { + obj = __objpool_try_get_slot(pool, cpu); + if (obj) + break; + cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1); + } + raw_local_irq_restore(flags); + + return obj; +} + +/* adding object to slot, abort if the slot was already full */ +static inline int +__objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu) +{ + struct objpool_slot *slot = pool->cpu_slots[cpu]; + uint32_t head, tail; 
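	/*
	 * (Illustrative annotation for the function body that follows.)
	 * push() first reserves a ring position by advancing 'tail' with
	 * try_cmpxchg_acquire(); only after the reservation succeeds is the
	 * object stored and 'last' published with release semantics, so a
	 * pop() running on another CPU can never observe a slot entry that
	 * has not been written yet.  The WARN_ON_ONCE() guards the invariant
	 * tail - head <= nr_objs: the ring can never hold more objects than
	 * were preallocated.
	 */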
+ + /* loading tail and head as a local snapshot, tail first */ + tail = READ_ONCE(slot->tail); + + do { + head = READ_ONCE(slot->head); + /* fault caught: something must be wrong */ + WARN_ON_ONCE(tail - head > pool->nr_objs); + } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1)); + + /* now the tail position is reserved for the given obj */ + WRITE_ONCE(slot->entries[tail & slot->mask], obj); + /* update sequence to make this obj available for pop() */ + smp_store_release(&slot->last, tail + 1); + + return 0; +} /** * objpool_push() - reclaim the object and return back to objpool @@ -134,7 +219,19 @@ void *objpool_pop(struct objpool_head *pool); * return: 0 or error code (it fails only when user tries to push * the same object multiple times or wrong "objects" into objpool) */ -int objpool_push(void *obj, struct objpool_head *pool); +static inline int objpool_push(void *obj, struct objpool_head *pool) +{ + unsigned long flags; + int rc; + + /* disable local irq to avoid preemption & interruption */ + raw_local_irq_save(flags); + rc = __objpool_try_add_slot(obj, pool, raw_smp_processor_id()); + raw_local_irq_restore(flags); + + return rc; +} + /** * objpool_drop() - discard the object and deref objpool diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 4de2a24cad..e338282da6 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -11,7 +11,6 @@ struct reserved_mem_ops; struct reserved_mem { const char *name; unsigned long fdt_node; - unsigned long phandle; const struct reserved_mem_ops *ops; phys_addr_t base; phys_addr_t size; diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 51421fdbb0..6f9242259e 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -69,6 +69,7 @@ enum OID { OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ OID_id_ansip384r1, /* 1.3.132.0.34 */ + OID_id_ansip521r1, /* 1.3.132.0.35 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ OID_sha512, /* 2.16.840.1.101.3.4.2.3 */ diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index 8aa984ec1f..3cc5c4ed7f 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h @@ -10,17 +10,4 @@ typedef uintptr_t mbox_msg_t; #define omap_mbox_message(data) (u32)(mbox_msg_t)(data) -typedef int __bitwise omap_mbox_irq_t; -#define IRQ_TX ((__force omap_mbox_irq_t) 1) -#define IRQ_RX ((__force omap_mbox_irq_t) 2) - -struct mbox_chan; -struct mbox_client; - -struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, - const char *chan_name); - -void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); -void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); - #endif /* OMAP_MAILBOX_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 4bf1c25fd1..b9e914e1fa 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -109,7 +109,6 @@ enum pageflags { PG_active, PG_workingset, PG_error, - PG_slab, PG_owner_priv_1, /* Owner use. 
If pagecache, fs may use*/ PG_arch_1, PG_reserved, @@ -513,9 +512,9 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } __PAGEFLAG(Locked, locked, PF_NO_TAIL) FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE) PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL) -PAGEFLAG(Referenced, referenced, PF_HEAD) - TESTCLEARFLAG(Referenced, referenced, PF_HEAD) - __SETPAGEFLAG(Referenced, referenced, PF_HEAD) +FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE) + FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE) + __FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE) PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD) __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD) PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) @@ -524,7 +523,6 @@ PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) TESTCLEARFLAG(Active, active, PF_HEAD) PAGEFLAG(Workingset, workingset, PF_HEAD) TESTCLEARFLAG(Workingset, workingset, PF_HEAD) -__PAGEFLAG(Slab, slab, PF_NO_TAIL) PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ /* Xen */ @@ -628,12 +626,19 @@ PAGEFLAG_FALSE(HWPoison, hwpoison) #define __PG_HWPOISON 0 #endif -#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) +#ifdef CONFIG_PAGE_IDLE_FLAG +#ifdef CONFIG_64BIT FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE) FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE) FOLIO_FLAG(idle, FOLIO_HEAD_PAGE) #endif +/* See page_idle.h for !64BIT workaround */ +#else /* !CONFIG_PAGE_IDLE_FLAG */ +FOLIO_FLAG_FALSE(young) +FOLIO_TEST_CLEAR_FLAG_FALSE(young) +FOLIO_FLAG_FALSE(idle) +#endif /* * PageReported() is used to track reported free pages within the Buddy @@ -688,7 +693,7 @@ static __always_inline bool folio_mapping_flags(const struct folio *folio) return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0; } -static __always_inline int PageMappingFlags(const struct page *page) +static __always_inline bool PageMappingFlags(const struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; } @@ -709,7 +714,7 @@ static __always_inline bool __folio_test_movable(const struct folio *folio) PAGE_MAPPING_MOVABLE; } -static __always_inline int __PageMovable(const struct page *page) +static __always_inline bool __PageMovable(const struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE; @@ -736,7 +741,7 @@ static __always_inline bool PageKsm(const struct page *page) TESTPAGEFLAG_FALSE(Ksm, ksm) #endif -u64 stable_page_flags(struct page *page); +u64 stable_page_flags(const struct page *page); /** * folio_xor_flags_has_waiters - Change some folio flags. @@ -784,7 +789,7 @@ static inline bool folio_test_uptodate(const struct folio *folio) return ret; } -static inline int PageUptodate(const struct page *page) +static inline bool PageUptodate(const struct page *page) { return folio_test_uptodate(page_folio(page)); } @@ -868,9 +873,9 @@ static inline void ClearPageCompound(struct page *page) BUG_ON(!PageHead(page)); ClearPageHead(page); } -PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND) +FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE) #else -TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable) +FOLIO_FLAG_FALSE(large_rmappable) #endif #define PG_head_mask ((1UL << PG_head)) @@ -931,7 +936,7 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) #endif /* - * For pages that are never mapped to userspace (and aren't PageSlab), + * For pages that are never mapped to userspace, * page_type may be used. 
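	/*
	 * (Worked example of the inverted page_type encoding explained just
	 * below, using values from the new enum pagetype.)
	 *
	 *   fresh page:               page_type == 0xffffffff  (-1)
	 *   __SetPageBuddy():         clears PG_buddy    -> 0xffffff7f
	 *   PageType(page, PG_buddy): 0xffffff7f & 0xf0000080 == 0xf0000000
	 *                             -> true
	 *   __ClearPageBuddy():       sets PG_buddy back -> 0xffffffff
	 *
	 * A page mapped to userspace holds a small non-negative _mapcount in
	 * the same word, so the 0xf0000000 base bits cannot all be set and
	 * PageType() correctly fails for it.
	 */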
Because it is initialised to -1, we invert the * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and @@ -939,14 +944,18 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) * mistaken for a page type value. */ -#define PAGE_TYPE_BASE 0xf0000000 -/* Reserve 0x0000007f to catch underflows of _mapcount */ -#define PAGE_MAPCOUNT_RESERVE -128 -#define PG_buddy 0x00000080 -#define PG_offline 0x00000100 -#define PG_table 0x00000200 -#define PG_guard 0x00000400 -#define PG_hugetlb 0x00000800 +enum pagetype { + PG_buddy = 0x00000080, + PG_offline = 0x00000100, + PG_table = 0x00000200, + PG_guard = 0x00000400, + PG_hugetlb = 0x00000800, + PG_slab = 0x00001000, + + PAGE_TYPE_BASE = 0xf0000000, + /* Reserve 0x0000007f to catch underflows of _mapcount */ + PAGE_MAPCOUNT_RESERVE = -128, +}; #define PageType(page, flag) \ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) @@ -1041,6 +1050,20 @@ PAGE_TYPE_OPS(Table, table, pgtable) */ PAGE_TYPE_OPS(Guard, guard, guard) +FOLIO_TYPE_OPS(slab, slab) + +/** + * PageSlab - Determine if the page belongs to the slab allocator + * @page: The page to test. + * + * Context: Any context. + * Return: True for slab pages, false for any other kind of page. + */ +static inline bool PageSlab(const struct page *page) +{ + return folio_test_slab(page_folio(page)); +} + #ifdef CONFIG_HUGETLB_PAGE FOLIO_TYPE_OPS(hugetlb, hugetlb) #else @@ -1065,21 +1088,29 @@ static inline bool PageHuge(const struct page *page) * best effort only and inherently racy: there is no way to synchronize with * failing hardware. */ -static inline bool is_page_hwpoison(struct page *page) +static inline bool is_page_hwpoison(const struct page *page) { + const struct folio *folio; + if (PageHWPoison(page)) return true; - return PageHuge(page) && PageHWPoison(compound_head(page)); + folio = page_folio(page); + return folio_test_hugetlb(folio) && PageHWPoison(&folio->page); } -extern bool is_free_buddy_page(struct page *page); +bool is_free_buddy_page(const struct page *page); PAGEFLAG(Isolated, isolated, PF_ANY); static __always_inline int PageAnonExclusive(const struct page *page) { VM_BUG_ON_PGFLAGS(!PageAnon(page), page); - VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); + /* + * HugeTLB stores this information on the head page; THP keeps it per + * page + */ + if (PageHuge(page)) + page = compound_head(page); return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); } @@ -1118,7 +1149,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page) (1UL << PG_lru | 1UL << PG_locked | \ 1UL << PG_private | 1UL << PG_private_2 | \ 1UL << PG_writeback | 1UL << PG_reserved | \ - 1UL << PG_slab | 1UL << PG_active | \ + 1UL << PG_active | \ 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK) /* diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 4ac3439282..c16db00670 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -34,8 +34,9 @@ static inline bool is_migrate_isolate(int migratetype) #define REPORT_FAILURE 0x2 void set_pageblock_migratetype(struct page *page, int migratetype); -int move_freepages_block(struct zone *zone, struct page *page, - int migratetype, int *num_movable); + +bool move_freepages_block_isolate(struct zone *zone, struct page *page, + int migratetype); int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int migratetype, int flags, gfp_t gfp_flags); diff --git 
a/include/linux/page_ext.h b/include/linux/page_ext.h index be98564191..e4b48a0dda 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -4,7 +4,6 @@ #include <linux/types.h> #include <linux/stacktrace.h> -#include <linux/stackdepot.h> struct pglist_data; @@ -78,7 +77,7 @@ static inline void page_ext_init(void) } #endif -extern struct page_ext *page_ext_get(struct page *page); +extern struct page_ext *page_ext_get(const struct page *page); extern void page_ext_put(struct page_ext *page_ext); static inline void *page_ext_data(struct page_ext *page_ext, @@ -118,7 +117,7 @@ static inline void page_ext_init_flatmem(void) { } -static inline struct page_ext *page_ext_get(struct page *page) +static inline struct page_ext *page_ext_get(const struct page *page) { return NULL; } diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index d8f3448406..89ca0d5dc1 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -6,14 +6,12 @@ #include <linux/page-flags.h> #include <linux/page_ext.h> -#ifdef CONFIG_PAGE_IDLE_FLAG - -#ifndef CONFIG_64BIT +#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) /* * If there is not enough space to store Idle and Young bits in page flags, use * page ext flags instead. */ -static inline bool folio_test_young(struct folio *folio) +static inline bool folio_test_young(const struct folio *folio) { struct page_ext *page_ext = page_ext_get(&folio->page); bool page_young; @@ -52,7 +50,7 @@ static inline bool folio_test_clear_young(struct folio *folio) return page_young; } -static inline bool folio_test_idle(struct folio *folio) +static inline bool folio_test_idle(const struct folio *folio) { struct page_ext *page_ext = page_ext_get(&folio->page); bool page_idle; @@ -60,7 +58,7 @@ static inline bool folio_test_idle(struct folio *folio) if (unlikely(!page_ext)) return false; - page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags); + page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags); page_ext_put(page_ext); return page_idle; @@ -87,61 +85,5 @@ static inline void folio_clear_idle(struct folio *folio) clear_bit(PAGE_EXT_IDLE, &page_ext->flags); page_ext_put(page_ext); } -#endif /* !CONFIG_64BIT */ - -#else /* !CONFIG_PAGE_IDLE_FLAG */ - -static inline bool folio_test_young(struct folio *folio) -{ - return false; -} - -static inline void folio_set_young(struct folio *folio) -{ -} - -static inline bool folio_test_clear_young(struct folio *folio) -{ - return false; -} - -static inline bool folio_test_idle(struct folio *folio) -{ - return false; -} - -static inline void folio_set_idle(struct folio *folio) -{ -} - -static inline void folio_clear_idle(struct folio *folio) -{ -} - -#endif /* CONFIG_PAGE_IDLE_FLAG */ - -static inline bool page_is_young(struct page *page) -{ - return folio_test_young(page_folio(page)); -} - -static inline void set_page_young(struct page *page) -{ - folio_set_young(page_folio(page)); -} - -static inline bool test_and_clear_page_young(struct page *page) -{ - return folio_test_clear_young(page_folio(page)); -} - -static inline bool page_is_idle(struct page *page) -{ - return folio_test_idle(page_folio(page)); -} - -static inline void set_page_idle(struct page *page) -{ - folio_set_idle(page_folio(page)); -} +#endif /* CONFIG_PAGE_IDLE_FLAG && !64BIT */ #endif /* _LINUX_MM_PAGE_IDLE_H */ diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index fdd2a75adb..8c236c651d 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -139,20 +139,15 @@ static inline void 
folio_ref_sub(struct folio *folio, int nr) page_ref_sub(&folio->page, nr); } -static inline int page_ref_sub_return(struct page *page, int nr) +static inline int folio_ref_sub_return(struct folio *folio, int nr) { - int ret = atomic_sub_return(nr, &page->_refcount); + int ret = atomic_sub_return(nr, &folio->_refcount); if (page_ref_tracepoint_active(page_ref_mod_and_return)) - __page_ref_mod_and_return(page, -nr, ret); + __page_ref_mod_and_return(&folio->page, -nr, ret); return ret; } -static inline int folio_ref_sub_return(struct folio *folio, int nr) -{ - return page_ref_sub_return(&folio->page, nr); -} - static inline void page_ref_inc(struct page *page) { atomic_inc(&page->_refcount); @@ -235,7 +230,13 @@ static inline int folio_ref_dec_return(struct folio *folio) static inline bool page_ref_add_unless(struct page *page, int nr, int u) { - bool ret = atomic_add_unless(&page->_refcount, nr, u); + bool ret = false; + + rcu_read_lock(); + /* avoid writing to the vmemmap area being remapped */ + if (!page_is_fake_head(page) && page_ref_count(page) != u) + ret = atomic_add_unless(&page->_refcount, nr, u); + rcu_read_unlock(); if (page_ref_tracepoint_active(page_ref_mod_unless)) __page_ref_mod_unless(page, nr, ret); diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 3f2409b968..547e82cdc8 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -28,7 +28,7 @@ enum pageblock_bits { NR_PAGEBLOCK_BITS }; -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE @@ -45,7 +45,11 @@ extern unsigned int pageblock_order; #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ -#else /* CONFIG_HUGETLB_PAGE */ +#elif defined(CONFIG_TRANSPARENT_HUGEPAGE) + +#define pageblock_order min_t(unsigned int, HPAGE_PMD_ORDER, MAX_PAGE_ORDER) + +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ /* If huge pages are not used, group by MAX_ORDER_NR_PAGES */ #define pageblock_order MAX_PAGE_ORDER diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 365a649ef1..a0a026d2d2 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -40,6 +40,8 @@ int filemap_fdatawait_keep_errors(struct address_space *mapping); int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); int filemap_fdatawait_range_keep_errors(struct address_space *mapping, loff_t start_byte, loff_t end_byte); +int filemap_invalidate_inode(struct inode *inode, bool flush, + loff_t start, loff_t end); static inline int filemap_fdatawait(struct address_space *mapping) { @@ -561,24 +563,22 @@ static inline void *detach_page_private(struct page *page) } #ifdef CONFIG_NUMA -struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order); +struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order); #else -static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) +static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) { - return folio_alloc(gfp, order); + return folio_alloc_noprof(gfp, order); } #endif +#define filemap_alloc_folio(...) 
\ + alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__)) + static inline struct page *__page_cache_alloc(gfp_t gfp) { return &filemap_alloc_folio(gfp, 0)->page; } -static inline struct page *page_cache_alloc(struct address_space *x) -{ - return __page_cache_alloc(mapping_gfp_mask(x)); -} - static inline gfp_t readahead_gfp_mask(struct address_space *x) { return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN; @@ -1033,7 +1033,7 @@ static inline bool folio_trylock(struct folio *folio) /* * Return true if the page was successfully locked */ -static inline int trylock_page(struct page *page) +static inline bool trylock_page(struct page *page) { return folio_trylock(page_folio(page)); } @@ -1163,11 +1163,6 @@ void folio_end_writeback(struct folio *folio); void wait_for_stable_page(struct page *page); void folio_wait_stable(struct folio *folio); void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn); -static inline void __set_page_dirty(struct page *page, - struct address_space *mapping, int warn) -{ - __folio_mark_dirty(page_folio(page), mapping, warn); -} void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb); void __folio_cancel_dirty(struct folio *folio); static inline void folio_cancel_dirty(struct folio *folio) @@ -1179,7 +1174,6 @@ static inline void folio_cancel_dirty(struct folio *folio) bool folio_clear_dirty_for_io(struct folio *folio); bool clear_page_dirty_for_io(struct page *page); void folio_invalidate(struct folio *folio, size_t offset, size_t length); -int __set_page_dirty_nobuffers(struct page *page); bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); #ifdef CONFIG_MIGRATION diff --git a/include/linux/papr_scm.h b/include/linux/papr_scm.h new file mode 100644 index 0000000000..eb36453813 --- /dev/null +++ b/include/linux/papr_scm.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __LINUX_PAPR_SCM_H +#define __LINUX_PAPR_SCM_H + +/* DIMM health bitmap indicators */ +/* SCM device is unable to persist memory contents */ +#define PAPR_PMEM_UNARMED (1ULL << (63 - 0)) +/* SCM device failed to persist memory contents */ +#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1)) +/* SCM device contents are persisted from previous IPL */ +#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2)) +/* SCM device contents are not persisted from previous IPL */ +#define PAPR_PMEM_EMPTY (1ULL << (63 - 3)) +/* SCM device memory life remaining is critically low */ +#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4)) +/* SCM device will be garded off next IPL due to failure */ +#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5)) +/* SCM contents cannot persist due to current platform health status */ +#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6)) +/* SCM device is unable to persist memory contents in certain conditions */ +#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7)) +/* SCM device is encrypted */ +#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8)) +/* SCM device has been scrubbed and locked */ +#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9)) + +#define PAPR_PMEM_SAVE_FAILED (1ULL << (63 - 10)) + +/* Bits status indicators for health bitmap indicating unarmed dimm */ +#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \ + PAPR_PMEM_HEALTH_UNHEALTHY) + +/* Bits status indicators for health bitmap indicating unflushed dimm */ +#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY) + +/* Bits status indicators for health bitmap indicating unrestored dimm */ +#define 
PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY) + +/* Bit status indicators for smart event notification */ +#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \ + PAPR_PMEM_HEALTH_FATAL | \ + PAPR_PMEM_HEALTH_UNHEALTHY) + +#define PAPR_PMEM_SAVE_MASK (PAPR_PMEM_SAVE_FAILED) + +#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS) +#define PAPR_SCM_PERF_STATS_VERSION 0x1 + +#endif /* __LINUX_PAPR_SCM_H */ diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index abeba356bc..ac8c44dd82 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -59,7 +59,7 @@ static inline void part_stat_set_all(struct block_device *part, int value) #define part_stat_add(part, field, addnd) do { \ __part_stat_add((part), field, addnd); \ - if ((part)->bd_partno) \ + if (bdev_is_partition(part)) \ __part_stat_add(bdev_whole(part), field, addnd); \ } while (0) diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index cc2f70d061..acc5f96161 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -128,6 +128,8 @@ struct pci_epc_mem { * @group: configfs group representing the PCI EPC device * @lock: mutex to protect pci_epc ops * @function_num_map: bitmap to manage physical function number + * @init_complete: flag to indicate whether the EPC initialization is complete + * or not */ struct pci_epc { struct device dev; @@ -143,6 +145,7 @@ struct pci_epc { /* mutex to protect against concurrent access of EP controller */ struct mutex lock; unsigned long function_num_map; + bool init_complete; }; /** @@ -179,8 +182,6 @@ struct pci_epc_bar_desc { /** * struct pci_epc_features - features supported by a EPC device per function * @linkup_notifier: indicate if the EPC device can notify EPF driver on link up - * @core_init_notifier: indicate cores that can notify about their availability - * for initialization * @msi_capable: indicate if the endpoint function has MSI capability * @msix_capable: indicate if the endpoint function has MSI-X capability * @bar: array specifying the hardware description for each BAR @@ -188,7 +189,6 @@ struct pci_epc_bar_desc { */ struct pci_epc_features { unsigned int linkup_notifier : 1; - unsigned int core_init_notifier : 1; unsigned int msi_capable : 1; unsigned int msix_capable : 1; struct pci_epc_bar_desc bar[PCI_STD_NUM_BARS]; @@ -225,6 +225,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf, void pci_epc_linkup(struct pci_epc *epc); void pci_epc_linkdown(struct pci_epc *epc); void pci_epc_init_notify(struct pci_epc *epc); +void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf); void pci_epc_bme_notify(struct pci_epc *epc); void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, enum pci_epc_interface_type type); diff --git a/include/linux/pci.h b/include/linux/pci.h index 6f9c5ed5eb..cafc5ab1cb 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -51,7 +51,7 @@ PCI_STATUS_PARITY) /* Number of reset methods used in pci_reset_fn_methods array in pci.c */ -#define PCI_NUM_RESET_METHODS 7 +#define PCI_NUM_RESET_METHODS 8 #define PCI_RESET_PROBE true #define PCI_RESET_DO_RESET false @@ -1077,8 +1077,6 @@ enum { #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ #define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ -#define PCI_IRQ_LEGACY PCI_IRQ_INTX /* Deprecated! 
Use PCI_IRQ_INTX */ - /* These external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI @@ -1315,7 +1313,6 @@ int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val); int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val); int __must_check pci_enable_device(struct pci_dev *dev); -int __must_check pci_enable_device_io(struct pci_dev *dev); int __must_check pci_enable_device_mem(struct pci_dev *dev); int __must_check pci_reenable_device(struct pci_dev *); int __must_check pcim_enable_device(struct pci_dev *pdev); @@ -1648,8 +1645,7 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode, */ #define PCI_IRQ_VIRTUAL (1 << 4) -#define PCI_IRQ_ALL_TYPES \ - (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) +#define PCI_IRQ_ALL_TYPES (PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX) #include <linux/dmapool.h> @@ -1658,8 +1654,6 @@ struct msix_entry { u16 entry; /* Driver uses to specify entry, OS writes */ }; -struct msi_domain_template; - #ifdef CONFIG_PCI_MSI int pci_msi_vec_count(struct pci_dev *dev); void pci_disable_msi(struct pci_dev *dev); @@ -1692,11 +1686,6 @@ void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map); void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); -bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template, - unsigned int hwsize, void *data); -struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie, - const struct irq_affinity_desc *affdesc); -void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map); #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } @@ -1719,7 +1708,7 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags, struct irq_affinity *aff_desc) { - if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) + if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq) return 1; return -ENOSPC; } @@ -1760,25 +1749,6 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, { return cpu_possible_mask; } - -static inline bool pci_create_ims_domain(struct pci_dev *pdev, - const struct msi_domain_template *template, - unsigned int hwsize, void *data) -{ return false; } - -static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, - union msi_instance_cookie *icookie, - const struct irq_affinity_desc *affdesc) -{ - struct msi_map map = { .index = -ENOSYS, }; - - return map; -} - -static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map) -{ -} - #endif /** @@ -1821,17 +1791,21 @@ extern bool pcie_ports_native; #define pcie_ports_native false #endif -#define PCIE_LINK_STATE_L0S BIT(0) -#define PCIE_LINK_STATE_L1 BIT(1) -#define PCIE_LINK_STATE_CLKPM BIT(2) -#define PCIE_LINK_STATE_L1_1 BIT(3) -#define PCIE_LINK_STATE_L1_2 BIT(4) -#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) -#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) -#define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |\ - PCIE_LINK_STATE_CLKPM | PCIE_LINK_STATE_L1_1 |\ - PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1_1_PCIPM |\ +#define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */ +#define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */ +#define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */ +#define PCIE_LINK_STATE_L1_2 BIT(4) /* ASPM L1.2 state */ +#define PCIE_LINK_STATE_L1_1_PCIPM 
BIT(5) /* PCI-PM L1.1 state */ +#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) /* PCI-PM L1.2 state */ +#define PCIE_LINK_STATE_ASPM_ALL (PCIE_LINK_STATE_L0S |\ + PCIE_LINK_STATE_L1 |\ + PCIE_LINK_STATE_L1_1 |\ + PCIE_LINK_STATE_L1_2 |\ + PCIE_LINK_STATE_L1_1_PCIPM |\ PCIE_LINK_STATE_L1_2_PCIPM) +#define PCIE_LINK_STATE_CLKPM BIT(7) +#define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_ASPM_ALL |\ + PCIE_LINK_STATE_CLKPM) #ifdef CONFIG_PCIEASPM int pci_disable_link_state(struct pci_dev *pdev, int state); @@ -2014,10 +1988,9 @@ static inline int pci_register_driver(struct pci_driver *drv) static inline void pci_unregister_driver(struct pci_driver *drv) { } static inline u8 pci_find_capability(struct pci_dev *dev, int cap) { return 0; } -static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, - int cap) +static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap) { return 0; } -static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) +static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap) { return 0; } static inline u64 pci_get_dsn(struct pci_dev *dev) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4beb29907c..942a587bb9 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2608,6 +2608,8 @@ #define PCI_VENDOR_ID_ALIBABA 0x1ded +#define PCI_VENDOR_ID_CXL 0x1e98 + #define PCI_VENDOR_ID_TEHUTI 0x1fc9 #define PCI_DEVICE_ID_TEHUTI_3009 0x3009 #define PCI_DEVICE_ID_TEHUTI_3010 0x3010 @@ -3109,6 +3111,7 @@ #define PCI_DEVICE_ID_INTEL_HDA_CML_S 0xa3f0 #define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828 #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 +#define PCI_DEVICE_ID_INTEL_HDA_BMG 0xe2f7 #define PCI_DEVICE_ID_INTEL_HDA_CML_R 0xf0c8 #define PCI_DEVICE_ID_INTEL_HDA_RKL_S 0xf1c8 diff --git a/include/linux/pds/pds_common.h b/include/linux/pds/pds_common.h index 30581e2e04..5802e1deef 100644 --- a/include/linux/pds/pds_common.h +++ b/include/linux/pds/pds_common.h @@ -4,6 +4,8 @@ #ifndef _PDS_COMMON_H_ #define _PDS_COMMON_H_ +#include <linux/notifier.h> + #define PDS_CORE_DRV_NAME "pds_core" /* the device's internal addressing uses up to 52 bits */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 8c677f1859..03053de557 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -2,6 +2,7 @@ #ifndef __LINUX_PERCPU_H #define __LINUX_PERCPU_H +#include <linux/alloc_tag.h> #include <linux/mmdebug.h> #include <linux/preempt.h> #include <linux/smp.h> @@ -9,12 +10,17 @@ #include <linux/pfn.h> #include <linux/init.h> #include <linux/cleanup.h> +#include <linux/sched.h> #include <asm/percpu.h> /* enough to cover all DEFINE_PER_CPUs in modules */ #ifdef CONFIG_MODULES +#ifdef CONFIG_MEM_ALLOC_PROFILING +#define PERCPU_MODULE_RESERVE (8 << 13) +#else #define PERCPU_MODULE_RESERVE (8 << 10) +#endif #else #define PERCPU_MODULE_RESERVE 0 #endif @@ -121,7 +127,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn); #endif -extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1); extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr); extern bool is_kernel_percpu_address(unsigned long addr); @@ -129,14 +134,16 @@ extern bool is_kernel_percpu_address(unsigned long addr); extern void __init setup_per_cpu_areas(void); #endif -extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1); -extern void __percpu *__alloc_percpu(size_t size, size_t align) 
__alloc_size(1); -extern void free_percpu(void __percpu *__pdata); +extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved, + gfp_t gfp) __alloc_size(1); extern size_t pcpu_alloc_size(void __percpu *__pdata); -DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T)) - -extern phys_addr_t per_cpu_ptr_to_phys(void *addr); +#define __alloc_percpu_gfp(_size, _align, _gfp) \ + alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp)) +#define __alloc_percpu(_size, _align) \ + alloc_hooks(pcpu_alloc_noprof(_size, _align, false, GFP_KERNEL)) +#define __alloc_reserved_percpu(_size, _align) \ + alloc_hooks(pcpu_alloc_noprof(_size, _align, true, GFP_KERNEL)) #define alloc_percpu_gfp(type, gfp) \ (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \ @@ -144,6 +151,15 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); #define alloc_percpu(type) \ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) +#define alloc_percpu_noprof(type) \ + ((typeof(type) __percpu *)pcpu_alloc_noprof(sizeof(type), \ + __alignof__(type), false, GFP_KERNEL)) + +extern void free_percpu(void __percpu *__pdata); + +DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T)) + +extern phys_addr_t per_cpu_ptr_to_phys(void *addr); extern unsigned long pcpu_nr_pages(void); diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h index 43282e22eb..701974639f 100644 --- a/include/linux/perf/riscv_pmu.h +++ b/include/linux/perf/riscv_pmu.h @@ -39,6 +39,14 @@ struct cpu_hw_events { DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS); /* currently enabled firmware counters */ DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS); + /* The virtual address of the shared memory where counter snapshot will be taken */ + void *snapshot_addr; + /* The physical address of the shared memory where counter snapshot will be taken */ + phys_addr_t snapshot_addr_phys; + /* Boolean flag to indicate setup is already done */ + bool snapshot_set_done; + /* A shadow copy of the counter values to avoid clobbering during multiple SBI calls */ + u64 snapshot_cval_shcopy[RISCV_MAX_COUNTERS]; }; struct riscv_pmu { diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index d2a15c0c6f..393fb13733 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -786,6 +786,7 @@ struct perf_event { struct irq_work pending_irq; struct callback_head pending_task; unsigned int pending_work; + struct rcuwait pending_work_wait; atomic_t event_limit; @@ -809,11 +810,8 @@ struct perf_event { u64 (*clock)(void); perf_overflow_handler_t overflow_handler; void *overflow_handler_context; -#ifdef CONFIG_BPF_SYSCALL - perf_overflow_handler_t orig_overflow_handler; struct bpf_prog *prog; u64 bpf_cookie; -#endif #ifdef CONFIG_EVENT_TRACING struct trace_event_call *tp_event; @@ -883,6 +881,7 @@ struct perf_event_pmu_context { unsigned int nr_events; unsigned int nr_cgroups; + unsigned int nr_freq; atomic_t refcount; /* event <-> epc */ struct rcu_head rcu_head; @@ -897,6 +896,11 @@ struct perf_event_pmu_context { int rotate_necessary; }; +static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc) +{ + return !list_empty(&epc->flexible_active) || !list_empty(&epc->pinned_active); +} + struct perf_event_groups { struct rb_root tree; u64 index; @@ -1342,8 +1346,10 @@ extern int perf_event_output(struct perf_event *event, struct pt_regs *regs); static inline bool -__is_default_overflow_handler(perf_overflow_handler_t overflow_handler) 
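/*
 * Illustrative sketch, not part of the patch: the percpu.h hunk above keeps
 * the familiar entry points as alloc_hooks() macros around
 * pcpu_alloc_noprof(), so existing callers compile unchanged while gaining
 * allocation profiling. The "foo" driver below is hypothetical:
 */
#include <linux/errno.h>
#include <linux/percpu.h>

struct foo_stats { u64 hits; };

static struct foo_stats __percpu *foo_stats;

static int foo_stats_init(void)
{
	foo_stats = alloc_percpu(struct foo_stats);	/* zeroed, GFP_KERNEL */
	if (!foo_stats)
		return -ENOMEM;
	return 0;
}

static void foo_stats_hit(void)
{
	this_cpu_inc(foo_stats->hits);	/* preemption-safe per-CPU update */
}

static void foo_stats_exit(void)
{
	free_percpu(foo_stats);
}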
+is_default_overflow_handler(struct perf_event *event) { + perf_overflow_handler_t overflow_handler = event->overflow_handler; + if (likely(overflow_handler == perf_event_output_forward)) return true; if (unlikely(overflow_handler == perf_event_output_backward)) @@ -1351,22 +1357,6 @@ __is_default_overflow_handler(perf_overflow_handler_t overflow_handler) return false; } -#define is_default_overflow_handler(event) \ - __is_default_overflow_handler((event)->overflow_handler) - -#ifdef CONFIG_BPF_SYSCALL -static inline bool uses_default_overflow_handler(struct perf_event *event) -{ - if (likely(is_default_overflow_handler(event))) - return true; - - return __is_default_overflow_handler(event->orig_overflow_handler); -} -#else -#define uses_default_overflow_handler(event) \ - is_default_overflow_handler(event) -#endif - extern void perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, @@ -1697,6 +1687,14 @@ perf_event_addr_filters(struct perf_event *event) return ifh; } +static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) +{ + /* Only the parent has fasync state */ + if (event->parent) + event = event->parent; + return &event->fasync; +} + extern void perf_event_addr_filters_sync(struct perf_event *event); extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id); diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h new file mode 100644 index 0000000000..18cd0c0c73 --- /dev/null +++ b/include/linux/pgalloc_tag.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * page allocation tagging + */ +#ifndef _LINUX_PGALLOC_TAG_H +#define _LINUX_PGALLOC_TAG_H + +#include <linux/alloc_tag.h> + +#ifdef CONFIG_MEM_ALLOC_PROFILING + +#include <linux/page_ext.h> + +extern struct page_ext_operations page_alloc_tagging_ops; + +static inline union codetag_ref *codetag_ref_from_page_ext(struct page_ext *page_ext) +{ + return (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops); +} + +static inline struct page_ext *page_ext_from_codetag_ref(union codetag_ref *ref) +{ + return (void *)ref - page_alloc_tagging_ops.offset; +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static inline union codetag_ref *get_page_tag_ref(struct page *page) +{ + if (page) { + struct page_ext *page_ext = page_ext_get(page); + + if (page_ext) + return codetag_ref_from_page_ext(page_ext); + } + return NULL; +} + +static inline void put_page_tag_ref(union codetag_ref *ref) +{ + if (WARN_ON(!ref)) + return; + + page_ext_put(page_ext_from_codetag_ref(ref)); +} + +static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) +{ + if (mem_alloc_profiling_enabled()) { + union codetag_ref *ref = get_page_tag_ref(page); + + if (ref) { + alloc_tag_add(ref, task->alloc_tag, PAGE_SIZE * nr); + put_page_tag_ref(ref); + } + } +} + +static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) +{ + if (mem_alloc_profiling_enabled()) { + union codetag_ref *ref = get_page_tag_ref(page); + + if (ref) { + alloc_tag_sub(ref, PAGE_SIZE * nr); + put_page_tag_ref(ref); + } + } +} + +static inline void pgalloc_tag_split(struct page *page, unsigned int nr) +{ + int i; + struct page_ext *first_page_ext; + struct page_ext *page_ext; + union codetag_ref *ref; + struct alloc_tag *tag; + + if (!mem_alloc_profiling_enabled()) + return; + + first_page_ext = page_ext = page_ext_get(page); + if (unlikely(!page_ext)) + return; + + ref = codetag_ref_from_page_ext(page_ext); + 
if (!ref->ct) + goto out; + + tag = ct_to_alloc_tag(ref->ct); + page_ext = page_ext_next(page_ext); + for (i = 1; i < nr; i++) { + /* Set new reference to point to the original tag */ + alloc_tag_ref_set(codetag_ref_from_page_ext(page_ext), tag); + page_ext = page_ext_next(page_ext); + } +out: + page_ext_put(first_page_ext); +} + +static inline struct alloc_tag *pgalloc_tag_get(struct page *page) +{ + struct alloc_tag *tag = NULL; + + if (mem_alloc_profiling_enabled()) { + union codetag_ref *ref = get_page_tag_ref(page); + + alloc_tag_sub_check(ref); + if (ref) { + if (ref->ct) + tag = ct_to_alloc_tag(ref->ct); + put_page_tag_ref(ref); + } + } + + return tag; +} + +static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) +{ + if (mem_alloc_profiling_enabled() && tag) + this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); +} + +#else /* CONFIG_MEM_ALLOC_PROFILING */ + +static inline union codetag_ref *get_page_tag_ref(struct page *page) { return NULL; } +static inline void put_page_tag_ref(union codetag_ref *ref) {} +static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) {} +static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} +static inline void pgalloc_tag_split(struct page *page, unsigned int nr) {} +static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; } +static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} + +#endif /* CONFIG_MEM_ALLOC_PROFILING */ + +#endif /* _LINUX_PGALLOC_TAG_H */ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 85fc7554cd..18019f037b 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -50,6 +50,8 @@ #define pmd_pgtable(pmd) pmd_page(pmd) #endif +#define pmd_folio(pmd) page_folio(pmd_page(pmd)) + /* * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD] * @@ -149,9 +151,7 @@ static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address) * a shortcut which implies the use of the kernel's pgd, instead * of a process's */ -#ifndef pgd_offset_k #define pgd_offset_k(address) pgd_offset(&init_mm, (address)) -#endif /* * In many cases it is known that a virtual address is mapped at PMD or PTE @@ -459,6 +459,50 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, } #endif +#ifndef clear_young_dirty_ptes +/** + * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the + * same folio as old/clean. + * @mm: Address space the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to mark old/clean. + * @flags: Flags to modify the PTE batch semantics. + * + * May be overridden by the architecture; otherwise, implemented by + * get_and_clear/modify/set for each pte in the range. + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. 
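/*
 * Illustrative sketch, not from the patch: a MADV_FREE-style caller of
 * clear_young_dirty_ptes() (documented above, defined just below). The
 * foo_* wrapper and its locking context are assumptions:
 */
#include <linux/mm.h>
#include <linux/pgtable.h>

static void foo_mark_folio_lazyfree(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *start_pte,
				    struct folio *folio)
{
	unsigned int nr = folio_nr_pages(folio);

	/* caller holds the PTE lock; all nr PTEs lie in one PMD */
	clear_young_dirty_ptes(vma, addr, start_pte, nr,
			       CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY);
}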
+ */ +static inline void clear_young_dirty_ptes(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + unsigned int nr, cydp_t flags) +{ + pte_t pte; + + for (;;) { + if (flags == CYDP_CLEAR_YOUNG) + ptep_test_and_clear_young(vma, addr, ptep); + else { + pte = ptep_get_and_clear(vma->vm_mm, addr, ptep); + if (flags & CYDP_CLEAR_YOUNG) + pte = pte_mkold(pte); + if (flags & CYDP_CLEAR_DIRTY) + pte = pte_mkclean(pte); + set_pte_at(vma->vm_mm, addr, ptep, pte); + } + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif + static inline void ptep_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -708,6 +752,35 @@ static inline void pte_clear_not_present_full(struct mm_struct *mm, } #endif +#ifndef clear_not_present_full_ptes +/** + * clear_not_present_full_ptes - Clear multiple not present PTEs which are + * consecutive in the pgtable. + * @mm: Address space the ptes represent. + * @addr: Address of the first pte. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to clear. + * @full: Whether we are clearing a full mm. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over pte_clear_not_present_full(). + * + * Context: The caller holds the page table lock. The PTEs are all not present. + * The PTEs are all in the same PMD. + */ +static inline void clear_not_present_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, unsigned int nr, int full) +{ + for (;;) { + pte_clear_not_present_full(mm, addr, ptep, full); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif + #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH extern pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, @@ -1052,7 +1125,7 @@ static inline int arch_unmap_one(struct mm_struct *mm, * prototypes must be defined in the arch-specific asm/pgtable.h file. */ #ifndef __HAVE_ARCH_PREPARE_TO_SWAP -static inline int arch_prepare_to_swap(struct page *page) +static inline int arch_prepare_to_swap(struct folio *folio) { return 0; } @@ -1079,7 +1152,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) #endif #ifndef __HAVE_ARCH_MOVE_PTE -#define move_pte(pte, prot, old_addr, new_addr) (pte) +#define move_pte(pte, old_addr, new_addr) (pte) #endif #ifndef pte_accessible @@ -1770,11 +1843,25 @@ typedef unsigned int pgtbl_mod_mask; #endif /* - * p?d_leaf() - true if this entry is a final mapping to a physical address. - * This differs from p?d_huge() by the fact that they are always available (if - * the architecture supports large pages at the appropriate level) even - * if CONFIG_HUGETLB_PAGE is not defined. - * Only meaningful when called on a valid entry. + * pXd_leaf() is the API to check whether a pgtable entry is a huge page + * mapping. It should work globally across all archs, without any + * dependency on CONFIG_* options. For architectures that do not support + * huge mappings on specific levels, below fallbacks will be used. + * + * A leaf pgtable entry should always imply the following: + * + * - It is a "present" entry. IOW, before using this API, please check it + * with pXd_present() first. NOTE: it may not always mean the "present + * bit" is set. For example, PROT_NONE entries are always "present". + * + * - It should _never_ be a swap entry of any type. Above "present" check + * should have guarded this, but let's be crystal clear on this. + * + * - It should contain a huge PFN, which points to a huge page larger than + * PAGE_SIZE of the platform. 
The PFN format isn't important here. + * + * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(), + * pXd_devmap(), or hugetlb mappings). */ #ifndef pgd_leaf #define pgd_leaf(x) false @@ -1806,6 +1893,20 @@ typedef unsigned int pgtbl_mod_mask; #endif /* + * We always define pmd_pfn for all archs as it's used in lots of generic + * code. Now it happens too for pud_pfn (and can happen for larger + * mappings too in the future; we're not there yet). Instead of defining + * it for all archs (like pmd_pfn), provide a fallback. + * + * Note that returning 0 here means any arch that didn't define this can + * get severely wrong when it hits a real pud leaf. It's arch's + * responsibility to properly define it when a huge pud is possible. + */ +#ifndef pud_pfn +#define pud_pfn(x) 0 +#endif + +/* * Some architectures have MMUs that are configurable or selectable at boot * time. These lead to variable PTRS_PER_x. For statically allocated arrays it * helps to have a static maximum value. diff --git a/include/linux/phy.h b/include/linux/phy.h index a62d86bce1..3be430cf31 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -778,6 +778,7 @@ struct phy_device { /* Generic phy_device::dev_flags */ #define PHY_F_NO_IRQ 0x80000000 +#define PHY_F_RXC_ALWAYS_ON 0x40000000 static inline struct phy_device *to_phy_device(const struct device *dev) { diff --git a/include/linux/phy/phy-dp.h b/include/linux/phy/phy-dp.h index 18cad23642..9cce5766bc 100644 --- a/include/linux/phy/phy-dp.h +++ b/include/linux/phy/phy-dp.h @@ -8,6 +8,9 @@ #include <linux/types.h> +#define PHY_SUBMODE_DP 0 +#define PHY_SUBMODE_EDP 1 + /** * struct phy_configure_opts_dp - DisplayPort PHY configuration set * diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 9a57deefcb..5ea6b2ad23 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -138,6 +138,9 @@ enum phylink_op_type { * @poll_fixed_state: if true, starts link_poll, * if MAC link is at %MLO_AN_FIXED mode. * @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM. + * @mac_requires_rxc: if true, the MAC always requires a receive clock from PHY. + * The PHY driver should start the clock signal as soon as + * possible and avoid stopping it during suspend events. * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND * @get_fixed_state: callback to execute to determine the fixed link state, * if MAC link is at %MLO_AN_FIXED mode. @@ -150,6 +153,7 @@ struct phylink_config { enum phylink_op_type type; bool poll_fixed_state; bool mac_managed_pm; + bool mac_requires_rxc; bool ovr_an_inband; void (*get_fixed_state)(struct phylink_config *config, struct phylink_link_state *state); @@ -392,6 +396,10 @@ struct phylink_pcs_ops; * @phylink: pointer to &struct phylink_config * @neg_mode: provide PCS neg mode via "mode" argument * @poll: poll the PCS for link changes + * @rxc_always_on: The MAC driver requires the reference clock + * to always be on. Standalone PCS drivers which + * do not have access to a PHY device can check + * this instead of PHY_F_RXC_ALWAYS_ON. * * This structure is designed to be embedded within the PCS private data, * and will be passed between phylink and the PCS. @@ -404,6 +412,7 @@ struct phylink_pcs { struct phylink *phylink; bool neg_mode; bool poll; + bool rxc_always_on; }; /** @@ -418,6 +427,8 @@ struct phylink_pcs { * @pcs_an_restart: restart 802.3z BaseX autonegotiation. * @pcs_link_up: program the PCS for the resolved link configuration * (where necessary). 
+ * @pcs_pre_init: configure PCS components necessary for MAC hardware + * initialization e.g. RX clock for stmmac. */ struct phylink_pcs_ops { int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported, @@ -437,6 +448,7 @@ struct phylink_pcs_ops { void (*pcs_an_restart)(struct phylink_pcs *pcs); void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex); + int (*pcs_pre_init)(struct phylink_pcs *pcs); }; #if 0 /* For kernel-doc purposes only. */ @@ -542,6 +554,34 @@ void pcs_an_restart(struct phylink_pcs *pcs); */ void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex); + +/** + * pcs_pre_init() - Configure PCS components necessary for MAC initialization + * @pcs: a pointer to a &struct phylink_pcs. + * + * This function can be called by MAC drivers through the + * phylink_pcs_pre_init() wrapper, before their hardware is initialized. It + * should not be called after the link is brought up, as reconfiguring the PCS + * at this point could break the link. + * + * Some MAC devices require specific hardware initialization to be performed by + * their associated PCS device before they can properly initialize their own + * hardware. An example of this is the initialization of stmmac controllers, + * which requires an active REF_CLK signal to be provided by the PHY/PCS. + * + * By calling phylink_pcs_pre_init(), MAC drivers can ensure that the PCS is + * setup in a way that allows for successful hardware initialization. + * + * The specific configuration performed by pcs_pre_init() is dependent on the + * model of PCS and the requirements of the MAC device attached to it. PCS + * driver authors should consider whether their target device is to be used in + * conjunction with a MAC device whose driver calls phylink_pcs_pre_init(). MAC + * driver authors should document their requirements for the PCS + * pre-initialization. + * + */ +int pcs_pre_init(struct phylink_pcs *pcs); + #endif struct phylink *phylink_create(struct phylink_config *, @@ -561,6 +601,8 @@ void phylink_disconnect_phy(struct phylink *); void phylink_mac_change(struct phylink *, bool up); void phylink_pcs_change(struct phylink_pcs *, bool up); +int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs); + void phylink_start(struct phylink *); void phylink_stop(struct phylink *); diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index c8645b2ed3..b9c8520b4b 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -26,16 +26,6 @@ struct davinci_mcasp_pdata { struct gen_pool *sram_pool; /* - * If McBSP peripheral gets the clock from an external pin, - * there are three chooses, that are MCBSP_CLKX, MCBSP_CLKR - * and MCBSP_CLKS. - * Depending on different hardware connections it is possible - * to use this setting to change the behaviour of McBSP - * driver. - */ - int clk_input_pin; - - /* * This flag works when both clock and FS are outputs for the cpu * and makes clock more accurate (FS is not symmetrical and the * clock is very fast. 
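/*
 * Illustrative sketch of the pcs_pre_init() hook documented in the phylink
 * hunk above. The "foo" PCS driver, its embedded private struct and refclk
 * are assumptions made for illustration; only the phylink types and the new
 * rxc_always_on flag come from the patch:
 */
#include <linux/clk.h>
#include <linux/phylink.h>

struct foo_pcs {
	struct phylink_pcs pcs;
	struct clk *refclk;
};

static int foo_pcs_pre_init(struct phylink_pcs *pcs)
{
	struct foo_pcs *fpcs = container_of(pcs, struct foo_pcs, pcs);

	/* e.g. stmmac wants the receive clock running before its DMA init */
	if (pcs->rxc_always_on)
		return clk_prepare_enable(fpcs->refclk);

	return 0;
}

static const struct phylink_pcs_ops foo_pcs_ops = {
	/* mandatory callbacks (pcs_config etc.) omitted from the sketch */
	.pcs_pre_init	= foo_pcs_pre_init,
};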
@@ -91,11 +81,6 @@ enum { MCASP_VERSION_OMAP, /* OMAP4/5 */ }; -enum mcbsp_clk_input_pin { - MCBSP_CLKR = 0, /* as in DM365 */ - MCBSP_CLKS, -}; - #define INACTIVE_MODE 0 #define TX_MODE 1 #define RX_MODE 2 diff --git a/include/linux/platform_data/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h index 5e4c2c272a..816a4cd3cc 100644 --- a/include/linux/platform_data/i2c-mux-gpio.h +++ b/include/linux/platform_data/i2c-mux-gpio.h @@ -18,7 +18,6 @@ * @values: Array of bitmasks of GPIO settings (low/high) for each * position * @n_values: Number of multiplexer positions (busses to instantiate) - * @classes: Optional I2C auto-detection classes * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used */ struct i2c_mux_gpio_platform_data { @@ -26,7 +25,6 @@ struct i2c_mux_gpio_platform_data { int base_nr; const unsigned *values; int n_values; - const unsigned *classes; unsigned idle; }; diff --git a/include/linux/platform_data/omap1_bl.h b/include/linux/platform_data/omap1_bl.h index 5e8b17d77a..3d0bab31a0 100644 --- a/include/linux/platform_data/omap1_bl.h +++ b/include/linux/platform_data/omap1_bl.h @@ -6,7 +6,6 @@ struct omap_backlight_config { int default_intensity; - int (*set_power)(struct device *dev, int state); }; #endif diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h index 3b400b1919..9e3c15b4ac 100644 --- a/include/linux/platform_data/spi-omap2-mcspi.h +++ b/include/linux/platform_data/spi-omap2-mcspi.h @@ -16,9 +16,6 @@ struct omap2_mcspi_platform_config { struct omap2_mcspi_device_config { unsigned turbo_mode:1; - - /* toggle chip select after every word */ - unsigned cs_per_word:1; }; #endif diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index eb556f988d..d8f15770a5 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -71,7 +71,6 @@ struct sysc_regbits { #define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12) #define SYSC_QUIRK_SWSUP_SIDLE BIT(11) #define SYSC_QUIRK_EXT_OPT_CLOCK BIT(10) -#define SYSC_QUIRK_LEGACY_IDLE BIT(9) #define SYSC_QUIRK_RESET_STATUS BIT(8) #define SYSC_QUIRK_NO_IDLE BIT(7) #define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6) diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h deleted file mode 100644 index f76fa393b8..0000000000 --- a/include/linux/platform_data/uio_pruss.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * include/linux/platform_data/uio_pruss.h - * - * Platform data for uio_pruss driver - * - * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/ - */ - -#ifndef _UIO_PRUSS_H_ -#define _UIO_PRUSS_H_ - -/* To configure the PRUSS INTC base offset for UIO driver */ -struct uio_pruss_pdata { - u32 pintc_base; - struct gen_pool *sram_pool; -}; -#endif /* _UIO_PRUSS_H_ */ diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index ab1c7deff1..3eb5cd6773 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -71,6 +71,7 @@ #define ASUS_WMI_DEVID_LID_FLIP 0x00060062 #define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077 #define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E +#define ASUS_WMI_DEVID_MINI_LED_MODE2 0x0005002E /* Storage */ #define ASUS_WMI_DEVID_CARDREADER 0x00080013 @@ -127,13 +128,18 @@ /* gpu mux switch, 0 = dGPU, 1 = Optimus */ #define ASUS_WMI_DEVID_GPU_MUX 0x00090016 +#define 
ASUS_WMI_DEVID_GPU_MUX_VIVO 0x00090026 /* TUF laptop RGB modes/colours */ #define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056 +#define ASUS_WMI_DEVID_TUF_RGB_MODE2 0x0010005A /* TUF laptop RGB power/state */ #define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057 +/* Bootup sound control */ +#define ASUS_WMI_DEVID_BOOT_SOUND 0x00130022 + /* DSTS masks */ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h index e5cbb6841f..f5492ed413 100644 --- a/include/linux/platform_profile.h +++ b/include/linux/platform_profile.h @@ -36,6 +36,7 @@ struct platform_profile_handler { int platform_profile_register(struct platform_profile_handler *pprof); int platform_profile_remove(void); +int platform_profile_cycle(void); void platform_profile_notify(void); #endif /*_PLATFORM_PROFILE_H_*/ diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 065a473823..dd7c8441af 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -476,6 +476,8 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table); int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus); +int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, + unsigned long *kHz); static inline void dev_pm_opp_of_unregister_em(struct device *dev) { em_dev_unregister_perf_domain(dev); @@ -539,6 +541,12 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev) { } +static inline int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, + unsigned long *kHz) +{ + return -EOPNOTSUPP; +} + static inline int of_get_required_opp_performance_state(struct device_node *np, int index) { return -EOPNOTSUPP; diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 6eb9adaef5..76cd1f9f13 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -107,7 +107,7 @@ extern void wakeup_sources_read_unlock(int idx); extern struct wakeup_source *wakeup_sources_walk_start(void); extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws); extern int device_wakeup_enable(struct device *dev); -extern int device_wakeup_disable(struct device *dev); +extern void device_wakeup_disable(struct device *dev); extern void device_set_wakeup_capable(struct device *dev, bool capable); extern int device_set_wakeup_enable(struct device *dev, bool enable); extern void __pm_stay_awake(struct wakeup_source *ws); @@ -154,10 +154,9 @@ static inline int device_wakeup_enable(struct device *dev) return 0; } -static inline int device_wakeup_disable(struct device *dev) +static inline void device_wakeup_disable(struct device *dev) { dev->power.should_wakeup = false; - return 0; } static inline int device_set_wakeup_enable(struct device *dev, bool enable) @@ -235,11 +234,10 @@ static inline int device_init_wakeup(struct device *dev, bool enable) if (enable) { device_set_wakeup_capable(dev, true); return device_wakeup_enable(dev); - } else { - device_wakeup_disable(dev); - device_set_wakeup_capable(dev, false); - return 0; } + device_wakeup_disable(dev); + device_set_wakeup_capable(dev, false); + return 0; } #endif /* _LINUX_PM_WAKEUP_H */ diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 314892a6de..7f2ff95d2d 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -467,6 +467,8 @@ int 
compare_pnp_id(struct pnp_id *pos, const char *id); int pnp_register_driver(struct pnp_driver *drv); void pnp_unregister_driver(struct pnp_driver *drv); +bool dev_is_pnp(const struct device *dev); + #else /* device management */ @@ -498,6 +500,8 @@ static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -E static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; } static inline void pnp_unregister_driver(struct pnp_driver *drv) { } +static inline bool dev_is_pnp(const struct device *dev) { return false; } + #endif /* CONFIG_PNP */ /** diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index b9e5bd2b42..5180dc9f17 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -47,16 +47,8 @@ struct bq27xxx_access_methods { }; struct bq27xxx_reg_cache { - int temperature; - int time_to_empty; - int time_to_empty_avg; - int time_to_full; - int charge_full; - int cycle_count; int capacity; - int energy; int flags; - int health; }; struct bq27xxx_device_info { diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 7233e9cf1b..ce76f1a457 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -481,4 +481,45 @@ DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable()) DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace()) DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable()) +#ifdef CONFIG_PREEMPT_DYNAMIC + +extern bool preempt_model_none(void); +extern bool preempt_model_voluntary(void); +extern bool preempt_model_full(void); + +#else + +static inline bool preempt_model_none(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_NONE); +} +static inline bool preempt_model_voluntary(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY); +} +static inline bool preempt_model_full(void) +{ + return IS_ENABLED(CONFIG_PREEMPT); +} + +#endif + +static inline bool preempt_model_rt(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_RT); +} + +/* + * Does the preemption model allow non-cooperative preemption? + * + * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with + * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the + * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the + * PREEMPT_NONE model. 
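/*
 * Illustrative sketch of a consumer of the preemption-model accessors being
 * consolidated here; the helper documented above follows immediately below.
 * All foo_* names are hypothetical:
 */
#define FOO_BATCH_SMALL	32
#define FOO_BATCH_LARGE	1024

struct foo_ctx { unsigned int batch; };

static void foo_tune_batch(struct foo_ctx *ctx)
{
	if (preempt_model_preemptible())
		ctx->batch = FOO_BATCH_LARGE;	/* scheduler can preempt us */
	else
		ctx->batch = FOO_BATCH_SMALL;	/* must reach cond_resched() soon */
}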
+ */ +static inline bool preempt_model_preemptible(void) +{ + return preempt_model_full() || preempt_model_rt(); +} + #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h index 2fde40cc96..65c5184470 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -71,7 +71,7 @@ extern void console_verbose(void); /* strlen("ratelimit") + 1 */ #define DEVKMSG_STR_MAX_SIZE 10 -extern char devkmsg_log_str[]; +extern char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE]; struct ctl_table; extern int suppress_printk; @@ -192,6 +192,7 @@ void show_regs_print_info(const char *log_lvl); extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold; extern asmlinkage void dump_stack(void) __cold; void printk_trigger_flush(void); +void console_replay_all(void); #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) @@ -271,6 +272,9 @@ static inline void dump_stack(void) static inline void printk_trigger_flush(void) { } +static inline void console_replay_all(void) +{ +} #endif bool this_cpu_in_panic(void); diff --git a/include/linux/property.h b/include/linux/property.h index 3a1045eb78..61fc20e5f8 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -13,6 +13,7 @@ #include <linux/args.h> #include <linux/array_size.h> #include <linux/bits.h> +#include <linux/cleanup.h> #include <linux/fwnode.h> #include <linux/stddef.h> #include <linux/types.h> @@ -174,13 +175,32 @@ struct fwnode_handle *device_get_next_child_node(const struct device *dev, for (child = device_get_next_child_node(dev, NULL); child; \ child = device_get_next_child_node(dev, child)) +#define device_for_each_child_node_scoped(dev, child) \ + for (struct fwnode_handle *child __free(fwnode_handle) = \ + device_get_next_child_node(dev, NULL); \ + child; child = device_get_next_child_node(dev, child)) + struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode, const char *childname); struct fwnode_handle *device_get_named_child_node(const struct device *dev, const char *childname); struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); -void fwnode_handle_put(struct fwnode_handle *fwnode); + +/** + * fwnode_handle_put - Drop reference to a device node + * @fwnode: Pointer to the device node to drop the reference to. + * + * This has to be used when terminating device_for_each_child_node() iteration + * with break or return to prevent stale device node references from being left + * behind. + */ +static inline void fwnode_handle_put(struct fwnode_handle *fwnode) +{ + fwnode_call_void_op(fwnode, put); +} + +DEFINE_FREE(fwnode_handle, struct fwnode_handle *, fwnode_handle_put(_T)) int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name); diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index 5ce0cd7695..6eec24ffa8 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -15,11 +15,14 @@ struct pse_controller_dev; /** * struct pse_control_config - PSE control/channel configuration. 
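/*
 * Illustrative sketch for the property.h hunk above, not part of the patch:
 * device_for_each_child_node_scoped() pairs with the new
 * DEFINE_FREE(fwnode_handle) so breaking out of the loop can no longer leak
 * a child reference. The caller shape is hypothetical:
 */
#include <linux/property.h>

static int foo_count_ports(struct device *dev)
{
	unsigned int n = 0;

	device_for_each_child_node_scoped(dev, child) {
		if (!fwnode_device_is_available(child))
			continue;
		if (++n > 8)
			return -EINVAL;	/* no fwnode_handle_put() needed */
	}

	return n;
}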
* - * @admin_cotrol: set PoDL PSE admin control as described in + * @podl_admin_control: set PoDL PSE admin control as described in * IEEE 802.3-2018 30.15.1.2.1 acPoDLPSEAdminControl + * @c33_admin_control: set PSE admin control as described in + * IEEE 802.3-2022 30.9.1.2.1 acPSEAdminControl */ struct pse_control_config { - enum ethtool_podl_pse_admin_state admin_cotrol; + enum ethtool_podl_pse_admin_state podl_admin_control; + enum ethtool_c33_pse_admin_state c33_admin_control; }; /** @@ -29,25 +32,36 @@ struct pse_control_config { * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState * @podl_pw_status: power detection status of the PoDL PSE. * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus: + * @c33_admin_state: operational state of the PSE + * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState + * @c33_pw_status: power detection status of the PSE. + * IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus: */ struct pse_control_status { enum ethtool_podl_pse_admin_state podl_admin_state; enum ethtool_podl_pse_pw_d_status podl_pw_status; + enum ethtool_c33_pse_admin_state c33_admin_state; + enum ethtool_c33_pse_pw_d_status c33_pw_status; }; /** * struct pse_controller_ops - PSE controller driver callbacks * * @ethtool_get_status: get PSE control status for ethtool interface - * @ethtool_set_config: set PSE control configuration over ethtool interface + * @setup_pi_matrix: setup PI matrix of the PSE controller + * @pi_is_enabled: Return 1 if the PSE PI is enabled, 0 if not. + * May also return negative errno. + * @pi_enable: Configure the PSE PI as enabled. + * @pi_disable: Configure the PSE PI as disabled. */ struct pse_controller_ops { int (*ethtool_get_status)(struct pse_controller_dev *pcdev, unsigned long id, struct netlink_ext_ack *extack, struct pse_control_status *status); - int (*ethtool_set_config)(struct pse_controller_dev *pcdev, - unsigned long id, struct netlink_ext_ack *extack, - const struct pse_control_config *config); + int (*setup_pi_matrix)(struct pse_controller_dev *pcdev); + int (*pi_is_enabled)(struct pse_controller_dev *pcdev, int id); + int (*pi_enable)(struct pse_controller_dev *pcdev, int id); + int (*pi_disable)(struct pse_controller_dev *pcdev, int id); }; struct module; @@ -55,6 +69,40 @@ struct device_node; struct of_phandle_args; struct pse_control; +/* PSE PI pairset pinout can either be Alternative A or Alternative B */ +enum pse_pi_pairset_pinout { + ALTERNATIVE_A, + ALTERNATIVE_B, +}; + +/** + * struct pse_pi_pairset - PSE PI pairset entity describing the pinout + * alternative and its phandle + * + * @pinout: description of the pinout alternative + * @np: device node pointer describing the pairset phandle + */ +struct pse_pi_pairset { + enum pse_pi_pairset_pinout pinout; + struct device_node *np; +}; + +/** + * struct pse_pi - PSE PI (Power Interface) entity as described in + * IEEE 802.3-2022 145.2.4 + * + * @pairset: table of the PSE PI pinout alternative for the two pairset + * @np: device node pointer of the PSE PI node + * @rdev: regulator represented by the PSE PI + * @admin_state_enabled: PI enabled state + */ +struct pse_pi { + struct pse_pi_pairset pairset[2]; + struct device_node *np; + struct regulator_dev *rdev; + bool admin_state_enabled; +}; + /** * struct pse_controller_dev - PSE controller entity that might * provide multiple PSE controls @@ -64,10 +112,11 @@ struct pse_control; * @pse_control_head: head of internal list of requested PSE controls * @dev: corresponding driver model device struct * @of_pse_n_cells: number
of cells in PSE line specifiers - * @of_xlate: translation function to translate from specifier as found in the - * device tree to id as given to the PSE control ops * @nr_lines: number of PSE controls in this controller device * @lock: Mutex for serialization access to the PSE controller + * @types: types of the PSE controller + * @pi: table of PSE PIs described in this controller device + * @no_of_pse_pi: flag set if the pse_pis devicetree node is not used */ struct pse_controller_dev { const struct pse_controller_ops *ops; @@ -76,10 +125,11 @@ struct pse_controller_dev { struct list_head pse_control_head; struct device *dev; int of_pse_n_cells; - int (*of_xlate)(struct pse_controller_dev *pcdev, - const struct of_phandle_args *pse_spec); unsigned int nr_lines; struct mutex lock; + enum ethtool_pse_types types; + struct pse_pi *pi; + bool no_of_pse_pi; }; #if IS_ENABLED(CONFIG_PSE_CONTROLLER) @@ -99,6 +149,9 @@ int pse_ethtool_set_config(struct pse_control *psec, struct netlink_ext_ack *extack, const struct pse_control_config *config); +bool pse_has_podl(struct pse_control *psec); +bool pse_has_c33(struct pse_control *psec); + #else static inline struct pse_control *of_pse_control_get(struct device_node *node) @@ -124,6 +177,16 @@ static inline int pse_ethtool_set_config(struct pse_control *psec, return -EOPNOTSUPP; } +static inline bool pse_has_podl(struct pse_control *psec) +{ + return false; +} + +static inline bool pse_has_c33(struct pse_control *psec) +{ + return false; +} + #endif #endif diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 808f9d3ee5..fd037c127b 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -464,11 +464,11 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, /* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See * documentation for vmalloc for which of them are legal. */ -static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) +static inline void **__ptr_ring_init_queue_alloc_noprof(unsigned int size, gfp_t gfp) { if (size > KMALLOC_MAX_SIZE / sizeof(void *)) return NULL; - return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); + return kvmalloc_array_noprof(size, sizeof(void *), gfp | __GFP_ZERO); } static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) @@ -484,9 +484,9 @@ static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) r->batch = 1; } -static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) +static inline int ptr_ring_init_noprof(struct ptr_ring *r, int size, gfp_t gfp) { - r->queue = __ptr_ring_init_queue_alloc(size, gfp); + r->queue = __ptr_ring_init_queue_alloc_noprof(size, gfp); if (!r->queue) return -ENOMEM; @@ -497,6 +497,7 @@ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) return 0; } +#define ptr_ring_init(...) alloc_hooks(ptr_ring_init_noprof(__VA_ARGS__)) /* * Return entries into ring. Destroy entries that don't fit. @@ -587,11 +588,11 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, * In particular if you consume ring in interrupt or BH context, you must * disable interrupts/BH when doing so. 
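/*
 * Illustrative sketch for the ptr_ring hunks above and below, not part of
 * the patch: call sites keep the historical names, which now expand through
 * alloc_hooks() to the _noprof variants. foo_ring_setup() and foo_destroy()
 * are hypothetical:
 */
#include <linux/ptr_ring.h>
#include <linux/slab.h>

static void foo_destroy(void *ptr)
{
	kfree(ptr);
}

static int foo_ring_setup(struct ptr_ring *r)
{
	int err;

	err = ptr_ring_init(r, 256, GFP_KERNEL);	/* a macro since this change */
	if (err)
		return err;

	/* growing the ring later is equally unchanged at the call site */
	return ptr_ring_resize(r, 512, GFP_KERNEL, foo_destroy);
}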
*/ -static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, +static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp, void (*destroy)(void *)) { unsigned long flags; - void **queue = __ptr_ring_init_queue_alloc(size, gfp); + void **queue = __ptr_ring_init_queue_alloc_noprof(size, gfp); void **old; if (!queue) @@ -609,6 +610,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, return 0; } +#define ptr_ring_resize(...) alloc_hooks(ptr_ring_resize_noprof(__VA_ARGS__)) /* * Note: producer lock is nested within consumer lock, so if you @@ -616,21 +618,21 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, * In particular if you consume ring in interrupt or BH context, you must * disable interrupts/BH when doing so. */ -static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, - unsigned int nrings, - int size, - gfp_t gfp, void (*destroy)(void *)) +static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings, + unsigned int nrings, + int size, + gfp_t gfp, void (*destroy)(void *)) { unsigned long flags; void ***queues; int i; - queues = kmalloc_array(nrings, sizeof(*queues), gfp); + queues = kmalloc_array_noprof(nrings, sizeof(*queues), gfp); if (!queues) goto noqueues; for (i = 0; i < nrings; ++i) { - queues[i] = __ptr_ring_init_queue_alloc(size, gfp); + queues[i] = __ptr_ring_init_queue_alloc_noprof(size, gfp); if (!queues[i]) goto nomem; } @@ -660,6 +662,8 @@ nomem: noqueues: return -ENOMEM; } +#define ptr_ring_resize_multiple(...) \ + alloc_hooks(ptr_ring_resize_multiple_noprof(__VA_ARGS__)) static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) { diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 4a6568dfdf..60b92c2c75 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -2,6 +2,7 @@ #ifndef __LINUX_PWM_H #define __LINUX_PWM_H +#include <linux/device.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/of.h> @@ -272,11 +273,11 @@ struct pwm_ops { * @npwm: number of PWMs controlled by this chip * @of_xlate: request a PWM device given a device tree PWM specifier * @atomic: can the driver's ->apply() be called in atomic context - * @driver_data: Private pointer for driver specific info + * @uses_pwmchip_alloc: signals if pwmchip_allow was used to allocate this chip * @pwms: array of PWM devices allocated by the framework */ struct pwm_chip { - struct device *dev; + struct device dev; const struct pwm_ops *ops; struct module *owner; unsigned int id; @@ -287,31 +288,23 @@ struct pwm_chip { bool atomic; /* only used internally by the PWM framework */ - void *driver_data; - struct pwm_device *pwms; + bool uses_pwmchip_alloc; + struct pwm_device pwms[] __counted_by(npwm); }; static inline struct device *pwmchip_parent(const struct pwm_chip *chip) { - return chip->dev; + return chip->dev.parent; } static inline void *pwmchip_get_drvdata(struct pwm_chip *chip) { - /* - * After pwm_chip got a dedicated struct device, this can be replaced by - * dev_get_drvdata(&chip->dev); - */ - return chip->driver_data; + return dev_get_drvdata(&chip->dev); } static inline void pwmchip_set_drvdata(struct pwm_chip *chip, void *data) { - /* - * After pwm_chip got a dedicated struct device, this can be replaced by - * dev_set_drvdata(&chip->dev, data); - */ - chip->driver_data = data; + dev_set_drvdata(&chip->dev, data); } #if IS_ENABLED(CONFIG_PWM) @@ -628,17 +621,4 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num) 
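/*
 * Illustrative sketch for the pwm.h rework above: with a struct device now
 * embedded in struct pwm_chip, drvdata rides on the device core (note pwm.h
 * also gains #include <linux/device.h>). The foo_pwm driver, its private
 * struct and foo_pwm_program() are assumptions for illustration:
 */
#include <linux/pwm.h>

struct foo_pwm { void __iomem *base; };

static int foo_pwm_program(struct foo_pwm *priv, unsigned int hwpwm,
			   const struct pwm_state *state)
{
	/* hardware poking elided in this sketch */
	return 0;
}

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	struct foo_pwm *priv = pwmchip_get_drvdata(chip); /* dev_get_drvdata(&chip->dev) */

	dev_dbg(pwmchip_parent(chip), "hwpwm %u: period %llu ns\n",
		pwm->hwpwm, state->period);

	return foo_pwm_program(priv, pwm->hwpwm, state);
}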
} #endif -#ifdef CONFIG_PWM_SYSFS -void pwmchip_sysfs_export(struct pwm_chip *chip); -void pwmchip_sysfs_unexport(struct pwm_chip *chip); -#else -static inline void pwmchip_sysfs_export(struct pwm_chip *chip) -{ -} - -static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip) -{ -} -#endif /* CONFIG_PWM_SYSFS */ - #endif /* __LINUX_PWM_H */ diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index cdd2ac366b..0bf80e98d5 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h @@ -19,7 +19,6 @@ struct platform_pwm_backlight_data { int (*notify)(struct device *dev, int brightness); void (*notify_after)(struct device *dev, int brightness); void (*exit)(struct device *dev); - int (*check_fb)(struct device *dev, struct fb_info *info); }; #endif diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index cd1973e6ac..844a2743ca 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -217,9 +217,9 @@ enum pxa_ssp_type { PXA27x_SSP, PXA3xx_SSP, PXA168_SSP, - MMP2_SSP, PXA910_SSP, CE4100_SSP, + MMP2_SSP, MRFLD_SSP, QUARK_X1000_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ diff --git a/include/linux/qat/qat_mig_dev.h b/include/linux/qat/qat_mig_dev.h new file mode 100644 index 0000000000..dbbb6a063d --- /dev/null +++ b/include/linux/qat/qat_mig_dev.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef QAT_MIG_DEV_H_ +#define QAT_MIG_DEV_H_ + +struct pci_dev; + +struct qat_mig_dev { + void *parent_accel_dev; + u8 *state; + u32 setup_size; + u32 remote_setup_size; + u32 state_size; + s32 vf_id; +}; + +struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id); +int qat_vfmig_init(struct qat_mig_dev *mdev); +void qat_vfmig_cleanup(struct qat_mig_dev *mdev); +void qat_vfmig_reset(struct qat_mig_dev *mdev); +int qat_vfmig_open(struct qat_mig_dev *mdev); +void qat_vfmig_close(struct qat_mig_dev *mdev); +int qat_vfmig_suspend(struct qat_mig_dev *mdev); +int qat_vfmig_resume(struct qat_mig_dev *mdev); +int qat_vfmig_save_state(struct qat_mig_dev *mdev); +int qat_vfmig_save_setup(struct qat_mig_dev *mdev); +int qat_vfmig_load_state(struct qat_mig_dev *mdev); +int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size); +void qat_vfmig_destroy(struct qat_mig_dev *mdev); + +#endif /*QAT_MIG_DEV_H_*/ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 17d7ed5f3a..dfd2399f2c 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -401,15 +401,15 @@ static inline int debug_lockdep_rcu_enabled(void) } \ } while (0) -#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) +#ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map), "Illegal context switch in RCU read-side critical section"); } -#else /* #ifdef CONFIG_PROVE_RCU */ +#else // #ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { } -#endif /* #else #ifdef CONFIG_PROVE_RCU */ +#endif // #else // #ifndef CONFIG_PREEMPT_RCU #define rcu_sleep_check() \ do { \ @@ -809,9 +809,9 @@ static inline void rcu_read_unlock(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock() used illegally while idle"); + rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ __release(RCU); __rcu_read_unlock(); - rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. 
*/ } /** @@ -1090,6 +1090,18 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) extern int rcu_expedited; extern int rcu_normal; -DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock()) +DEFINE_LOCK_GUARD_0(rcu, + do { + rcu_read_lock(); + /* + * sparse doesn't call the cleanup function, + * so just release immediately and don't track + * the context. We don't need to anyway, since + * the whole point of the guard is to not need + * the explicit unlock. + */ + __release(RCU); + } while (0), + rcu_read_unlock()) #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index d07f084880..303ab9bee1 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -19,18 +19,18 @@ struct rcu_synchronize { }; void wakeme_after_rcu(struct rcu_head *head); -void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, +void __wait_rcu_gp(bool checktiny, unsigned int state, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array); -#define _wait_rcu_gp(checktiny, ...) \ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ - __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ - __crcu_array, __rs_array); \ +#define _wait_rcu_gp(checktiny, state, ...) \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, state, ARRAY_SIZE(__crcu_array), __crcu_array, __rs_array); \ } while (0) -#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) +#define wait_rcu_gp(...) _wait_rcu_gp(false, TASK_UNINTERRUPTIBLE, __VA_ARGS__) +#define wait_rcu_gp_state(state, ...) _wait_rcu_gp(false, state, __VA_ARGS__) /** * synchronize_rcu_mult - Wait concurrently for multiple grace periods @@ -54,7 +54,7 @@ do { \ * grace period. */ #define synchronize_rcu_mult(...) \ - _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) + _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), TASK_UNINTERRUPTIBLE, __VA_ARGS__) static inline void cond_resched_rcu(void) { diff --git a/include/linux/regmap.h b/include/linux/regmap.h index d470303b1b..a6bc2980a9 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -297,15 +297,6 @@ typedef void (*regmap_unlock)(void *); * performed on such table (a register is no increment * readable if it belongs to one of the ranges specified * by rd_noinc_table). - * @disable_locking: This regmap is either protected by external means or - * is guaranteed not to be accessed from multiple threads. - * Don't use any locking mechanisms. - * @lock: Optional lock callback (overrides regmap's default lock - * function, based on spinlock or mutex). - * @unlock: As above for unlocking. - * @lock_arg: this field is passed as the only argument of lock/unlock - * functions (ignored in case regular lock/unlock functions - * are not overridden). * @reg_read: Optional callback that if filled will be used to perform * all the reads from the registers. Should only be provided for * devices whose read operation cannot be represented as a simple @@ -323,6 +314,7 @@ typedef void (*regmap_unlock)(void *); * @write: Same as above for writing. * @max_raw_read: Max raw read size that can be used on the device. * @max_raw_write: Max raw write size that can be used on the device. + * @can_sleep: Optional, specifies whether regmap operations can sleep. * @fast_io: Register IO is fast. Use a spinlock instead of a mutex * to perform locking. 
This field is ignored if custom lock/unlock * functions are used (see fields lock/unlock of struct regmap_config). @@ -331,6 +323,15 @@ typedef void (*regmap_unlock)(void *); * Use it only for "no-bus" cases. * @io_port: Support IO port accessors. Makes sense only when MMIO vs. IO port * access can be distinguished. + * @disable_locking: This regmap is either protected by external means or + * is guaranteed not to be accessed from multiple threads. + * Don't use any locking mechanisms. + * @lock: Optional lock callback (overrides regmap's default lock + * function, based on spinlock or mutex). + * @unlock: As above for unlocking. + * @lock_arg: This field is passed as the only argument of lock/unlock + * functions (ignored in case regular lock/unlock functions + * are not overridden). * @max_register: Optional, specifies the maximum valid register address. * @max_register_is_0: Optional, specifies that zero value in @max_register * should be taken into account. This is a workaround to @@ -373,21 +374,20 @@ typedef void (*regmap_unlock)(void *); * @reg_defaults_raw: Power on reset values for registers (for use with * register cache support). * @num_reg_defaults_raw: Number of elements in reg_defaults_raw. - * @reg_format_endian: Endianness for formatted register addresses. If this is - * DEFAULT, the @reg_format_endian_default value from the - * regmap bus is used. - * @val_format_endian: Endianness for formatted register values. If this is - * DEFAULT, the @reg_format_endian_default value from the - * regmap bus is used. - * - * @ranges: Array of configuration entries for virtual address ranges. - * @num_ranges: Number of range configuration entries. * @use_hwlock: Indicate if a hardware spinlock should be used. * @use_raw_spinlock: Indicate if a raw spinlock should be used. * @hwlock_id: Specify the hardware spinlock id. * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, * HWLOCK_IRQ or 0. - * @can_sleep: Optional, specifies whether regmap operations can sleep. + * @reg_format_endian: Endianness for formatted register addresses. If this is + * DEFAULT, the @reg_format_endian_default value from the + * regmap bus is used. + * @val_format_endian: Endianness for formatted register values. If this is + * DEFAULT, the @reg_format_endian_default value from the + * regmap bus is used. + * + * @ranges: Array of configuration entries for virtual address ranges. + * @num_ranges: Number of range configuration entries. 
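/*
 * Note on the regmap_config reshuffle above: it only regroups fields (and
 * their kerneldoc) by theme; no member changes type or meaning. Because
 * drivers use designated initializers, declaration order is irrelevant at
 * call sites, so a typical (hypothetical) config builds before and after:
 */
#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x7f,
	.fast_io	= true,		/* spinlock instead of mutex */
	.can_sleep	= false,
};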
*/ struct regmap_config { const char *name; @@ -406,11 +406,6 @@ struct regmap_config { bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg); bool (*readable_noinc_reg)(struct device *dev, unsigned int reg); - bool disable_locking; - regmap_lock lock; - regmap_unlock unlock; - void *lock_arg; - int (*reg_read)(void *context, unsigned int reg, unsigned int *val); int (*reg_write)(void *context, unsigned int reg, unsigned int val); int (*reg_update_bits)(void *context, unsigned int reg, @@ -422,9 +417,16 @@ struct regmap_config { size_t max_raw_read; size_t max_raw_write; + bool can_sleep; + bool fast_io; bool io_port; + bool disable_locking; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; + unsigned int max_register; bool max_register_is_0; const struct regmap_access_table *wr_table; @@ -448,18 +450,16 @@ struct regmap_config { bool use_relaxed_mmio; bool can_multi_write; - enum regmap_endian reg_format_endian; - enum regmap_endian val_format_endian; - - const struct regmap_range_cfg *ranges; - unsigned int num_ranges; - bool use_hwlock; bool use_raw_spinlock; unsigned int hwlock_id; unsigned int hwlock_mode; - bool can_sleep; + enum regmap_endian reg_format_endian; + enum regmap_endian val_format_endian; + + const struct regmap_range_cfg *ranges; + unsigned int num_ranges; }; /** diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index ed180ca419..59d0b9a79e 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -164,6 +164,7 @@ struct regulator *__must_check devm_regulator_get_optional(struct device *dev, const char *id); int devm_regulator_get_enable(struct device *dev, const char *id); int devm_regulator_get_enable_optional(struct device *dev, const char *id); +int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id); void regulator_put(struct regulator *regulator); void devm_regulator_put(struct regulator *regulator); @@ -329,6 +330,12 @@ static inline int devm_regulator_get_enable_optional(struct device *dev, return 0; } +static inline int devm_regulator_get_enable_read_voltage(struct device *dev, + const char *id) +{ + return -ENODEV; +} + static inline struct regulator *__must_check regulator_get_optional(struct device *dev, const char *id) { diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h index 505c908dbb..243633c8dc 100644 --- a/include/linux/regulator/pca9450.h +++ b/include/linux/regulator/pca9450.h @@ -9,6 +9,7 @@ enum pca9450_chip_type { PCA9450_TYPE_PCA9450A = 0, PCA9450_TYPE_PCA9450BC, + PCA9450_TYPE_PCA9451A, PCA9450_TYPE_AMOUNT, }; diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h index 7c2b7cc9fe..344ff41c22 100644 --- a/include/linux/remoteproc/mtk_scp.h +++ b/include/linux/remoteproc/mtk_scp.h @@ -43,6 +43,7 @@ enum scp_ipi_id { SCP_IPI_CROS_HOST_CMD, SCP_IPI_VDEC_LAT, SCP_IPI_VDEC_CORE, + SCP_IPI_IMGSYS_CMD, SCP_IPI_NS_SERVICE = 0xFF, SCP_IPI_MAX = 0x100, }; diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h index b6f3797277..015c8298be 100644 --- a/include/linux/rhashtable-types.h +++ b/include/linux/rhashtable-types.h @@ -9,6 +9,7 @@ #ifndef _LINUX_RHASHTABLE_TYPES_H #define _LINUX_RHASHTABLE_TYPES_H +#include <linux/alloc_tag.h> #include <linux/atomic.h> #include <linux/compiler.h> #include <linux/mutex.h> @@ -88,6 +89,9 @@ struct rhashtable { struct mutex mutex; spinlock_t lock; atomic_t nelems; +#ifdef CONFIG_MEM_ALLOC_PROFILING + struct alloc_tag 
*alloc_tag; +#endif }; /** @@ -127,9 +131,12 @@ struct rhashtable_iter { bool end_of_table; }; -int rhashtable_init(struct rhashtable *ht, +int rhashtable_init_noprof(struct rhashtable *ht, const struct rhashtable_params *params); -int rhltable_init(struct rhltable *hlt, +#define rhashtable_init(...) alloc_hooks(rhashtable_init_noprof(__VA_ARGS__)) + +int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params); +#define rhltable_init(...) alloc_hooks(rhltable_init_noprof(__VA_ARGS__)) #endif /* _LINUX_RHASHTABLE_TYPES_H */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 5b5357c0bd..8463a128e2 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -633,7 +633,7 @@ restart: * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking - * for a entry with an identical key. The first matching entry is returned. + * for an entry with an identical key. The first matching entry is returned. * * This must only be called under the RCU read lock. * @@ -655,7 +655,7 @@ static inline void *rhashtable_lookup( * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking - * for a entry with an identical key. The first matching entry is returned. + * for an entry with an identical key. The first matching entry is returned. * * Only use this function when you have other mechanisms guaranteeing * that the object won't go away after the RCU read lock is released. @@ -682,7 +682,7 @@ static inline void *rhashtable_lookup_fast( * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking - * for a entry with an identical key. All matching entries are returned + * for an entry with an identical key. All matching entries are returned * in a list. * * This must only be called under the RCU read lock. @@ -699,7 +699,7 @@ static inline struct rhlist_head *rhltable_lookup( } /* Internal function, please use rhashtable_insert_fast() instead. This - * function returns the existing element already in hashes in there is a clash, + * function returns the existing element already in hashes if there is a clash, * otherwise it returns an error via ERR_PTR(). */ static inline void *__rhashtable_insert_fast( @@ -1130,7 +1130,7 @@ static inline int rhashtable_remove_fast( * * Since the hash chain is single linked, the removal operation needs to * walk the bucket chain upon removal. The removal operation is thus - * considerable slow if the hash table is not correctly sized. + * considerably slower if the hash table is not correctly sized. 
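The rhashtable_init()/rhltable_init() conversion a few hunks above keeps the public names as alloc_hooks() wrappers around the new _noprof variants, so existing callers compile unchanged while, under CONFIG_MEM_ALLOC_PROFILING, the table's allocations are attributed to the call site. A minimal sketch of an unchanged caller; struct item, item_params and item_table_setup() are hypothetical names, not part of this patch:

#include <linux/rhashtable.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct item {
	u32 key;			/* hypothetical hash key */
	struct rhash_head node;		/* linkage used by the rhashtable */
};

static const struct rhashtable_params item_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct item, key),
	.head_offset	= offsetof(struct item, node),
	.automatic_shrinking = true,
};

static struct rhashtable item_table;

static int item_table_setup(void)
{
	/* Expands to alloc_hooks(rhashtable_init_noprof(...)), so the
	 * caller needs no changes to pick up allocation profiling. */
	return rhashtable_init(&item_table, &item_params);
}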
* * Will automatically shrink the table if permitted when residency drops * below 30% diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index dc5ae4e96a..96d2140b47 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -6,6 +6,8 @@ #include <linux/seq_file.h> #include <linux/poll.h> +#include <uapi/linux/trace_mmap.h> + struct trace_buffer; struct ring_buffer_iter; @@ -223,4 +225,8 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node); #define trace_rb_cpu_prepare NULL #endif +int ring_buffer_map(struct trace_buffer *buffer, int cpu, + struct vm_area_struct *vma); +int ring_buffer_unmap(struct trace_buffer *buffer, int cpu); +int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu); #endif /* _LINUX_RING_BUFFER_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b7944a8336..7229b9baf2 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -273,6 +273,7 @@ static inline int hugetlb_try_dup_anon_rmap(struct folio *folio, ClearPageAnonExclusive(&folio->page); } atomic_inc(&folio->_entire_mapcount); + atomic_inc(&folio->_large_mapcount); return 0; } @@ -284,7 +285,7 @@ static inline int hugetlb_try_share_anon_rmap(struct folio *folio) VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio); /* Paired with the memory barrier in try_grab_folio(). */ - if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + if (IS_ENABLED(CONFIG_HAVE_GUP_FAST)) smp_mb(); if (unlikely(folio_maybe_dma_pinned(folio))) @@ -295,7 +296,7 @@ static inline int hugetlb_try_share_anon_rmap(struct folio *folio) * This is conceptually a smp_wmb() paired with the smp_rmb() in * gup_must_unshare(). */ - if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + if (IS_ENABLED(CONFIG_HAVE_GUP_FAST)) smp_mb__after_atomic(); return 0; } @@ -306,6 +307,7 @@ static inline void hugetlb_add_file_rmap(struct folio *folio) VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); atomic_inc(&folio->_entire_mapcount); + atomic_inc(&folio->_large_mapcount); } static inline void hugetlb_remove_rmap(struct folio *folio) @@ -313,21 +315,31 @@ static inline void hugetlb_remove_rmap(struct folio *folio) VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); atomic_dec(&folio->_entire_mapcount); + atomic_dec(&folio->_large_mapcount); } static __always_inline void __folio_dup_file_rmap(struct folio *folio, struct page *page, int nr_pages, enum rmap_level level) { + const int orig_nr_pages = nr_pages; + __folio_rmap_sanity_checks(folio, page, nr_pages, level); switch (level) { case RMAP_LEVEL_PTE: + if (!folio_test_large(folio)) { + atomic_inc(&page->_mapcount); + break; + } + do { atomic_inc(&page->_mapcount); } while (page++, --nr_pages > 0); + atomic_add(orig_nr_pages, &folio->_large_mapcount); break; case RMAP_LEVEL_PMD: atomic_inc(&folio->_entire_mapcount); + atomic_inc(&folio->_large_mapcount); break; } } @@ -347,8 +359,12 @@ static inline void folio_dup_file_rmap_ptes(struct folio *folio, { __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE); } -#define folio_dup_file_rmap_pte(folio, page) \ - folio_dup_file_rmap_ptes(folio, page, 1) + +static __always_inline void folio_dup_file_rmap_pte(struct folio *folio, + struct page *page) +{ + __folio_dup_file_rmap(folio, page, 1, RMAP_LEVEL_PTE); +} /** * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio @@ -373,6 +389,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *src_vma, enum rmap_level level) { + const int 
orig_nr_pages = nr_pages; bool maybe_pinned; int i; @@ -401,11 +418,20 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, if (PageAnonExclusive(page + i)) return -EBUSY; } + + if (!folio_test_large(folio)) { + if (PageAnonExclusive(page)) + ClearPageAnonExclusive(page); + atomic_inc(&page->_mapcount); + break; + } + do { if (PageAnonExclusive(page)) ClearPageAnonExclusive(page); atomic_inc(&page->_mapcount); } while (page++, --nr_pages > 0); + atomic_add(orig_nr_pages, &folio->_large_mapcount); break; case RMAP_LEVEL_PMD: if (PageAnonExclusive(page)) { @@ -414,6 +440,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, ClearPageAnonExclusive(page); } atomic_inc(&folio->_entire_mapcount); + atomic_inc(&folio->_large_mapcount); break; } return 0; @@ -448,8 +475,13 @@ static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio, return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma, RMAP_LEVEL_PTE); } -#define folio_try_dup_anon_rmap_pte(folio, page, vma) \ - folio_try_dup_anon_rmap_ptes(folio, page, 1, vma) + +static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio, + struct page *page, struct vm_area_struct *src_vma) +{ + return __folio_try_dup_anon_rmap(folio, page, 1, src_vma, + RMAP_LEVEL_PTE); +} /** * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range @@ -541,7 +573,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio, */ /* Paired with the memory barrier in try_grab_folio(). */ - if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + if (IS_ENABLED(CONFIG_HAVE_GUP_FAST)) smp_mb(); if (unlikely(folio_maybe_dma_pinned(folio))) @@ -552,7 +584,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio, * This is conceptually a smp_wmb() paired with the smp_rmb() in * gup_must_unshare(). 
*/ - if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + if (IS_ENABLED(CONFIG_HAVE_GUP_FAST)) smp_mb__after_atomic(); return 0; } @@ -698,7 +730,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked); -int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); +unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); /* * rmap_walk_control: To control rmap traversing for specific needs diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index cdfc897f1e..a7da7dfc06 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -7,6 +7,7 @@ #include <linux/netdevice.h> #include <linux/wait.h> #include <linux/refcount.h> +#include <linux/cleanup.h> #include <uapi/linux/rtnetlink.h> extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); @@ -46,6 +47,8 @@ extern int rtnl_is_locked(void); extern int rtnl_lock_killable(void); extern bool refcount_dec_and_rtnl_lock(refcount_t *r); +DEFINE_LOCK_GUARD_0(rtnl, rtnl_lock(), rtnl_unlock()) + extern wait_queue_head_t netdev_unregistering_wq; extern atomic_t dev_unreg_count; extern struct rw_semaphore pernet_ops_rwsem; diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index d662cf1360..c09cdcc994 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -36,6 +36,11 @@ struct sbitmap_word { * @cleared: word holding cleared bits */ unsigned long cleared ____cacheline_aligned_in_smp; + + /** + * @swap_lock: serializes simultaneous updates of ->word and ->cleared + */ + spinlock_t swap_lock; } ____cacheline_aligned_in_smp; /** diff --git a/include/linux/sched.h b/include/linux/sched.h index 3c2abbc587..76214d7c81 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -301,7 +301,7 @@ enum { TASK_COMM_LEN = 16, }; -extern void scheduler_tick(void); +extern void sched_tick(void); #define MAX_SCHEDULE_TIMEOUT LONG_MAX @@ -770,6 +770,10 @@ struct task_struct { unsigned int flags; unsigned int ptrace; +#ifdef CONFIG_MEM_ALLOC_PROFILING + struct alloc_tag *alloc_tag; +#endif + #ifdef CONFIG_SMP int on_cpu; struct __call_single_node wake_entry; @@ -810,6 +814,7 @@ struct task_struct { struct task_group *sched_task_group; #endif + #ifdef CONFIG_UCLAMP_TASK /* * Clamp values requested for a scheduling entity. 
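The DEFINE_LOCK_GUARD_0(rtnl, rtnl_lock(), rtnl_unlock()) added to rtnetlink.h above wires the RTNL into the <linux/cleanup.h> guard machinery, so the lock is released automatically on every return path. A minimal sketch of a caller; example_change_mtu() is a hypothetical helper, while dev_set_mtu() and netif_device_present() are existing netdevice APIs that expect the RTNL to be held:

#include <linux/cleanup.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical helper: holds the RTNL for its whole scope. */
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	guard(rtnl)();		/* rtnl_lock() now, rtnl_unlock() on scope exit */

	if (!netif_device_present(dev))
		return -ENODEV;	/* early return still drops the RTNL */

	return dev_set_mtu(dev, new_mtu);
}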
@@ -835,6 +840,7 @@ struct task_struct { #endif unsigned int policy; + unsigned long max_allowed_capacity; int nr_cpus_allowed; const cpumask_t *cpus_ptr; cpumask_t *user_cpus_ptr; @@ -1443,8 +1449,6 @@ struct task_struct { #ifdef CONFIG_MEMCG struct mem_cgroup *memcg_in_oom; - gfp_t memcg_oom_gfp_mask; - int memcg_oom_order; /* Number of pages to reclaim on returning to userland: */ unsigned int memcg_nr_pages_over_high; @@ -2060,47 +2064,6 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock); __cond_resched_rwlock_write(lock); \ }) -#ifdef CONFIG_PREEMPT_DYNAMIC - -extern bool preempt_model_none(void); -extern bool preempt_model_voluntary(void); -extern bool preempt_model_full(void); - -#else - -static inline bool preempt_model_none(void) -{ - return IS_ENABLED(CONFIG_PREEMPT_NONE); -} -static inline bool preempt_model_voluntary(void) -{ - return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY); -} -static inline bool preempt_model_full(void) -{ - return IS_ENABLED(CONFIG_PREEMPT); -} - -#endif - -static inline bool preempt_model_rt(void) -{ - return IS_ENABLED(CONFIG_PREEMPT_RT); -} - -/* - * Does the preemption model allow non-cooperative preemption? - * - * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with - * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the - * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the - * PREEMPT_NONE model. - */ -static inline bool preempt_model_preemptible(void) -{ - return preempt_model_full() || preempt_model_rt(); -} - static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); @@ -2187,4 +2150,23 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } extern void sched_set_stop_task(int cpu, struct task_struct *stop); +#ifdef CONFIG_MEM_ALLOC_PROFILING +static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag) +{ + swap(current->alloc_tag, tag); + return tag; +} + +static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old) +{ +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG + WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n"); +#endif + current->alloc_tag = old; +} +#else +#define alloc_tag_save(_tag) NULL +#define alloc_tag_restore(_tag, _old) do {} while (0) +#endif + #endif diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 02f5090ffe..e62ff805cf 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -92,9 +92,12 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_VM_MERGE_ANY 30 #define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY) +#define MMF_TOPDOWN 31 /* mm searches top down by default */ +#define MMF_TOPDOWN_MASK (1 << MMF_TOPDOWN) + #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\ - MMF_VM_MERGE_ANY_MASK) + MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK) static inline unsigned long mmf_init_flags(unsigned long flags) { diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h index 478084f910..e670ac2823 100644 --- a/include/linux/sched/idle.h +++ b/include/linux/sched/idle.h @@ -5,8 +5,8 @@ #include <linux/sched.h> enum cpu_idle_type { + __CPU_NOT_IDLE = 0, CPU_IDLE, - CPU_NOT_IDLE, CPU_NEWLY_IDLE, CPU_MAX_IDLE_TYPES }; diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index b6543f9d78..91546493c4 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -8,6 +8,7 @@ #include <linux/mm_types.h> #include <linux/gfp.h> 
#include <linux/sync_core.h> +#include <linux/sched/coredump.h> /* * Routines for handling mm_structs @@ -186,6 +187,27 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); +unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags); + +unsigned long +arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags); +unsigned long +arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t); + +unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, + struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags, + vm_flags_t vm_flags); + unsigned long generic_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 18572c9ea7..4237daa5ac 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -110,7 +110,7 @@ struct sched_domain { unsigned long last_decay_max_lb_cost; #ifdef CONFIG_SCHEDSTATS - /* load_balance() stats */ + /* sched_balance_rq() stats */ unsigned int lb_count[CPU_MAX_IDLE_TYPES]; unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; @@ -270,17 +270,17 @@ unsigned long arch_scale_cpu_capacity(int cpu) } #endif -#ifndef arch_scale_thermal_pressure +#ifndef arch_scale_hw_pressure static __always_inline -unsigned long arch_scale_thermal_pressure(int cpu) +unsigned long arch_scale_hw_pressure(int cpu) { return 0; } #endif -#ifndef arch_update_thermal_pressure +#ifndef arch_update_hw_pressure static __always_inline -void arch_update_thermal_pressure(const struct cpumask *cpus, +void arch_update_hw_pressure(const struct cpumask *cpus, unsigned long capped_frequency) { } #endif diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index b807141acc..3a9bb5b9a9 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -737,6 +737,89 @@ struct scmi_powercap_proto_ops { u32 *power_thresh_high); }; +enum scmi_pinctrl_selector_type { + PIN_TYPE = 0, + GROUP_TYPE, + FUNCTION_TYPE, +}; + +enum scmi_pinctrl_conf_type { + SCMI_PIN_DEFAULT = 0, + SCMI_PIN_BIAS_BUS_HOLD = 1, + SCMI_PIN_BIAS_DISABLE = 2, + SCMI_PIN_BIAS_HIGH_IMPEDANCE = 3, + SCMI_PIN_BIAS_PULL_UP = 4, + SCMI_PIN_BIAS_PULL_DEFAULT = 5, + SCMI_PIN_BIAS_PULL_DOWN = 6, + SCMI_PIN_DRIVE_OPEN_DRAIN = 7, + SCMI_PIN_DRIVE_OPEN_SOURCE = 8, + SCMI_PIN_DRIVE_PUSH_PULL = 9, + SCMI_PIN_DRIVE_STRENGTH = 10, + SCMI_PIN_INPUT_DEBOUNCE = 11, + SCMI_PIN_INPUT_MODE = 12, + SCMI_PIN_PULL_MODE = 13, + SCMI_PIN_INPUT_VALUE = 14, + SCMI_PIN_INPUT_SCHMITT = 15, + SCMI_PIN_LOW_POWER_MODE = 16, + SCMI_PIN_OUTPUT_MODE = 17, + SCMI_PIN_OUTPUT_VALUE = 18, + SCMI_PIN_POWER_SOURCE = 19, + SCMI_PIN_SLEW_RATE = 20, + SCMI_PIN_OEM_START = 192, + SCMI_PIN_OEM_END = 255, +}; + +/** + * struct scmi_pinctrl_proto_ops - represents the various operations provided + * by SCMI Pinctrl Protocol + * + * @count_get: returns count of the registered elements in given type + * @name_get: returns name by index of given type + * @group_pins_get: returns the set of pins, assigned to the specified group + * @function_groups_get: 
returns the set of groups, assigned to the specified + * function + * @mux_set: set muxing function for groups of pins + * @settings_get_one: returns one configuration parameter for pin or group + * specified by config_type + * @settings_get_all: returns all configuration parameters for pin or group + * @settings_conf: sets the configuration parameter for pin or group + * @pin_request: acquire pin before selecting mux setting + * @pin_free: frees pin, acquired by the pin_request call + */ +struct scmi_pinctrl_proto_ops { + int (*count_get)(const struct scmi_protocol_handle *ph, + enum scmi_pinctrl_selector_type type); + int (*name_get)(const struct scmi_protocol_handle *ph, u32 selector, + enum scmi_pinctrl_selector_type type, + const char **name); + int (*group_pins_get)(const struct scmi_protocol_handle *ph, + u32 selector, const unsigned int **pins, + unsigned int *nr_pins); + int (*function_groups_get)(const struct scmi_protocol_handle *ph, + u32 selector, unsigned int *nr_groups, + const unsigned int **groups); + int (*mux_set)(const struct scmi_protocol_handle *ph, u32 selector, + u32 group); + int (*settings_get_one)(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + enum scmi_pinctrl_conf_type config_type, + u32 *config_value); + int (*settings_get_all)(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + unsigned int *nr_configs, + enum scmi_pinctrl_conf_type *config_types, + u32 *config_values); + int (*settings_conf)(const struct scmi_protocol_handle *ph, + u32 selector, enum scmi_pinctrl_selector_type type, + unsigned int nr_configs, + enum scmi_pinctrl_conf_type *config_type, + u32 *config_value); + int (*pin_request)(const struct scmi_protocol_handle *ph, u32 pin); + int (*pin_free)(const struct scmi_protocol_handle *ph, u32 pin); +}; + /** * struct scmi_notify_ops - represents notifications' operations provided by * SCMI core @@ -783,8 +866,6 @@ struct scmi_notify_ops { const u32 *src_id, struct notifier_block *nb); int (*devm_event_notifier_unregister)(struct scmi_device *sdev, - u8 proto_id, u8 evt_id, - const u32 *src_id, struct notifier_block *nb); int (*event_notifier_register)(const struct scmi_handle *handle, u8 proto_id, u8 evt_id, @@ -844,6 +925,7 @@ enum scmi_std_protocol { SCMI_PROTOCOL_RESET = 0x16, SCMI_PROTOCOL_VOLTAGE = 0x17, SCMI_PROTOCOL_POWERCAP = 0x18, + SCMI_PROTOCOL_PINCTRL = 0x19, }; enum scmi_system_events { diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h index 75303c1262..6a4a3cec46 100644 --- a/include/linux/screen_info.h +++ b/include/linux/screen_info.h @@ -49,6 +49,16 @@ static inline u64 __screen_info_lfb_size(const struct screen_info *si, unsigned return lfb_size; } +static inline bool __screen_info_vbe_mode_nonvga(const struct screen_info *si) +{ + /* + * VESA modes typically run on VGA hardware. A set bit 5 signals that this + * is not the case. Drivers then cannot make use of VGA resources. See + * Sec 4.4 of the VBE 2.0 spec.
+ */ + return si->vesa_attributes & BIT(5); +} + static inline unsigned int __screen_info_video_type(unsigned int type) { switch (type) { diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h index acf7e1a3f3..e918f96881 100644 --- a/include/linux/secretmem.h +++ b/include/linux/secretmem.h @@ -6,25 +6,8 @@ extern const struct address_space_operations secretmem_aops; -static inline bool folio_is_secretmem(struct folio *folio) +static inline bool secretmem_mapping(struct address_space *mapping) { - struct address_space *mapping; - - /* - * Using folio_mapping() is quite slow because of the actual call - * instruction. - * We know that secretmem pages are not compound, so we can - * save a couple of cycles here. - */ - if (folio_test_large(folio)) - return false; - - mapping = (struct address_space *) - ((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS); - - if (!mapping || mapping != folio->mapping) - return false; - return mapping->a_ops == &secretmem_aops; } @@ -38,7 +21,7 @@ static inline bool vma_is_secretmem(struct vm_area_struct *vma) return false; } -static inline bool folio_is_secretmem(struct folio *folio) +static inline bool secretmem_mapping(struct address_space *mapping) { return false; } diff --git a/include/linux/security.h b/include/linux/security.h index 5122e3ad83..de3af33e6f 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -398,7 +398,7 @@ int security_inode_setsecurity(struct inode *inode, const char *name, const void int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); void security_inode_getsecid(struct inode *inode, u32 *secid); int security_inode_copy_up(struct dentry *src, struct cred **new); -int security_inode_copy_up_xattr(const char *name); +int security_inode_copy_up_xattr(struct dentry *src, const char *name); int security_kernfs_init_security(struct kernfs_node *kn_dir, struct kernfs_node *kn); int security_file_permission(struct file *file, int mask); @@ -1016,7 +1016,7 @@ static inline int security_kernfs_init_security(struct kernfs_node *kn_dir, return 0; } -static inline int security_inode_copy_up_xattr(const char *name) +static inline int security_inode_copy_up_xattr(struct dentry *src, const char *name) { return -EOPNOTSUPP; } diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 234bcdb1fb..8bd4fda6e0 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -118,7 +118,18 @@ void seq_vprintf(struct seq_file *m, const char *fmt, va_list args); __printf(2, 3) void seq_printf(struct seq_file *m, const char *fmt, ...); void seq_putc(struct seq_file *m, char c); -void seq_puts(struct seq_file *m, const char *s); +void __seq_puts(struct seq_file *m, const char *s); + +static __always_inline void seq_puts(struct seq_file *m, const char *s) +{ + if (!__builtin_constant_p(*s)) + __seq_puts(m, s); + else if (s[0] && !s[1]) + seq_putc(m, s[0]); + else + seq_write(m, s, __builtin_strlen(s)); +} + void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter, unsigned long long num, unsigned int width); void seq_put_decimal_ull(struct seq_file *m, const char *delimiter, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 4697b8f450..aea25eef9a 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -11,7 +11,6 @@ #include <linux/compiler.h> #include <linux/console.h> #include <linux/interrupt.h> -#include <linux/circ_buf.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/tty.h> @@ 
-699,7 +698,6 @@ struct uart_state { struct tty_port port; enum uart_pm_state pm_state; - struct circ_buf xmit; atomic_t refcount; wait_queue_head_t remove_wait; @@ -723,12 +721,35 @@ struct uart_state { */ static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars) { - struct circ_buf *xmit = &up->state->xmit; + struct tty_port *tport = &up->state->port; - xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1); + kfifo_skip_count(&tport->xmit_fifo, chars); up->icount.tx += chars; } +static inline unsigned int uart_fifo_out(struct uart_port *up, + unsigned char *buf, unsigned int chars) +{ + struct tty_port *tport = &up->state->port; + + chars = kfifo_out(&tport->xmit_fifo, buf, chars); + up->icount.tx += chars; + + return chars; +} + +static inline unsigned int uart_fifo_get(struct uart_port *up, + unsigned char *ch) +{ + struct tty_port *tport = &up->state->port; + unsigned int chars; + + chars = kfifo_get(&tport->xmit_fifo, ch); + up->icount.tx += chars; + + return chars; +} + struct module; struct tty_driver; @@ -764,7 +785,7 @@ enum UART_TX_FLAGS { for_test, for_post) \ ({ \ struct uart_port *__port = (uport); \ - struct circ_buf *xmit = &__port->state->xmit; \ + struct tty_port *__tport = &__port->state->port; \ unsigned int pending; \ \ for (; (for_test) && (tx_ready); (for_post), __port->icount.tx++) { \ @@ -775,17 +796,18 @@ enum UART_TX_FLAGS { continue; \ } \ \ - if (uart_circ_empty(xmit) || uart_tx_stopped(__port)) \ + if (uart_tx_stopped(__port)) \ + break; \ + \ + if (!kfifo_get(&__tport->xmit_fifo, &(ch))) \ break; \ \ - (ch) = xmit->buf[xmit->tail]; \ (put_char); \ - xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE; \ } \ \ (tx_done); \ \ - pending = uart_circ_chars_pending(xmit); \ + pending = kfifo_len(&__tport->xmit_fifo); \ if (pending < WAKEUP_CHARS) { \ uart_write_wakeup(__port); \ \ @@ -991,15 +1013,6 @@ bool uart_match_port(const struct uart_port *port1, int uart_suspend_port(struct uart_driver *reg, struct uart_port *port); int uart_resume_port(struct uart_driver *reg, struct uart_port *port); -#define uart_circ_empty(circ) ((circ)->head == (circ)->tail) -#define uart_circ_clear(circ) ((circ)->head = (circ)->tail = 0) - -#define uart_circ_chars_pending(circ) \ - (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE)) - -#define uart_circ_chars_free(circ) \ - (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) - static inline int uart_tx_stopped(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h deleted file mode 100644 index befd55c08a..0000000000 --- a/include/linux/serial_max3100.h +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * - * Copyright (C) 2007 Christian Pellegrin - */ - - -#ifndef _LINUX_SERIAL_MAX3100_H -#define _LINUX_SERIAL_MAX3100_H 1 - - -/** - * struct plat_max3100 - MAX3100 SPI UART platform data - * @loopback: force MAX3100 in loopback - * @crystal: 1 for 3.6864 Mhz, 0 for 1.8432 - * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook - * called on suspend and resume to activate it. - * @poll_time: poll time for CTS signal in ms, 0 disables (so no hw - * flow ctrl is possible but you have less CPU usage) - * - * You should use this structure in your machine description to specify - * how the MAX3100 is connected. 
Example: - * - * static struct plat_max3100 max3100_plat_data = { - * .loopback = 0, - * .crystal = 0, - * .poll_time = 100, - * }; - * - * static struct spi_board_info spi_board_info[] = { - * { - * .modalias = "max3100", - * .platform_data = &max3100_plat_data, - * .irq = IRQ_EINT12, - * .max_speed_hz = 5*1000*1000, - * .chip_select = 0, - * }, - * }; - * - **/ -struct plat_max3100 { - int loopback; - int crystal; - void (*max3100_hw_suspend) (int suspend); - int poll_time; -}; - -#endif diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 9346cd4481..a45da7eef9 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -554,7 +554,7 @@ bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id); void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support, unsigned long *interfaces); phy_interface_t sfp_select_interface(struct sfp_bus *bus, - unsigned long *link_modes); + const unsigned long *link_modes); int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo); int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, @@ -592,7 +592,7 @@ static inline void sfp_parse_support(struct sfp_bus *bus, } static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus, - unsigned long *link_modes) + const unsigned long *link_modes) { return PHY_INTERFACE_MODE_NA; } diff --git a/include/linux/shm.h b/include/linux/shm.h index c55bef0538..1d3d3ae958 100644 --- a/include/linux/shm.h +++ b/include/linux/shm.h @@ -16,7 +16,6 @@ struct sysv_shm { long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr, unsigned long shmlba); -bool is_file_shm_hugepages(struct file *file); void exit_shm(struct task_struct *task); #define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist) #else @@ -30,10 +29,6 @@ static inline long do_shmat(int shmid, char __user *shmaddr, { return -ENOSYS; } -static inline bool is_file_shm_hugepages(struct file *file) -{ - return false; -} static inline void exit_shm(struct task_struct *task) { } diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index e2d45b7cb6..926496c9cc 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -177,10 +177,11 @@ static inline int skb_array_peek_len_any(struct skb_array *a) return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag); } -static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp) +static inline int skb_array_init_noprof(struct skb_array *a, int size, gfp_t gfp) { - return ptr_ring_init(&a->ring, size, gfp); + return ptr_ring_init_noprof(&a->ring, size, gfp); } +#define skb_array_init(...) alloc_hooks(skb_array_init_noprof(__VA_ARGS__)) static void __skb_array_destroy_skb(void *ptr) { @@ -198,15 +199,17 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); } -static inline int skb_array_resize_multiple(struct skb_array **rings, - int nrings, unsigned int size, - gfp_t gfp) +static inline int skb_array_resize_multiple_noprof(struct skb_array **rings, + int nrings, unsigned int size, + gfp_t gfp) { BUILD_BUG_ON(offsetof(struct skb_array, ring)); - return ptr_ring_resize_multiple((struct ptr_ring **)rings, - nrings, size, gfp, - __skb_array_destroy_skb); + return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings, + nrings, size, gfp, + __skb_array_destroy_skb); } +#define skb_array_resize_multiple(...) 
\ + alloc_hooks(skb_array_resize_multiple_noprof(__VA_ARGS__)) static inline void skb_array_cleanup(struct skb_array *a) { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5b1078c160..1c2902eaeb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -353,8 +353,6 @@ struct sk_buff; #define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS -extern int sysctl_max_skb_frags; - /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to * segment using its current segmentation instead. */ @@ -527,6 +525,13 @@ enum { #define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \ SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS) +struct ubuf_info_ops { + void (*complete)(struct sk_buff *, struct ubuf_info *, + bool zerocopy_success); + /* has to be compatible with skb_zcopy_set() */ + int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg); +}; + /* * The callback notifies userspace to release buffers when skb DMA is done in * lower device, the skb last reference should be 0 when calling this. @@ -536,8 +541,7 @@ enum { * The desc field is used to track userspace buffer index. */ struct ubuf_info { - void (*callback)(struct sk_buff *, struct ubuf_info *, - bool zerocopy_success); + const struct ubuf_info_ops *ops; refcount_t refcnt; u8 flags; }; @@ -992,7 +996,7 @@ struct sk_buff { #ifdef CONFIG_NETFILTER_SKIP_EGRESS __u8 nf_skip_egress:1; #endif -#ifdef CONFIG_TLS_DEVICE +#ifdef CONFIG_SKB_DECRYPTED __u8 decrypted:1; #endif __u8 slow_gro:1; @@ -1606,17 +1610,26 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) static inline int skb_cmp_decrypted(const struct sk_buff *skb1, const struct sk_buff *skb2) { -#ifdef CONFIG_TLS_DEVICE +#ifdef CONFIG_SKB_DECRYPTED return skb2->decrypted - skb1->decrypted; #else return 0; #endif } +static inline bool skb_is_decrypted(const struct sk_buff *skb) +{ +#ifdef CONFIG_SKB_DECRYPTED + return skb->decrypted; +#else + return false; +#endif +} + static inline void skb_copy_decrypted(struct sk_buff *to, const struct sk_buff *from) { -#ifdef CONFIG_TLS_DEVICE +#ifdef CONFIG_SKB_DECRYPTED to->decrypted = from->decrypted; #endif } @@ -1653,14 +1666,13 @@ static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) } #endif +extern const struct ubuf_info_ops msg_zerocopy_ubuf_ops; + struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, struct ubuf_info *uarg); void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); -void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, - bool success); - int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, struct iov_iter *from, size_t length); @@ -1748,13 +1760,13 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) static inline void net_zcopy_put(struct ubuf_info *uarg) { if (uarg) - uarg->callback(NULL, uarg, true); + uarg->ops->complete(NULL, uarg, true); } static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref) { if (uarg) { - if (uarg->callback == msg_zerocopy_callback) + if (uarg->ops == &msg_zerocopy_ubuf_ops) msg_zerocopy_put_abort(uarg, have_uref); else if (have_uref) net_zcopy_put(uarg); @@ -1768,7 +1780,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) if (uarg) { if (!skb_zcopy_is_nouarg(skb)) - uarg->callback(skb, uarg, zerocopy_success); + uarg->ops->complete(skb, uarg, zerocopy_success); skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY; } @@ -3356,13 +3368,7 @@ static inline void 
*napi_alloc_frag_align(unsigned int fragsz, return __napi_alloc_frag_align(fragsz, -align); } -struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, - unsigned int length, gfp_t gfp_mask); -static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, - unsigned int length) -{ - return __napi_alloc_skb(napi, length, GFP_ATOMIC); -} +struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int length); void napi_consume_skb(struct sk_buff *skb, int budget); void napi_skb_free_stolen_head(struct sk_buff *skb); @@ -3377,7 +3383,7 @@ void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason); * * %NULL is returned if there is no free memory. */ -static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, +static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) { /* This piece of code contains several assumptions. @@ -3390,13 +3396,11 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, */ gfp_mask |= __GFP_COMP | __GFP_MEMALLOC; - return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); + return alloc_pages_node_noprof(NUMA_NO_NODE, gfp_mask, order); } +#define __dev_alloc_pages(...) alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__)) -static inline struct page *dev_alloc_pages(unsigned int order) -{ - return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order); -} +#define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order) /** * __dev_alloc_page - allocate a page for network Rx @@ -3406,15 +3410,13 @@ static inline struct page *dev_alloc_pages(unsigned int order) * * %NULL is returned if there is no free memory. */ -static inline struct page *__dev_alloc_page(gfp_t gfp_mask) +static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask) { - return __dev_alloc_pages(gfp_mask, 0); + return __dev_alloc_pages_noprof(gfp_mask, 0); } +#define __dev_alloc_page(...) alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__)) -static inline struct page *dev_alloc_page(void) -{ - return dev_alloc_pages(0); -} +#define dev_alloc_page() dev_alloc_pages(0) /** * dev_page_is_reusable - check whether a page can be reused for network Rx @@ -3495,85 +3497,10 @@ static inline struct page *skb_frag_page(const skb_frag_t *frag) return netmem_to_page(frag->netmem); } -/** - * __skb_frag_ref - take an addition reference on a paged fragment. - * @frag: the paged fragment - * - * Takes an additional reference on the paged fragment @frag. - */ -static inline void __skb_frag_ref(skb_frag_t *frag) -{ - get_page(skb_frag_page(frag)); -} - -/** - * skb_frag_ref - take an addition reference on a paged fragment of an skb. - * @skb: the buffer - * @f: the fragment offset. - * - * Takes an additional reference on the @f'th paged fragment of @skb. 
- */ -static inline void skb_frag_ref(struct sk_buff *skb, int f) -{ - __skb_frag_ref(&skb_shinfo(skb)->frags[f]); -} - int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, unsigned int headroom); int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, struct bpf_prog *prog); -bool napi_pp_put_page(struct page *page, bool napi_safe); - -static inline void -skb_page_unref(const struct sk_buff *skb, struct page *page, bool napi_safe) -{ -#ifdef CONFIG_PAGE_POOL - if (skb->pp_recycle && napi_pp_put_page(page, napi_safe)) - return; -#endif - put_page(page); -} - -static inline void -napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe) -{ - struct page *page = skb_frag_page(frag); - -#ifdef CONFIG_PAGE_POOL - if (recycle && napi_pp_put_page(page, napi_safe)) - return; -#endif - put_page(page); -} - -/** - * __skb_frag_unref - release a reference on a paged fragment. - * @frag: the paged fragment - * @recycle: recycle the page if allocated via page_pool - * - * Releases a reference on the paged fragment @frag - * or recycles the page via the page_pool API. - */ -static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) -{ - napi_frag_unref(frag, recycle, false); -} - -/** - * skb_frag_unref - release a reference on a paged fragment of an skb. - * @skb: the buffer - * @f: the fragment offset - * - * Releases a reference on the @f'th paged fragment of @skb. - */ -static inline void skb_frag_unref(struct sk_buff *skb, int f) -{ - struct skb_shared_info *shinfo = skb_shinfo(skb); - - if (!skb_zcopy_managed(skb)) - __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle); -} - /** * skb_frag_address - gets the address of the data contained in a paged fragment * @frag: the paged fragment buffer @@ -4064,12 +3991,6 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, struct iov_iter *from, int len); int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); void skb_free_datagram(struct sock *sk, struct sk_buff *skb); -void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len); -static inline void skb_free_datagram_locked(struct sock *sk, - struct sk_buff *skb) -{ - __skb_free_datagram_locked(sk, skb, 0); -} int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h new file mode 100644 index 0000000000..11f0a40634 --- /dev/null +++ b/include/linux/skbuff_ref.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Skb ref helpers. + * + */ + +#ifndef _LINUX_SKBUFF_REF_H +#define _LINUX_SKBUFF_REF_H + +#include <linux/skbuff.h> + +/** + * __skb_frag_ref - take an additional reference on a paged fragment. + * @frag: the paged fragment + * + * Takes an additional reference on the paged fragment @frag. + */ +static inline void __skb_frag_ref(skb_frag_t *frag) +{ + get_page(skb_frag_page(frag)); +} + +/** + * skb_frag_ref - take an additional reference on a paged fragment of an skb. + * @skb: the buffer + * @f: the fragment offset. + * + * Takes an additional reference on the @f'th paged fragment of @skb.
+ */ +static inline void skb_frag_ref(struct sk_buff *skb, int f) +{ + __skb_frag_ref(&skb_shinfo(skb)->frags[f]); +} + +bool napi_pp_put_page(struct page *page); + +static inline void +skb_page_unref(struct page *page, bool recycle) +{ +#ifdef CONFIG_PAGE_POOL + if (recycle && napi_pp_put_page(page)) + return; +#endif + put_page(page); +} + +/** + * __skb_frag_unref - release a reference on a paged fragment. + * @frag: the paged fragment + * @recycle: recycle the page if allocated via page_pool + * + * Releases a reference on the paged fragment @frag + * or recycles the page via the page_pool API. + */ +static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) +{ + skb_page_unref(skb_frag_page(frag), recycle); +} + +/** + * skb_frag_unref - release a reference on a paged fragment of an skb. + * @skb: the buffer + * @f: the fragment offset + * + * Releases a reference on the @f'th paged fragment of @skb. + */ +static inline void skb_frag_unref(struct sk_buff *skb, int f) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + + if (!skb_zcopy_managed(skb)) + __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle); +} + +#endif /* _LINUX_SKBUFF_REF_H */ diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index a509caf823..c9efda9df2 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -58,6 +58,10 @@ struct sk_psock_progs { struct bpf_prog *stream_parser; struct bpf_prog *stream_verdict; struct bpf_prog *skb_verdict; + struct bpf_link *msg_parser_link; + struct bpf_link *stream_parser_link; + struct bpf_link *stream_verdict_link; + struct bpf_link *skb_verdict_link; }; enum sk_psock_state_bits { @@ -410,11 +414,9 @@ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock); int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg); -static inline struct sk_psock_link *sk_psock_init_link(void) -{ - return kzalloc(sizeof(struct sk_psock_link), - GFP_ATOMIC | __GFP_NOWARN); -} +#define sk_psock_init_link() \ + ((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link), \ + GFP_ATOMIC | __GFP_NOWARN)) static inline void sk_psock_free_link(struct sk_psock_link *link) { diff --git a/include/linux/slab.h b/include/linux/slab.h index 739b212625..7247e217e2 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -56,6 +56,9 @@ enum _slab_flag_bits { #endif _SLAB_OBJECT_POISON, _SLAB_CMPXCHG_DOUBLE, +#ifdef CONFIG_SLAB_OBJ_EXT + _SLAB_NO_OBJ_EXT, +#endif _SLAB_FLAGS_LAST_BIT }; @@ -202,6 +205,13 @@ enum _slab_flag_bits { #endif #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ +/* Slab created using create_boot_cache */ +#ifdef CONFIG_SLAB_OBJ_EXT +#define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT) +#else +#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED +#endif + /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. * @@ -261,7 +271,10 @@ int kmem_cache_shrink(struct kmem_cache *s); /* * Common kmalloc functions provided by all allocators */ -void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2); +void * __must_check krealloc_noprof(const void *objp, size_t new_size, + gfp_t flags) __realloc_size(2); +#define krealloc(...) 
alloc_hooks(krealloc_noprof(__VA_ARGS__)) + void kfree(const void *objp); void kfree_sensitive(const void *objp); size_t __ksize(const void *objp); @@ -513,7 +526,10 @@ static __always_inline unsigned int __kmalloc_index(size_t size, static_assert(PAGE_SHIFT <= 20); #define kmalloc_index(s) __kmalloc_index(s, true) -void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1); +#include <linux/alloc_tag.h> + +void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1); +#define __kmalloc(...) alloc_hooks(__kmalloc_noprof(__VA_ARGS__)) /** * kmem_cache_alloc - Allocate an object @@ -525,9 +541,14 @@ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_siz * * Return: pointer to the new object or %NULL in case of error */ -void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc; -void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, - gfp_t gfpflags) __assume_slab_alignment __malloc; +void *kmem_cache_alloc_noprof(struct kmem_cache *cachep, + gfp_t flags) __assume_slab_alignment __malloc; +#define kmem_cache_alloc(...) alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__)) + +void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, + gfp_t gfpflags) __assume_slab_alignment __malloc; +#define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__)) + void kmem_cache_free(struct kmem_cache *s, void *objp); /* @@ -538,29 +559,40 @@ void kmem_cache_free(struct kmem_cache *s, void *objp); * Note that interrupts must be enabled when calling these functions. */ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p); -int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p); + +int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p); +#define kmem_cache_alloc_bulk(...) alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__)) static __always_inline void kfree_bulk(size_t size, void **p) { kmem_cache_free_bulk(NULL, size, p); } -void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment +void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __alloc_size(1); -void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment - __malloc; +#define __kmalloc_node(...) alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__)) -void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size) +void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags, + int node) __assume_slab_alignment __malloc; +#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__)) + +void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t flags, size_t size) __assume_kmalloc_alignment __alloc_size(3); -void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, - int node, size_t size) __assume_kmalloc_alignment +void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, + int node, size_t size) __assume_kmalloc_alignment __alloc_size(4); -void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment +#define kmalloc_trace(...) alloc_hooks(kmalloc_trace_noprof(__VA_ARGS__)) + +#define kmalloc_node_trace(...) alloc_hooks(kmalloc_node_trace_noprof(__VA_ARGS__)) + +void *kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment __alloc_size(1); +#define kmalloc_large(...) 
alloc_hooks(kmalloc_large_noprof(__VA_ARGS__)) -void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment +void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment __alloc_size(1); +#define kmalloc_large_node(...) alloc_hooks(kmalloc_large_node_noprof(__VA_ARGS__)) /** * kmalloc - allocate kernel memory @@ -616,37 +648,39 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align * Try really hard to succeed the allocation but fail * eventually. */ -static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags) +static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags) { if (__builtin_constant_p(size) && size) { unsigned int index; if (size > KMALLOC_MAX_CACHE_SIZE) - return kmalloc_large(size, flags); + return kmalloc_large_noprof(size, flags); index = kmalloc_index(size); - return kmalloc_trace( + return kmalloc_trace_noprof( kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index], flags, size); } - return __kmalloc(size, flags); + return __kmalloc_noprof(size, flags); } +#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__)) -static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node) +static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node) { if (__builtin_constant_p(size) && size) { unsigned int index; if (size > KMALLOC_MAX_CACHE_SIZE) - return kmalloc_large_node(size, flags, node); + return kmalloc_large_node_noprof(size, flags, node); index = kmalloc_index(size); - return kmalloc_node_trace( + return kmalloc_node_trace_noprof( kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index], flags, node, size); } - return __kmalloc_node(size, flags, node); + return __kmalloc_node_noprof(size, flags, node); } +#define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__)) /** * kmalloc_array - allocate memory for an array. @@ -654,16 +688,17 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ -static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags) +static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; if (__builtin_constant_p(n) && __builtin_constant_p(size)) - return kmalloc(bytes, flags); - return __kmalloc(bytes, flags); + return kmalloc_noprof(bytes, flags); + return kmalloc_noprof(bytes, flags); } +#define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__)) /** * krealloc_array - reallocate memory for an array. @@ -672,18 +707,19 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_ * @new_size: new size of a single member of the array * @flags: the type of memory to allocate (see kmalloc) */ -static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p, - size_t new_n, - size_t new_size, - gfp_t flags) +static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p, + size_t new_n, + size_t new_size, + gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) return NULL; - return krealloc(p, bytes, flags); + return krealloc_noprof(p, bytes, flags); } +#define krealloc_array(...) alloc_hooks(krealloc_array_noprof(__VA_ARGS__)) /** * kcalloc - allocate memory for an array. 
The memory is set to zero. @@ -691,16 +727,12 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p, * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ -static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags) -{ - return kmalloc_array(n, size, flags | __GFP_ZERO); -} +#define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO) -void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node, +void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node, unsigned long caller) __alloc_size(1); -#define kmalloc_node_track_caller(size, flags, node) \ - __kmalloc_node_track_caller(size, flags, node, \ - _RET_IP_) +#define kmalloc_node_track_caller(...) \ + alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_)) /* * kmalloc_track_caller is a special version of kmalloc that records the @@ -710,11 +742,12 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node, * allocator where we care about the real place the memory allocation * request comes from. */ -#define kmalloc_track_caller(size, flags) \ - __kmalloc_node_track_caller(size, flags, \ - NUMA_NO_NODE, _RET_IP_) +#define kmalloc_track_caller(...) kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE) -static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, +#define kmalloc_track_caller_noprof(...) \ + kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_) + +static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node) { size_t bytes; @@ -722,75 +755,63 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; if (__builtin_constant_p(n) && __builtin_constant_p(size)) - return kmalloc_node(bytes, flags, node); - return __kmalloc_node(bytes, flags, node); + return kmalloc_node_noprof(bytes, flags, node); + return __kmalloc_node_noprof(bytes, flags, node); } +#define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__)) -static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) -{ - return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); -} +#define kcalloc_node(_n, _size, _flags, _node) \ + kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node) /* * Shortcuts */ -static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) -{ - return kmem_cache_alloc(k, flags | __GFP_ZERO); -} +#define kmem_cache_zalloc(_k, _flags) kmem_cache_alloc(_k, (_flags)|__GFP_ZERO) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate (see kmalloc). */ -static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags) +static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags) { - return kmalloc(size, flags | __GFP_ZERO); + return kmalloc_noprof(size, flags | __GFP_ZERO); } +#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__)) +#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node) -/** - * kzalloc_node - allocate zeroed memory from a particular memory node. - * @size: how many bytes of memory are required. - * @flags: the type of memory to allocate (see kmalloc). 
- * @node: memory node from which to allocate - */ -static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node) -{ - return kmalloc_node(size, flags | __GFP_ZERO, node); -} +extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_size(1); +#define kvmalloc_node(...) alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__)) -extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1); -static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags) -{ - return kvmalloc_node(size, flags, NUMA_NO_NODE); -} -static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node) -{ - return kvmalloc_node(size, flags | __GFP_ZERO, node); -} -static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags) -{ - return kvmalloc(size, flags | __GFP_ZERO); -} +#define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE) +#define kvmalloc_noprof(_size, _flags) kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE) +#define kvzalloc(_size, _flags) kvmalloc(_size, (_flags)|__GFP_ZERO) + +#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node) -static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags) +static inline __alloc_size(1, 2) void * +kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; - return kvmalloc(bytes, flags); + return kvmalloc_node_noprof(bytes, flags, node); } -static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags) -{ - return kvmalloc_array(n, size, flags | __GFP_ZERO); -} +#define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE) +#define kvcalloc_node_noprof(_n,_s,_f,_node) kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node) +#define kvcalloc_noprof(...) kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE) -extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) +#define kvmalloc_array(...) alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__)) +#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__)) +#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__)) + +extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags) __realloc_size(3); +#define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__)) + extern void kvfree(const void *addr); DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T)) diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 649955d2cf..d4a8e34505 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -14,6 +14,15 @@ #define CMDQ_ADDR_HIGH(addr) ((u32)(((addr) >> 16) & GENMASK(31, 0))) #define CMDQ_ADDR_LOW(addr) ((u16)(addr) | BIT(1)) +/* + * Every cmdq thread has its own SPRs (Specific Purpose Registers), + * so there are 4 * N (threads) SPRs in GCE that shares the same indexes below. 
+ */ +#define CMDQ_THR_SPR_IDX0 (0) +#define CMDQ_THR_SPR_IDX1 (1) +#define CMDQ_THR_SPR_IDX2 (2) +#define CMDQ_THR_SPR_IDX3 (3) + struct cmdq_pkt; struct cmdq_client_reg { @@ -62,17 +71,19 @@ void cmdq_mbox_destroy(struct cmdq_client *client); /** * cmdq_pkt_create() - create a CMDQ packet * @client: the CMDQ mailbox client + * @pkt: the CMDQ packet * @size: required CMDQ buffer size * - * Return: CMDQ packet pointer + * Return: 0 for success; else the error code is returned */ -struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size); +int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size); /** * cmdq_pkt_destroy() - destroy the CMDQ packet + * @client: the CMDQ mailbox client * @pkt: the CMDQ packet */ -void cmdq_pkt_destroy(struct cmdq_pkt *pkt); +void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt); /** * cmdq_pkt_write() - append write command to the CMDQ packet @@ -174,6 +185,18 @@ int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx, u16 addr_low, u32 value, u32 mask); /** + * cmdq_pkt_mem_move() - append memory move command to the CMDQ packet + * @pkt: the CMDQ packet + * @src_addr: source address + * @dst_addr: destination address + * + * Appends a CMDQ command to copy the value found in `src_addr` to `dst_addr`. + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr); + +/** * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet * @pkt: the CMDQ packet * @event: the desired event type to wait @@ -184,6 +207,21 @@ int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx, int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear); /** + * cmdq_pkt_acquire_event() - append acquire event command to the CMDQ packet + * @pkt: the CMDQ packet + * @event: the desired event to be acquired + * + * User can use cmdq_pkt_acquire_event() as `mutex_lock` and cmdq_pkt_clear_event() + * as `mutex_unlock` to protect some `critical section` instructions between them. + * cmdq_pkt_acquire_event() would wait for event to be cleared. + * After event is cleared by cmdq_pkt_clear_event in other GCE threads, + * cmdq_pkt_acquire_event() would set event and keep executing next instruction. + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event); + +/** * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet * @pkt: the CMDQ packet * @event: the desired event to be cleared @@ -248,36 +286,76 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value); /** - * cmdq_pkt_jump() - Append jump command to the CMDQ packet, ask GCE - * to execute an instruction that change current thread PC to - * a physical address which should contains more instruction. + * cmdq_pkt_poll_addr() - Append blocking POLL command to CMDQ packet + * @pkt: the CMDQ packet + * @addr: the hardware register address + * @value: the specified target register value + * @mask: the specified target register mask + * + * Appends a polling (POLL) command to the CMDQ packet and asks the GCE + * to execute an instruction that checks for the specified `value` (with + * or without `mask`) to appear in the specified hardware register `addr`. + * All GCE threads will be blocked by this instruction. 
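Taken together, the reworked cmdq_pkt_create()/cmdq_pkt_destroy() (the packet is now caller-owned and create returns an error code) and the acquire/clear event pair suggest usage along these lines; the client handle, event number and PAGE_SIZE buffer size are hypothetical choices, not mandated by the API:

/* Hedged sketch: guard a shared resource between GCE threads. */
static int demo_guarded_section(struct cmdq_client *cl, u16 lock_event)
{
	struct cmdq_pkt pkt;	/* caller-owned under the new API */
	int ret;

	ret = cmdq_pkt_create(cl, &pkt, PAGE_SIZE);
	if (ret)
		return ret;

	/* Take the "mutex": wait until the event is clear, then set it. */
	ret = cmdq_pkt_acquire_event(&pkt, lock_event);
	if (ret)
		goto out;

	/* ... critical-section instructions (writes, polls, ...) ... */

	/* Release the "mutex" for the other GCE threads. */
	ret = cmdq_pkt_clear_event(&pkt, lock_event);
out:
	cmdq_pkt_destroy(cl, &pkt);
	return ret;
}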
+ * + * Return: 0 for success or negative error code + */ +int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask); + +/** + * cmdq_pkt_jump_abs() - Append jump command to the CMDQ packet, ask GCE + * to execute an instruction that changes the current + * thread PC to an absolute physical address which should + * contain more instructions. * @pkt: the CMDQ packet - * @addr: physical address of target instruction buffer + * @addr: absolute physical address of target instruction buffer + * @shift_pa: shift bits of physical address in CMDQ instruction. This value + * is obtained from cmdq_get_shift_pa(). * * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr); +int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa); + +/* This wrapper has to be removed after all users have migrated to jump_abs */ +static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) +{ + return cmdq_pkt_jump_abs(pkt, addr, shift_pa); +} /** - * cmdq_pkt_finalize() - Append EOC and jump command to pkt. + * cmdq_pkt_jump_rel() - Append jump command to the CMDQ packet, ask GCE + * to execute an instruction that changes the current + * thread PC to a physical address at a relative offset. + * The target address should contain more instructions. * @pkt: the CMDQ packet + * @offset: relative offset of target instruction buffer from current PC. + * @shift_pa: shift bits of physical address in CMDQ instruction. This value + * is obtained from cmdq_get_shift_pa(). * * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_finalize(struct cmdq_pkt *pkt); +int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa); + +/** + * cmdq_pkt_eoc() - Append EOC and ask GCE to generate an IRQ at end of execution + * @pkt: The CMDQ packet + * + * Appends an End Of Code (EOC) command to the CMDQ packet and asks the GCE + * to generate an interrupt at the end of the execution of all commands in + * the pipeline. + * The EOC command is usually appended to the end of the pipeline to notify + * that all commands are done. + * + * Return: 0 for success or negative error number + */ +int cmdq_pkt_eoc(struct cmdq_pkt *pkt); /** - * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ - * packet and call back at the end of done packet + * cmdq_pkt_finalize() - Append EOC and jump command to pkt. * @pkt: the CMDQ packet * * Return: 0 for success; else the error code is returned - * - * Trigger CMDQ to asynchronously execute the CMDQ packet and call back - * at the end of done packet. Note that this is an ASYNC function. When the - * function returned, it may or may not be finished.
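With cmdq_pkt_eoc() and the jump primitives split out, the effect of cmdq_pkt_finalize() can also be composed by hand, for instance to make a packet loop back on itself. This sketch assumes struct cmdq_pkt tracks the number of bytes written so far in a cmd_buf_size field:

/* Hypothetical self-looping packet: IRQ at EOC, then jump back to the
 * start of this packet via a negative relative offset. */
static int demo_loop_packet(struct cmdq_pkt *pkt, u8 shift_pa)
{
	int ret;

	ret = cmdq_pkt_eoc(pkt);	/* interrupt at end of each pass */
	if (ret)
		return ret;

	return cmdq_pkt_jump_rel(pkt, -(s32)pkt->cmd_buf_size, shift_pa);
}

Here shift_pa would come from cmdq_get_shift_pa() on the mailbox channel, as the kernel-doc above notes.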
*/ -int cmdq_pkt_flush_async(struct cmdq_pkt *pkt); +int cmdq_pkt_finalize(struct cmdq_pkt *pkt); #else /* IS_ENABLED(CONFIG_MTK_CMDQ) */ @@ -294,12 +372,12 @@ static inline struct cmdq_client *cmdq_mbox_create(struct device *dev, int index static inline void cmdq_mbox_destroy(struct cmdq_client *client) { } -static inline struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size) +static inline int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size) { - return ERR_PTR(-EINVAL); + return -EINVAL; } -static inline void cmdq_pkt_destroy(struct cmdq_pkt *pkt) { } +static inline void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt) { } static inline int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value) { @@ -374,17 +452,32 @@ static inline int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) return -EINVAL; } -static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr) +static inline int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask) { return -EINVAL; } -static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt) +static inline int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) +{ + return -EINVAL; +} + +static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) +{ + return -EINVAL; +} + +static inline int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa) { return -EINVAL; } -static inline int cmdq_pkt_flush_async(struct cmdq_pkt *pkt) +static inline int cmdq_pkt_eoc(struct cmdq_pkt *pkt) +{ + return -EINVAL; +} + +static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { return -EINVAL; } diff --git a/include/linux/socket.h b/include/linux/socket.h index 139c330ccf..89d16b9037 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -16,6 +16,7 @@ struct cred; struct socket; struct sock; struct sk_buff; +struct proto_accept_arg; #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) @@ -433,7 +434,7 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, extern int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags, struct sockaddr __user *addr, int addr_len); -extern struct file *do_accept(struct file *file, unsigned file_flags, +extern struct file *do_accept(struct file *file, struct proto_accept_arg *arg, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 317200cd3a..fc5a206c40 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -117,9 +117,9 @@ static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size) return copy_to_sockptr_offset(dst, 0, src, size); } -static inline void *memdup_sockptr(sockptr_t src, size_t len) +static inline void *memdup_sockptr_noprof(sockptr_t src, size_t len) { - void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); + void *p = kmalloc_track_caller_noprof(len, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); @@ -129,10 +129,11 @@ static inline void *memdup_sockptr(sockptr_t src, size_t len) } return p; } +#define memdup_sockptr(...) 
alloc_hooks(memdup_sockptr_noprof(__VA_ARGS__)) -static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) +static inline void *memdup_sockptr_nul_noprof(sockptr_t src, size_t len) { - char *p = kmalloc_track_caller(len + 1, GFP_KERNEL); + char *p = kmalloc_track_caller_noprof(len + 1, GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); @@ -143,6 +144,7 @@ static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) p[len] = '\0'; return p; } +#define memdup_sockptr_nul(...) alloc_hooks(memdup_sockptr_nul_noprof(__VA_ARGS__)) static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count) { diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 66f814b63a..13e96d8b74 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -235,6 +235,7 @@ enum sdw_clk_stop_mode { * @BRA_flow_controlled: Slave implementation results in an OK_NotReady * response * @simple_ch_prep_sm: If channel prepare sequence is required + * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @imp_def_interrupts: If set, each bit corresponds to support for * implementation-defined interrupts * @@ -249,6 +250,7 @@ struct sdw_dp0_prop { u32 *words; bool BRA_flow_controlled; bool simple_ch_prep_sm; + u32 ch_prep_timeout; bool imp_def_interrupts; }; @@ -543,21 +545,6 @@ enum sdw_reg_bank { }; /** - * struct sdw_bus_conf: Bus configuration - * - * @clk_freq: Clock frequency, in Hz - * @num_rows: Number of rows in frame - * @num_cols: Number of columns in frame - * @bank: Next register bank - */ -struct sdw_bus_conf { - unsigned int clk_freq; - unsigned int num_rows; - unsigned int num_cols; - unsigned int bank; -}; - -/** * struct sdw_prepare_ch: Prepare/De-prepare Data Port channel * * @num: Port number @@ -899,6 +886,7 @@ struct sdw_master_ops { * @port_ops: Master port callback ops * @params: Current bus parameters * @prop: Master properties + * @vendor_specific_prop: pointer to non-standard properties * @m_rt_list: List of Master instance of all stream(s) running on Bus. 
This * is used to compute and program bus bandwidth, clock, frame shape, * transport and port parameters @@ -933,6 +921,7 @@ struct sdw_bus { const struct sdw_master_port_ops *port_ops; struct sdw_bus_params params; struct sdw_master_prop prop; + void *vendor_specific_prop; struct list_head m_rt_list; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 00bb22d96a..8e78417156 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -22,6 +22,7 @@ /* LCAP */ #define SDW_SHIM_LCAP 0x0 #define SDW_SHIM_LCAP_LCOUNT_MASK GENMASK(2, 0) +#define SDW_SHIM_LCAP_MLCS_MASK BIT(8) /* LCTL */ #define SDW_SHIM_LCTL 0x4 @@ -30,12 +31,18 @@ #define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0) #define SDW_SHIM_LCTL_CPA BIT(8) #define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8) +#define SDW_SHIM_LCTL_MLCS_MASK GENMASK(29, 27) +#define SDW_SHIM_MLCS_XTAL_CLK 0x0 +#define SDW_SHIM_MLCS_CARDINAL_CLK 0x1 +#define SDW_SHIM_MLCS_AUDIO_PLL_CLK 0x2 /* SYNC */ #define SDW_SHIM_SYNC 0xC -#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1) -#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_24_576 (24576 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_96 (96000 / SDW_CADENCE_GSYNC_KHZ - 1) #define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0) #define SDW_SHIM_SYNC_SYNCCPU BIT(15) #define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16) diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h index 138bec908c..658b10fa5b 100644 --- a/include/linux/soundwire/sdw_registers.h +++ b/include/linux/soundwire/sdw_registers.h @@ -13,7 +13,7 @@ #define SDW_REG_NO_PAGE 0x00008000 #define SDW_REG_OPTIONAL_PAGE 0x00010000 -#define SDW_REG_MAX 0x80000000 +#define SDW_REG_MAX 0x48000000 #define SDW_DPN_SIZE 0x100 #define SDW_BANK1_OFFSET 0x10 diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h deleted file mode 100644 index ca2cd4e30e..0000000000 --- a/include/linux/spi/pxa2xx_spi.h +++ /dev/null @@ -1,56 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs - */ -#ifndef __LINUX_SPI_PXA2XX_SPI_H -#define __LINUX_SPI_PXA2XX_SPI_H - -#include <linux/dmaengine.h> -#include <linux/types.h> - -#include <linux/pxa2xx_ssp.h> - -struct dma_chan; - -/* - * The platform data for SSP controller devices - * (resides in device.platform_data). - */ -struct pxa2xx_spi_controller { - u16 num_chipselect; - u8 enable_dma; - u8 dma_burst_size; - bool is_target; - - /* DMA engine specific config */ - dma_filter_fn dma_filter; - void *tx_param; - void *rx_param; - - /* For non-PXA arches */ - struct ssp_device ssp; -}; - -/* - * The controller specific data for SPI target devices - * (resides in spi_board_info.controller_data), - * copied to spi_device.platform_data ... mostly for - * DMA tuning. 
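Back in the sdw_intel.h hunk, the two added SYNCPRD constants follow the established formula: source clock in kHz divided by the Cadence global-sync rate, minus one. Assuming SDW_CADENCE_GSYNC_KHZ is 4 (a 4 kHz sync period, consistent with the pre-existing 24 MHz value), the constants evaluate as checked below:

/* Hedged compile-time check; SDW_CADENCE_GSYNC_KHZ == 4 is an assumption
 * taken from the surrounding driver, not from this diff. */
#define SDW_CADENCE_GSYNC_KHZ 4

static_assert(24000 / SDW_CADENCE_GSYNC_KHZ - 1 == 5999);  /* VAL_24     */
static_assert(24576 / SDW_CADENCE_GSYNC_KHZ - 1 == 6143);  /* VAL_24_576 */
static_assert(38400 / SDW_CADENCE_GSYNC_KHZ - 1 == 9599);  /* VAL_38_4   */
static_assert(96000 / SDW_CADENCE_GSYNC_KHZ - 1 == 23999); /* VAL_96     */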
- */ -struct pxa2xx_spi_chip { - u8 tx_threshold; - u8 tx_hi_threshold; - u8 rx_threshold; - u8 dma_burst_size; - u32 timeout; -}; - -#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) - -#include <linux/clk.h> - -extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info); - -#endif - -#endif /* __LINUX_SPI_PXA2XX_SPI_H */ diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h deleted file mode 100644 index dbdfcc7a3d..0000000000 --- a/include/linux/spi/rspi.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Renesas SPI driver - * - * Copyright (C) 2012 Renesas Solutions Corp. - */ - -#ifndef __LINUX_SPI_RENESAS_SPI_H__ -#define __LINUX_SPI_RENESAS_SPI_H__ - -struct rspi_plat_data { - unsigned int dma_tx_id; - unsigned int dma_rx_id; - - u16 num_chipselect; -}; - -#endif diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index afe6631da1..67b9a15a53 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -453,6 +453,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs. * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip * selected + * @last_cs_index_mask: bit mask the last chip selects that were used * @xfer_completion: used by core transfer_one_message() * @busy: message pump is busy * @running: message pump is running @@ -959,8 +960,8 @@ struct spi_res { * struct spi_transfer - a read/write buffer pair * @tx_buf: data to be written (DMA-safe memory), or NULL * @rx_buf: data to be read (DMA-safe memory), or NULL - * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped - * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped + * @tx_dma: DMA address of tx_buf, currently not for client use + * @rx_dma: DMA address of rx_buf, currently not for client use * @tx_nbits: number of bits used for writing. If 0 the default * (SPI_NBITS_SINGLE) is used. * @rx_nbits: number of bits used for reading. If 0 the default @@ -1070,8 +1071,7 @@ struct spi_transfer { /* * It's okay if tx_buf == rx_buf (right?). * For MicroWire, one buffer must be NULL. - * Buffers must work with dma_*map_single() calls, unless - * spi_message.is_dma_mapped reports a pre-existing mapping. + * Buffers must work with dma_*map_single() calls. 
*/ const void *tx_buf; void *rx_buf; @@ -1116,8 +1116,6 @@ struct spi_transfer { * struct spi_message - one multi-segment SPI transaction * @transfers: list of transfer segments in this transaction * @spi: SPI device to which the transaction is queued - * @is_dma_mapped: if true, the caller provided both DMA and CPU virtual - * addresses for each transfer buffer * @pre_optimized: peripheral driver pre-optimized the message * @optimized: the message is in the optimized state * @prepared: spi_prepare_message was called for the this message @@ -1151,8 +1149,6 @@ struct spi_message { struct spi_device *spi; - unsigned is_dma_mapped:1; - /* spi_optimize_message() was called for this message */ bool pre_optimized; /* __spi_optimize_message() was called for this message */ diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h index 3934ce789d..1b8d984668 100644 --- a/include/linux/spi/xilinx_spi.h +++ b/include/linux/spi/xilinx_spi.h @@ -2,19 +2,23 @@ #ifndef __LINUX_SPI_XILINX_SPI_H #define __LINUX_SPI_XILINX_SPI_H +#include <linux/types.h> + +struct spi_board_info; + /** * struct xspi_platform_data - Platform data of the Xilinx SPI driver - * @num_chipselect: Number of chip select by the IP. - * @little_endian: If registers should be accessed little endian or not. - * @bits_per_word: Number of bits per word. * @devices: Devices to add when the driver is probed. * @num_devices: Number of devices in the devices array. + * @num_chipselect: Number of chip select by the IP. + * @bits_per_word: Number of bits per word. + * @force_irq: If set, forces QSPI transaction requirements. */ struct xspi_platform_data { - u16 num_chipselect; - u8 bits_per_word; struct spi_board_info *devices; u8 num_devices; + u8 num_chipselect; + u8 bits_per_word; bool force_irq; }; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 3fcd20de6c..63dd8cf3c3 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -462,11 +462,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock) */ static inline int spin_needbreak(spinlock_t *lock) { -#ifdef CONFIG_PREEMPTION + if (!preempt_model_preemptible()) + return 0; + return spin_is_contended(lock); -#else - return 0; -#endif } /* @@ -479,11 +478,10 @@ static inline int spin_needbreak(spinlock_t *lock) */ static inline int rwlock_needbreak(rwlock_t *lock) { -#ifdef CONFIG_PREEMPTION + if (!preempt_model_preemptible()) + return 0; + return rwlock_is_contended(lock); -#else - return 0; -#endif } /* diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 447133171d..4d96bbdb45 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -64,8 +64,10 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp) { int idx; + preempt_disable(); // Needed for PREEMPT_AUTO idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1); + preempt_enable(); return idx; } diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 1f326da289..a2257380c3 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -621,14 +621,6 @@ extern u32 ssb_dma_translation(struct ssb_device *dev); #define SSB_DMA_TRANSLATION_MASK 0xC0000000 #define SSB_DMA_TRANSLATION_SHIFT 30 -static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) -{ -#ifdef CONFIG_SSB_DEBUG - printk(KERN_ERR "SSB: BUG! 
Calling DMA API for " - "unsupported bustype %d\n", dev->bus->bustype); -#endif /* DEBUG */ -} - #ifdef CONFIG_SSB_PCIHOST /* PCI-host wrapper driver */ extern int ssb_pcihost_register(struct pci_driver *driver); diff --git a/include/linux/stat.h b/include/linux/stat.h index 52150570d3..bf92441dba 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -53,6 +53,7 @@ struct kstat { u32 dio_mem_align; u32 dio_offset_align; u64 change_cookie; + u64 subvol; }; /* These definitions are internal to the kernel for now. Mainly used by nfsd. */ diff --git a/include/linux/stm.h b/include/linux/stm.h index 3b22689512..2fcbef9608 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h @@ -30,6 +30,16 @@ enum stp_packet_flags { STP_PACKET_TIMESTAMPED = 0x2, }; +/** + * enum stm_source_type - STM source driver + * @STM_USER: any STM trace source + * @STM_FTRACE: ftrace STM source + */ +enum stm_source_type { + STM_USER, + STM_FTRACE, +}; + struct stp_policy; struct stm_device; @@ -106,6 +116,7 @@ struct stm_source_device; * @name: device name, will be used for policy lookup * @src: internal structure, only used by stm class code * @nr_chans: number of channels to allocate + * @type: type of STM source driver represented by stm_source_type * @link: called when this source gets linked to an STM device * @unlink: called when this source is about to get unlinked from its STM * @@ -117,6 +128,7 @@ struct stm_source_data { struct stm_source_device *src; unsigned int percpu; unsigned int nr_chans; + unsigned int type; int (*link)(struct stm_source_data *data); void (*unlink)(struct stm_source_data *data); }; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index c0d74f97fd..f92c195c76 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -115,20 +115,6 @@ struct stmmac_axi { bool axi_rb; }; -#define EST_GCL 1024 -struct stmmac_est { - int enable; - u32 btr_reserve[2]; - u32 btr_offset[2]; - u32 btr[2]; - u32 ctr[2]; - u32 ter; - u32 gcl_unaligned[EST_GCL]; - u32 gcl[EST_GCL]; - u32 gcl_size; - u32 max_sdu[MTL_MAX_TX_QUEUES]; -}; - struct stmmac_rxq_cfg { u8 mode_to_use; u32 chan; @@ -245,7 +231,6 @@ struct plat_stmmacenet_data { struct fwnode_handle *port_node; struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; - struct stmmac_est *est; struct stmmac_fpe_cfg *fpe_cfg; struct stmmac_safety_feature_cfg *safety_feat_cfg; int clk_csr; @@ -284,6 +269,8 @@ struct plat_stmmacenet_data { int (*crosststamp)(ktime_t *device, struct system_counterval_t *system, void *ctx); void (*dump_debug_regs)(void *priv); + int (*pcs_init)(struct stmmac_priv *priv); + void (*pcs_exit)(struct stmmac_priv *priv); void *bsp_priv; struct clk *stmmac_clk; struct clk *pclk; diff --git a/include/linux/string.h b/include/linux/string.h index 9ba8b45970..9edace076d 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -14,8 +14,8 @@ #include <uapi/linux/string.h> extern char *strndup_user(const char __user *, long); -extern void *memdup_user(const void __user *, size_t); -extern void *vmemdup_user(const void __user *, size_t); +extern void *memdup_user(const void __user *, size_t) __realloc_size(2); +extern void *vmemdup_user(const void __user *, size_t) __realloc_size(2); extern void *memdup_user_nul(const void __user *, size_t); /** @@ -27,7 +27,8 @@ extern void *memdup_user_nul(const void __user *, size_t); * Return: an ERR_PTR() on failure. Result is physically * contiguous, to be freed by kfree(). 
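memdup_array_user(), whose overflow-checked body follows, multiplies n by size with check_mul_overflow() before copying, so callers never open-code the multiplication. A hypothetical ioctl-style helper using it:

/* Sketch: duplicate a user-supplied array of u32 into kernel memory. */
static int demo_copy_table(const void __user *uptr, size_t count)
{
	u32 *vals;

	vals = memdup_array_user(uptr, count, sizeof(*vals));
	if (IS_ERR(vals))
		return PTR_ERR(vals);

	/* ... validate and use vals[0..count-1] ... */

	kfree(vals);	/* result is kmalloc'ed, per the kernel-doc above */
	return 0;
}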
*/ -static inline void *memdup_array_user(const void __user *src, size_t n, size_t size) +static inline __realloc_size(2, 3) +void *memdup_array_user(const void __user *src, size_t n, size_t size) { size_t nbytes; @@ -46,7 +47,8 @@ static inline void *memdup_array_user(const void __user *src, size_t n, size_t s * Return: an ERR_PTR() on failure. Result may be not * physically contiguous. Use kvfree() to free. */ -static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size) +static inline __realloc_size(2, 3) +void *vmemdup_array_user(const void __user *src, size_t n, size_t size) { size_t nbytes; @@ -282,10 +284,13 @@ extern void kfree_const(const void *x); extern char *kstrdup(const char *s, gfp_t gfp) __malloc; extern const char *kstrdup_const(const char *s, gfp_t gfp); extern char *kstrndup(const char *s, size_t len, gfp_t gfp); -extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); +extern void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) __realloc_size(2); +#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__)) + extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp); -extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp); +extern void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp) + __realloc_size(2, 3); /* lib/argv_split.c */ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); @@ -423,6 +428,55 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count, } while (0) /** + * memtostr - Copy a possibly non-NUL-term string to a NUL-term string + * @dest: Pointer to destination NUL-terminates string + * @src: Pointer to character array (likely marked as __nonstring) + * + * This is a replacement for strncpy() uses where the source is not + * a NUL-terminated string. + * + * Note that sizes of @dest and @src must be known at compile-time. + */ +#define memtostr(dest, src) do { \ + const size_t _dest_len = __builtin_object_size(dest, 1); \ + const size_t _src_len = __builtin_object_size(src, 1); \ + const size_t _src_chars = strnlen(src, _src_len); \ + const size_t _copy_len = min(_dest_len - 1, _src_chars); \ + \ + BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \ + !__builtin_constant_p(_src_len) || \ + _dest_len == 0 || _dest_len == (size_t)-1 || \ + _src_len == 0 || _src_len == (size_t)-1); \ + memcpy(dest, src, _copy_len); \ + dest[_copy_len] = '\0'; \ +} while (0) + +/** + * memtostr_pad - Copy a possibly non-NUL-term string to a NUL-term string + * with NUL padding in the destination + * @dest: Pointer to destination NUL-terminates string + * @src: Pointer to character array (likely marked as __nonstring) + * + * This is a replacement for strncpy() uses where the source is not + * a NUL-terminated string. + * + * Note that sizes of @dest and @src must be known at compile-time. 
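memtostr_pad(), whose body follows, performs the same copy but additionally zero-fills the remainder of @dest. A usage sketch for memtostr() with hypothetical structure and field names:

/* Sketch: turn a fixed-width, possibly unterminated ID field into a
 * proper C string. Both sizes are compile-time constants, as required. */
struct fw_info {
	char id[8];		/* not guaranteed to be NUL-terminated */
};

static void fw_print_name(const struct fw_info *fw)
{
	char name[16];

	memtostr(name, fw->id);		/* always NUL-terminates name */
	pr_info("firmware id: %s\n", name);
}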
+ */ +#define memtostr_pad(dest, src) do { \ + const size_t _dest_len = __builtin_object_size(dest, 1); \ + const size_t _src_len = __builtin_object_size(src, 1); \ + const size_t _src_chars = strnlen(src, _src_len); \ + const size_t _copy_len = min(_dest_len - 1, _src_chars); \ + \ + BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \ + !__builtin_constant_p(_src_len) || \ + _dest_len == 0 || _dest_len == (size_t)-1 || \ + _src_len == 0 || _src_len == (size_t)-1); \ + memcpy(dest, src, _copy_len); \ + memset(&dest[_copy_len], 0, _dest_len - _copy_len); \ +} while (0) + +/** * memset_after - Set a value after a struct member to the end of a struct * * @obj: Address of target struct instance diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 8e20cd60e2..0981e35a9f 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -135,6 +135,9 @@ int svc_reg_xprt_class(struct svc_xprt_class *); void svc_unreg_xprt_class(struct svc_xprt_class *); void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, struct svc_serv *); +int svc_xprt_create_from_sa(struct svc_serv *serv, const char *xprt_name, + struct net *net, struct sockaddr *sap, + int flags, const struct cred *cred); int svc_xprt_create(struct svc_serv *serv, const char *xprt_name, struct net *net, const int family, const unsigned short port, int flags, @@ -147,6 +150,8 @@ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt); void svc_xprt_close(struct svc_xprt *xprt); int svc_port_is_privileged(struct sockaddr *sin); int svc_print_xprts(char *buf, int maxlen); +struct svc_xprt *svc_find_listener(struct svc_serv *serv, const char *xcl_name, + struct net *net, const struct sockaddr *sa); struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, struct net *net, const sa_family_t af, const unsigned short port); diff --git a/include/linux/swap.h b/include/linux/swap.h index 7b5c33e5d8..e685e93ba3 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -259,7 +259,20 @@ struct swap_cluster_info { }; #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ #define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */ -#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */ + +/* + * The first page in the swap file is the swap header, which is always marked + * bad to prevent it from being allocated as an entry. This also prevents the + * cluster to which it belongs being marked free. Therefore 0 is safe to use as + * a sentinel to indicate next is not valid in percpu_cluster. + */ +#define SWAP_NEXT_INVALID 0 + +#ifdef CONFIG_THP_SWAP +#define SWAP_NR_ORDERS (PMD_ORDER + 1) +#else +#define SWAP_NR_ORDERS 1 +#endif /* * We assign a cluster to each CPU, so each CPU can allocate swap entry from @@ -267,8 +280,7 @@ struct swap_cluster_info { * throughput. 
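The per-order cursor array sized by SWAP_NR_ORDERS (used by struct percpu_cluster just below) covers every THP-swappable allocation order. Assuming PMD_ORDER is 9, as on x86-64 with 4 KiB pages, that is ten cursors:

/* Hedged arithmetic; PMD_ORDER == 9 is an assumption (2 MiB PMD with
 * 4 KiB pages: 512 entries = 2^9), not part of this header. */
#define PMD_ORDER 9
#define SWAP_NR_ORDERS (PMD_ORDER + 1)	/* orders 0..9: 4 KiB .. 2 MiB */

static_assert(SWAP_NR_ORDERS == 10);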
*/ struct percpu_cluster { - struct swap_cluster_info index; /* Current cluster index */ - unsigned int next; /* Likely next allocation offset */ + unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; struct swap_cluster_list { @@ -298,10 +310,8 @@ struct swap_info_struct { unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ - struct file *bdev_file; /* open handle of the bdev */ struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ - unsigned int old_block_size; /* seldom referenced */ struct completion comp; /* seldom referenced */ spinlock_t lock; /* * protect map scan related fields like @@ -463,14 +473,14 @@ swp_entry_t folio_alloc_swap(struct folio *folio); bool folio_free_swap(struct folio *folio); void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); -extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size); +extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t); extern void swap_free(swp_entry_t); extern void swapcache_free_entries(swp_entry_t *entries, int n); -extern int free_swap_and_cache(swp_entry_t); +extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); int swap_type_of(dev_t device, sector_t offset); int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); @@ -519,8 +529,9 @@ static inline void put_swap_device(struct swap_info_struct *si) #define free_pages_and_swap_cache(pages, nr) \ release_pages((pages), (nr)); -/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */ -#define free_swap_and_cache(e) is_pfn_swap_entry(e) +static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr) +{ +} static inline void free_swap_cache(struct folio *folio) { @@ -588,14 +599,10 @@ static inline int add_swap_extent(struct swap_info_struct *sis, } #endif /* CONFIG_SWAP */ -#ifdef CONFIG_THP_SWAP -extern int split_swap_cluster(swp_entry_t entry); -#else -static inline int split_swap_cluster(swp_entry_t entry) +static inline void free_swap_and_cache(swp_entry_t entry) { - return 0; + free_swap_and_cache_nr(entry, 1); } -#endif #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index ea23097e35..14bc10c1bb 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -43,7 +43,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask, extern void __init swiotlb_update_mem_attributes(void); phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, - size_t mapping_size, size_t alloc_size, + size_t mapping_size, unsigned int alloc_aligned_mask, enum dma_data_direction dir, unsigned long attrs); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index ac3db59a74..fff820c3e9 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -322,13 +322,13 @@ asmlinkage long sys_io_pgetevents(aio_context_t ctx_id, long nr, struct io_event __user *events, struct __kernel_timespec __user *timeout, - const struct __aio_sigset *sig); + const struct __aio_sigset __user *sig); asmlinkage 
long sys_io_pgetevents_time32(aio_context_t ctx_id, long min_nr, long nr, struct io_event __user *events, struct old_timespec32 __user *timeout, - const struct __aio_sigset *sig); + const struct __aio_sigset __user *sig); asmlinkage long sys_io_uring_setup(u32 entries, struct io_uring_params __user *p); asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit, @@ -441,7 +441,7 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, umode_t mode); asmlinkage long sys_openat2(int dfd, const char __user *filename, - struct open_how *how, size_t size); + struct open_how __user *how, size_t size); asmlinkage long sys_close(unsigned int fd); asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd, unsigned int flags); @@ -555,7 +555,7 @@ asmlinkage long sys_get_robust_list(int pid, asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, size_t len); -asmlinkage long sys_futex_waitv(struct futex_waitv *waiters, +asmlinkage long sys_futex_waitv(struct futex_waitv __user *waiters, unsigned int nr_futexes, unsigned int flags, struct __kernel_timespec __user *timeout, clockid_t clockid); @@ -821,6 +821,7 @@ asmlinkage long sys_process_mrelease(int pidfd, unsigned int flags); asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags); +asmlinkage long sys_mseal(unsigned long start, size_t len, unsigned long flags); asmlinkage long sys_mbind(unsigned long start, unsigned long len, unsigned long mode, const unsigned long __user *nmask, @@ -912,7 +913,7 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, asmlinkage long sys_getrandom(char __user *buf, size_t count, unsigned int flags); asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags); -asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); +asmlinkage long sys_bpf(int cmd, union bpf_attr __user *attr, unsigned int size); asmlinkage long sys_execveat(int dfd, const char __user *filename, const char __user *const __user *argv, const char __user *const __user *envp, int flags); @@ -965,11 +966,11 @@ asmlinkage long sys_cachestat(unsigned int fd, struct cachestat_range __user *cstat_range, struct cachestat __user *cstat, unsigned int flags); asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags); -asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx, - u32 *size, u32 flags); -asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx, +asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx __user *ctx, + u32 __user *size, u32 flags); +asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx __user *ctx, u32 size, u32 flags); -asmlinkage long sys_lsm_list_modules(u64 *ids, u32 *size, u32 flags); +asmlinkage long sys_lsm_list_modules(u64 __user *ids, u32 __user *size, u32 flags); /* * Architecture-specific system calls diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index ee7d33b89e..09db2f2e64 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -137,17 +137,6 @@ struct ctl_table { void *data; int maxlen; umode_t mode; - /** - * enum type - Enumeration to differentiate between ctl target types - * @SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations - * @SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Used to identify a permanently - 
* empty directory target to serve - * as mount point. - */ - enum { - SYSCTL_TABLE_TYPE_DEFAULT, - SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY - } type; proc_handler *proc_handler; /* Callback for text formatting */ struct ctl_table_poll *poll; void *extra1; @@ -182,12 +171,23 @@ struct ctl_table_header { struct rcu_head rcu; }; struct completion *unregistering; - struct ctl_table *ctl_table_arg; + const struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */ + /** + * enum type - Enumeration to differentiate between ctl target types + * @SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations + * @SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Used to identify a permanently + * empty directory target to serve + * as mount point. + */ + enum { + SYSCTL_TABLE_TYPE_DEFAULT, + SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY, + } type; }; struct ctl_dir { @@ -205,9 +205,8 @@ struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set *(*lookup)(struct ctl_table_root *root); void (*set_ownership)(struct ctl_table_header *head, - struct ctl_table *table, kuid_t *uid, kgid_t *gid); - int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); + int (*permissions)(struct ctl_table_header *head, const struct ctl_table *table); }; #define register_sysctl(path, table) \ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 326341c623..c4e64dc112 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -371,6 +371,17 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size) #define BIN_ATTR_ADMIN_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size) +#define __BIN_ATTR_SIMPLE_RO(_name, _mode) { \ + .attr = { .name = __stringify(_name), .mode = _mode }, \ + .read = sysfs_bin_attr_simple_read, \ +} + +#define BIN_ATTR_SIMPLE_RO(_name) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0444) + +#define BIN_ATTR_SIMPLE_ADMIN_RO(_name) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0400) + struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); @@ -478,6 +489,10 @@ int sysfs_emit(char *buf, const char *fmt, ...); __printf(3, 4) int sysfs_emit_at(char *buf, int at, const char *fmt, ...); +ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count); + #else /* CONFIG_SYSFS */ static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) @@ -735,6 +750,15 @@ static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...) 
{ return 0; } + +static inline ssize_t sysfs_bin_attr_simple_read(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return 0; +} #endif /* CONFIG_SYSFS */ static inline int __must_check sysfs_create_file(struct kobject *kobj, diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 795ef5a684..26b8a47f41 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -30,7 +30,8 @@ int task_work_add(struct task_struct *task, struct callback_head *twork, struct callback_head *task_work_cancel_match(struct task_struct *task, bool (*match)(struct callback_head *, void *data), void *data); -struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); +struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t); +bool task_work_cancel(struct task_struct *task, struct callback_head *cb); void task_work_run(void); static inline void exit_task_work(struct task_struct *task) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 55399ee2a5..6a5e08b937 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -244,6 +244,7 @@ struct tcp_sock { /* OOO segments go in this rbtree. Socket lock must be held. */ struct rb_root out_of_order_queue; u32 snd_ssthresh; /* Slow start size threshold */ + u8 recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */ __cacheline_group_end(tcp_sock_read_rx); /* TX read-write hotpath cache lines */ @@ -266,8 +267,6 @@ struct tcp_sock { u32 mdev_us; /* medium deviation */ u32 rtt_seq; /* sequence number to update rttvar */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ - u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ - u64 tcp_mstamp; /* most recent packet received/sent */ struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */ struct sk_buff *highest_sack; /* skb just after the highest * skb with SACKed bit set @@ -284,6 +283,8 @@ struct tcp_sock { * 0x5?10 << 16 + snd_wnd in net byte order */ __be32 pred_flags; + u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ + u64 tcp_mstamp; /* most recent packet received/sent */ u32 rcv_nxt; /* What we want to receive next */ u32 snd_nxt; /* Next sequence we send */ u32 snd_una; /* First byte we want an ack for */ @@ -370,7 +371,6 @@ struct tcp_sock { tlp_retrans:1, /* TLP is a retransmission */ unused:5; u8 thin_lto : 1,/* Use linear timeouts for thin streams */ - recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */ fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */ fastopen_client_fail:2, /* reason why fastopen failed */ diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h new file mode 100644 index 0000000000..efd16ed523 --- /dev/null +++ b/include/linux/tee_core.h @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Linaro Limited + */ + +#ifndef __TEE_CORE_H +#define __TEE_CORE_H + +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/idr.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/tee.h> +#include <linux/tee_drv.h> +#include <linux/types.h> +#include <linux/uuid.h> + +/* + * The file describes the API provided by the generic TEE driver to the + * specific TEE driver. 
+ */ + +#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */ + /* in secure world */ +#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */ +#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */ +#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */ + +#define TEE_DEVICE_FLAG_REGISTERED 0x1 +#define TEE_MAX_DEV_NAME_LEN 32 + +/** + * struct tee_device - TEE Device representation + * @name: name of device + * @desc: description of device + * @id: unique id of device + * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above + * @dev: embedded basic device structure + * @cdev: embedded cdev + * @num_users: number of active users of this device + * @c_no_user: completion used when unregistering the device + * @mutex: mutex protecting @num_users and @idr + * @idr: register of user space shared memory objects allocated or + * registered on this device + * @pool: shared memory pool + */ +struct tee_device { + char name[TEE_MAX_DEV_NAME_LEN]; + const struct tee_desc *desc; + int id; + unsigned int flags; + + struct device dev; + struct cdev cdev; + + size_t num_users; + struct completion c_no_users; + struct mutex mutex; /* protects num_users and idr */ + + struct idr idr; + struct tee_shm_pool *pool; +}; + +/** + * struct tee_driver_ops - driver operations vtable + * @get_version: returns version of driver + * @open: called when the device file is opened + * @release: release this open file + * @open_session: open a new session + * @close_session: close a session + * @system_session: declare session as a system session + * @invoke_func: invoke a trusted function + * @cancel_req: request cancel of an ongoing invoke or open + * @supp_recv: called for supplicant to get a command + * @supp_send: called for supplicant to send a response + * @shm_register: register shared memory buffer in TEE + * @shm_unregister: unregister shared memory buffer in TEE + */ +struct tee_driver_ops { + void (*get_version)(struct tee_device *teedev, + struct tee_ioctl_version_data *vers); + int (*open)(struct tee_context *ctx); + void (*release)(struct tee_context *ctx); + int (*open_session)(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); + int (*close_session)(struct tee_context *ctx, u32 session); + int (*system_session)(struct tee_context *ctx, u32 session); + int (*invoke_func)(struct tee_context *ctx, + struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); + int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session); + int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param); + int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param); + int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, + struct page **pages, size_t num_pages, + unsigned long start); + int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm); +}; + +/** + * struct tee_desc - Describes the TEE driver to the subsystem + * @name: name of driver + * @ops: driver operations vtable + * @owner: module providing the driver + * @flags: Extra properties of driver, defined by TEE_DESC_* below + */ +#define TEE_DESC_PRIVILEGED 0x1 +struct tee_desc { + const char *name; + const struct tee_driver_ops *ops; + struct module *owner; + u32 flags; +}; + +/** + * tee_device_alloc() - Allocate a new struct tee_device instance + * @teedesc: Descriptor for this driver + * @dev: Parent device for this device + * @pool: Shared memory pool, NULL if not used + 
* @driver_data: Private driver data for this device + * + * Allocates a new struct tee_device instance. The device is + * removed by tee_device_unregister(). + * + * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure + */ +struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, + struct device *dev, + struct tee_shm_pool *pool, + void *driver_data); + +/** + * tee_device_register() - Registers a TEE device + * @teedev: Device to register + * + * tee_device_unregister() needs to be called to remove the @teedev if + * this function fails. + * + * @returns < 0 on failure + */ +int tee_device_register(struct tee_device *teedev); + +/** + * tee_device_unregister() - Removes a TEE device + * @teedev: Device to unregister + * + * This function should be called to remove the @teedev even if + * tee_device_register() hasn't been called yet. Does nothing if + * @teedev is NULL. + */ +void tee_device_unregister(struct tee_device *teedev); + +/** + * tee_session_calc_client_uuid() - Calculates client UUID for session + * @uuid: Resulting UUID + * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*) + * @connection_data: Connection data for opening session + * + * Based on the connection method, calculates a UUIDv5-based client UUID. + * + * For group-based logins, verifies that the calling process has the + * specified credentials. + * + * @return < 0 on failure + */ +int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, + const u8 connection_data[TEE_IOCTL_UUID_LEN]); + +/** + * struct tee_shm_pool - shared memory pool + * @ops: operations + * @private_data: private data for the shared memory manager + */ +struct tee_shm_pool { + const struct tee_shm_pool_ops *ops; + void *private_data; +}; + +/** + * struct tee_shm_pool_ops - shared memory pool operations + * @alloc: called when allocating shared memory + * @free: called when freeing shared memory + * @destroy_pool: called when destroying the pool + */ +struct tee_shm_pool_ops { + int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align); + void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm); + void (*destroy_pool)(struct tee_shm_pool *pool); +}; + +/* + * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory + * @vaddr: Virtual address of start of pool + * @paddr: Physical address of start of pool + * @size: Size in bytes of the pool + * + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. + */ +struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr, + phys_addr_t paddr, size_t size, + int min_alloc_order); + +/** + * tee_shm_pool_free() - Free a shared memory pool + * @pool: The shared memory pool to free + * + * There must be no remaining shared memory allocated from this pool when + * this function is called. + */ +static inline void tee_shm_pool_free(struct tee_shm_pool *pool) +{ + pool->ops->destroy_pool(pool); +} + +/** + * tee_get_drvdata() - Return driver_data pointer + * @returns the driver_data pointer supplied to tee_device_alloc().
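A back-end driver's probe path against the tee_core.h API declared above might look like this sketch; my_tee_desc and my_tee_ops are hypothetical, and the pool and driver data are left NULL as the kernel-doc permits:

/* Hedged skeleton of a TEE back-end driver registration. */
static const struct tee_driver_ops my_tee_ops = {
	/* .get_version, .open_session, .invoke_func, ... not shown */
};

static const struct tee_desc my_tee_desc = {
	.name = "my-tee",
	.ops = &my_tee_ops,
	.owner = THIS_MODULE,
};

static int my_tee_probe(struct device *dev)
{
	struct tee_device *teedev;
	int ret;

	teedev = tee_device_alloc(&my_tee_desc, dev, NULL, NULL);
	if (IS_ERR(teedev))
		return PTR_ERR(teedev);

	ret = tee_device_register(teedev);
	if (ret)
		tee_device_unregister(teedev);	/* required on failure, per above */
	return ret;
}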
+ */ +void *tee_get_drvdata(struct tee_device *teedev); + +/** + * tee_shm_alloc_priv_buf() - Allocate shared memory for private use by specific + * TEE driver + * @ctx: The TEE context for shared memory allocation + * @size: Shared memory allocation size + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure + */ +struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); + +int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align, + int (*shm_register)(struct tee_context *ctx, + struct tee_shm *shm, + struct page **pages, + size_t num_pages, + unsigned long start)); +void tee_dyn_shm_free_helper(struct tee_shm *shm, + int (*shm_unregister)(struct tee_context *ctx, + struct tee_shm *shm)); + +/** + * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind + * @shm: Shared memory handle + * @returns true if object is dynamic shared memory + */ +static inline bool tee_shm_is_dynamic(struct tee_shm *shm) +{ + return shm && (shm->flags & TEE_SHM_DYNAMIC); +} + +/** + * tee_shm_put() - Decrease reference count on a shared memory handle + * @shm: Shared memory handle + */ +void tee_shm_put(struct tee_shm *shm); + +/** + * tee_shm_get_id() - Get id of a shared memory object + * @shm: Shared memory handle + * @returns id + */ +static inline int tee_shm_get_id(struct tee_shm *shm) +{ + return shm->id; +} + +/** + * tee_shm_get_from_id() - Find shared memory object and increase reference + * count + * @ctx: Context owning the shared memory + * @id: Id of shared memory object + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure + */ +struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); + +static inline bool tee_param_is_memref(struct tee_param *param) +{ + switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + return true; + default: + return false; + } +} + +/** + * teedev_open() - Open a struct tee_device + * @teedev: Device to open + * + * @return a pointer to struct tee_context on success or an ERR_PTR on failure. + */ +struct tee_context *teedev_open(struct tee_device *teedev); + +/** + * teedev_close_context() - closes a struct tee_context + * @ctx: The struct tee_context to close + */ +void teedev_close_context(struct tee_context *ctx); + +#endif /*__TEE_CORE_H*/ diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 71632e3c5f..786b9ae6cf 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -1,40 +1,28 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2022 Linaro Limited + * Copyright (c) 2015-2024 Linaro Limited */ #ifndef __TEE_DRV_H #define __TEE_DRV_H #include <linux/device.h> -#include <linux/idr.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/tee.h> #include <linux/types.h> -#include <linux/uuid.h> /* - * The file describes the API provided by the generic TEE driver to the - * specific TEE driver. + * The file describes the API provided by the TEE subsystem to the + * TEE client drivers. 
*/ -#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */ - /* in secure world */ -#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */ -#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */ -#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */ - -struct device; struct tee_device; -struct tee_shm; -struct tee_shm_pool; /** * struct tee_context - driver specific context on file pointer data * @teedev: pointer to this drivers struct tee_device - * @list_shm: List of shared memory object owned by this context * @data: driver specific context data, managed by the driver * @refcount: reference counter for this structure * @releasing: flag that indicates if context is being released right now. @@ -57,134 +45,6 @@ struct tee_context { bool cap_memref_null; }; -struct tee_param_memref { - size_t shm_offs; - size_t size; - struct tee_shm *shm; -}; - -struct tee_param_value { - u64 a; - u64 b; - u64 c; -}; - -struct tee_param { - u64 attr; - union { - struct tee_param_memref memref; - struct tee_param_value value; - } u; -}; - -/** - * struct tee_driver_ops - driver operations vtable - * @get_version: returns version of driver - * @open: called when the device file is opened - * @release: release this open file - * @open_session: open a new session - * @close_session: close a session - * @system_session: declare session as a system session - * @invoke_func: invoke a trusted function - * @cancel_req: request cancel of an ongoing invoke or open - * @supp_recv: called for supplicant to get a command - * @supp_send: called for supplicant to send a response - * @shm_register: register shared memory buffer in TEE - * @shm_unregister: unregister shared memory buffer in TEE - */ -struct tee_driver_ops { - void (*get_version)(struct tee_device *teedev, - struct tee_ioctl_version_data *vers); - int (*open)(struct tee_context *ctx); - void (*release)(struct tee_context *ctx); - int (*open_session)(struct tee_context *ctx, - struct tee_ioctl_open_session_arg *arg, - struct tee_param *param); - int (*close_session)(struct tee_context *ctx, u32 session); - int (*system_session)(struct tee_context *ctx, u32 session); - int (*invoke_func)(struct tee_context *ctx, - struct tee_ioctl_invoke_arg *arg, - struct tee_param *param); - int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session); - int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params, - struct tee_param *param); - int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, - struct tee_param *param); - int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, - struct page **pages, size_t num_pages, - unsigned long start); - int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm); -}; - -/** - * struct tee_desc - Describes the TEE driver to the subsystem - * @name: name of driver - * @ops: driver operations vtable - * @owner: module providing the driver - * @flags: Extra properties of driver, defined by TEE_DESC_* below - */ -#define TEE_DESC_PRIVILEGED 0x1 -struct tee_desc { - const char *name; - const struct tee_driver_ops *ops; - struct module *owner; - u32 flags; -}; - -/** - * tee_device_alloc() - Allocate a new struct tee_device instance - * @teedesc: Descriptor for this driver - * @dev: Parent device for this device - * @pool: Shared memory pool, NULL if not used - * @driver_data: Private driver data for this device - * - * Allocates a new struct tee_device instance. The device is - * removed by tee_device_unregister(). 
- * - * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure - */ -struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, - struct device *dev, - struct tee_shm_pool *pool, - void *driver_data); - -/** - * tee_device_register() - Registers a TEE device - * @teedev: Device to register - * - * tee_device_unregister() need to be called to remove the @teedev if - * this function fails. - * - * @returns < 0 on failure - */ -int tee_device_register(struct tee_device *teedev); - -/** - * tee_device_unregister() - Removes a TEE device - * @teedev: Device to unregister - * - * This function should be called to remove the @teedev even if - * tee_device_register() hasn't been called yet. Does nothing if - * @teedev is NULL. - */ -void tee_device_unregister(struct tee_device *teedev); - -/** - * tee_session_calc_client_uuid() - Calculates client UUID for session - * @uuid: Resulting UUID - * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*) - * @connectuon_data: Connection data for opening session - * - * Based on connection method calculates UUIDv5 based client UUID. - * - * For group based logins verifies that calling process has specified - * credentials. - * - * @return < 0 on failure - */ -int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, - const u8 connection_data[TEE_IOCTL_UUID_LEN]); - /** * struct tee_shm - shared memory object * @ctx: context using the object @@ -195,15 +55,12 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, * @pages: locked pages from userspace * @num_pages: number of locked pages * @refcount: reference counter - * @flags: defined by TEE_SHM_* in tee_drv.h + * @flags: defined by TEE_SHM_* in tee_core.h * @id: unique id of a shared memory object on this device, shared * with user space * @sec_world_id: * secure world assigned id of this shared memory object, not * used by all drivers - * - * This pool is only supposed to be accessed directly from the TEE - * subsystem and from drivers that implements their own shm pool manager. */ struct tee_shm { struct tee_context *ctx; @@ -219,88 +76,53 @@ struct tee_shm { u64 sec_world_id; }; -/** - * struct tee_shm_pool - shared memory pool - * @ops: operations - * @private_data: private data for the shared memory manager - */ -struct tee_shm_pool { - const struct tee_shm_pool_ops *ops; - void *private_data; +struct tee_param_memref { + size_t shm_offs; + size_t size; + struct tee_shm *shm; }; -/** - * struct tee_shm_pool_ops - shared memory pool operations - * @alloc: called when allocating shared memory - * @free: called when freeing shared memory - * @destroy_pool: called when destroying the pool - */ -struct tee_shm_pool_ops { - int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm, - size_t size, size_t align); - void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm); - void (*destroy_pool)(struct tee_shm_pool *pool); +struct tee_param_value { + u64 a; + u64 b; + u64 c; }; -/* - * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory - * @vaddr: Virtual address of start of pool - * @paddr: Physical address of start of pool - * @size: Size in bytes of the pool - * - * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. 
- */ -struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr, - phys_addr_t paddr, size_t size, - int min_alloc_order); +struct tee_param { + u64 attr; + union { + struct tee_param_memref memref; + struct tee_param_value value; + } u; +}; /** - * tee_shm_pool_free() - Free a shared memory pool - * @pool: The shared memory pool to free - * - * There must be no remaining shared memory allocated from this pool when - * this function is called. + * tee_shm_alloc_kernel_buf() - Allocate kernel shared memory for a + * particular TEE client driver + * @ctx: The TEE context for shared memory allocation + * @size: Shared memory allocation size + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure */ -static inline void tee_shm_pool_free(struct tee_shm_pool *pool) -{ - pool->ops->destroy_pool(pool); -} +struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); /** - * tee_get_drvdata() - Return driver_data pointer - * @returns the driver_data pointer supplied to tee_register(). + * tee_shm_register_kernel_buf() - Register kernel shared memory for a + * particular TEE client driver + * @ctx: The TEE context for shared memory registration + * @addr: Kernel buffer address + * @length: Kernel buffer length + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure */ -void *tee_get_drvdata(struct tee_device *teedev); - -struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); -struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); - struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, void *addr, size_t length); /** - * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind - * @shm: Shared memory handle - * @returns true if object is dynamic shared memory - */ -static inline bool tee_shm_is_dynamic(struct tee_shm *shm) -{ - return shm && (shm->flags & TEE_SHM_DYNAMIC); -} - -/** * tee_shm_free() - Free shared memory * @shm: Handle to shared memory to free */ void tee_shm_free(struct tee_shm *shm); /** - * tee_shm_put() - Decrease reference count on a shared memory handle - * @shm: Shared memory handle - */ -void tee_shm_put(struct tee_shm *shm); - -/** * tee_shm_get_va() - Get virtual address of a shared memory plus an offset * @shm: Shared memory handle * @offs: Offset from start of this shared memory @@ -353,25 +175,6 @@ static inline size_t tee_shm_get_page_offset(struct tee_shm *shm) } /** - * tee_shm_get_id() - Get id of a shared memory object - * @shm: Shared memory handle - * @returns id - */ -static inline int tee_shm_get_id(struct tee_shm *shm) -{ - return shm->id; -} - -/** - * tee_shm_get_from_id() - Find shared memory object and increase reference - * count - * @ctx: Context owning the shared memory - * @id: Id of shared memory object - * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure - */ -struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); - -/** * tee_client_open_context() - Open a TEE context * @start: if not NULL, continue search after this context * @match: function to check TEE device @@ -470,18 +273,6 @@ int tee_client_invoke_func(struct tee_context *ctx, int tee_client_cancel_req(struct tee_context *ctx, struct tee_ioctl_cancel_arg *arg); -static inline bool tee_param_is_memref(struct tee_param *param) -{ - switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: - case 
TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: - return true; - default: - return false; - } -} - extern const struct bus_type tee_bus_type; /** @@ -509,18 +300,4 @@ struct tee_client_driver { #define to_tee_client_driver(d) \ container_of(d, struct tee_client_driver, driver) -/** - * teedev_open() - Open a struct tee_device - * @teedev: Device to open - * - * @return a pointer to struct tee_context on success or an ERR_PTR on failure. - */ -struct tee_context *teedev_open(struct tee_device *teedev); - -/** - * teedev_close_context() - closes a struct tee_context - * @ctx: The struct tee_context to close - */ -void teedev_close_context(struct tee_context *ctx); - #endif /*__TEE_DRV_H*/ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index c33f50177f..f1155c0439 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -61,7 +61,6 @@ enum thermal_notify_event { * struct thermal_trip - representation of a point in temperature domain * @temperature: temperature value in milliCelsius * @hysteresis: relative hysteresis in milliCelsius - * @threshold: trip crossing notification threshold milliCelsius * @type: trip point type * @priv: pointer to driver data associated with this trip * @flags: flags representing binary properties of the trip @@ -69,7 +68,6 @@ enum thermal_notify_event { struct thermal_trip { int temperature; int hysteresis; - int threshold; enum thermal_trip_type type; u8 flags; void *priv; @@ -81,6 +79,8 @@ struct thermal_trip { #define THERMAL_TRIP_FLAG_RW (THERMAL_TRIP_FLAG_RW_TEMP | \ THERMAL_TRIP_FLAG_RW_HYST) +struct thermal_zone_device; + struct thermal_zone_device_ops { int (*bind) (struct thermal_zone_device *, struct thermal_cooling_device *); @@ -126,111 +126,6 @@ struct thermal_cooling_device { #endif }; -/** - * struct thermal_zone_device - structure for a thermal zone - * @id: unique id number for each thermal zone - * @type: the thermal zone device type - * @device: &struct device for this thermal zone - * @removal: removal completion - * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature - * @trip_type_attrs: attributes for trip points for sysfs: trip type - * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis - * @mode: current mode of this thermal zone - * @devdata: private pointer for device private data - * @num_trips: number of trip points the thermal zone supports - * @passive_delay_jiffies: number of jiffies to wait between polls when - * performing passive cooling. - * @polling_delay_jiffies: number of jiffies to wait between polls when - * checking whether trip points have been crossed (0 for - * interrupt driven systems) - * @temperature: current temperature. This is only for core code, - * drivers should use thermal_zone_get_temp() to get the - * current temperature - * @last_temperature: previous temperature read - * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION - * @passive: 1 if you've crossed a passive trip point, 0 otherwise. - * @prev_low_trip: the low current temperature if you've crossed a passive - trip point. - * @prev_high_trip: the above current temperature if you've crossed a - passive trip point. - * @need_update: if equals 1, thermal_zone_device_update needs to be invoked. 
- * @ops: operations this &thermal_zone_device supports - * @tzp: thermal zone parameters - * @governor: pointer to the governor for this thermal zone - * @governor_data: private pointer for governor data - * @thermal_instances: list of &struct thermal_instance of this thermal zone - * @ida: &struct ida to generate unique id for this zone's cooling - * devices - * @lock: lock to protect thermal_instances list - * @node: node in thermal_tz_list (in thermal_core.c) - * @poll_queue: delayed work for polling - * @notify_event: Last notification event - * @suspended: thermal zone suspend indicator - * @trips: array of struct thermal_trip objects - */ -struct thermal_zone_device { - int id; - char type[THERMAL_NAME_LENGTH]; - struct device device; - struct completion removal; - struct attribute_group trips_attribute_group; - struct thermal_attr *trip_temp_attrs; - struct thermal_attr *trip_type_attrs; - struct thermal_attr *trip_hyst_attrs; - enum thermal_device_mode mode; - void *devdata; - int num_trips; - unsigned long passive_delay_jiffies; - unsigned long polling_delay_jiffies; - int temperature; - int last_temperature; - int emul_temperature; - int passive; - int prev_low_trip; - int prev_high_trip; - atomic_t need_update; - struct thermal_zone_device_ops ops; - struct thermal_zone_params *tzp; - struct thermal_governor *governor; - void *governor_data; - struct list_head thermal_instances; - struct ida ida; - struct mutex lock; - struct list_head node; - struct delayed_work poll_queue; - enum thermal_notify_event notify_event; - bool suspended; -#ifdef CONFIG_THERMAL_DEBUGFS - struct thermal_debugfs *debugfs; -#endif - struct thermal_trip trips[] __counted_by(num_trips); -}; - -/** - * struct thermal_governor - structure that holds thermal governor information - * @name: name of the governor - * @bind_to_tz: callback called when binding to a thermal zone. If it - * returns 0, the governor is bound to the thermal zone, - * otherwise it fails. - * @unbind_from_tz: callback called when a governor is unbound from a - * thermal zone. - * @throttle: callback called for every trip point even if temperature is - * below the trip point temperature - * @update_tz: callback called when thermal zone internals have changed, e.g. - * thermal cooling instance was added/removed - * @governor_list: node in thermal_governor_list (in thermal_core.c) - */ -struct thermal_governor { - const char *name; - int (*bind_to_tz)(struct thermal_zone_device *tz); - void (*unbind_from_tz)(struct thermal_zone_device *tz); - int (*throttle)(struct thermal_zone_device *tz, - const struct thermal_trip *trip); - void (*update_tz)(struct thermal_zone_device *tz, - enum thermal_notify_event reason); - struct list_head governor_list; -}; - /* Structure to define Thermal Zone parameters */ struct thermal_zone_params { const char *governor_name; diff --git a/include/linux/threads.h b/include/linux/threads.h index c34173e6c5..1674a471b0 100644 --- a/include/linux/threads.h +++ b/include/linux/threads.h @@ -25,13 +25,13 @@ /* * This controls the default maximum pid allocated to a process */ -#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000) +#define PID_MAX_DEFAULT (IS_ENABLED(CONFIG_BASE_SMALL) ? 0x1000 : 0x8000) /* * A maximum of 4 million PIDs should be enough for a while. * [NOTE: PID/TIDs are limited to 2^30 ~= 1 billion, see FUTEX_TID_MASK.] */ -#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ +#define PID_MAX_LIMIT (IS_ENABLED(CONFIG_BASE_SMALL) ? PAGE_SIZE * 8 : \ (sizeof(long) > 4 ? 
4 * 1024 * 1024 : PID_MAX_DEFAULT)) /* diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 4338ea9ac4..7d902d8c05 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -33,7 +33,6 @@ enum tb_cfg_pkg_type { TB_CFG_PKG_ICM_EVENT = 10, TB_CFG_PKG_ICM_CMD = 11, TB_CFG_PKG_ICM_RESP = 12, - TB_CFG_PKG_PREPARE_TO_SLEEP = 13, }; /** diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index 62973f7d46..d306d9dd22 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h @@ -37,11 +37,6 @@ static inline bool timerqueue_node_queued(struct timerqueue_node *node) return !RB_EMPTY_NODE(&node->node); } -static inline bool timerqueue_node_expires(struct timerqueue_node *node) -{ - return node->expires; -} - static inline void timerqueue_init_head(struct timerqueue_head *head) { head->rb_root = RB_ROOT_CACHED; diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 4ee9d13749..e93ee8d936 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -23,6 +23,7 @@ #include <linux/fs.h> #include <linux/highmem.h> #include <crypto/hash_info.h> +#include <crypto/aes.h> #define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ #define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE @@ -30,17 +31,28 @@ struct tpm_chip; struct trusted_key_payload; struct trusted_key_options; +/* opaque structure, holds auth session parameters like the session key */ +struct tpm2_auth; + +enum tpm2_session_types { + TPM2_SE_HMAC = 0x00, + TPM2_SE_POLICY = 0x01, + TPM2_SE_TRIAL = 0x02, +}; /* if you add a new hash to this, increment TPM_MAX_HASHES below */ enum tpm_algorithms { TPM_ALG_ERROR = 0x0000, TPM_ALG_SHA1 = 0x0004, + TPM_ALG_AES = 0x0006, TPM_ALG_KEYEDHASH = 0x0008, TPM_ALG_SHA256 = 0x000B, TPM_ALG_SHA384 = 0x000C, TPM_ALG_SHA512 = 0x000D, TPM_ALG_NULL = 0x0010, TPM_ALG_SM3_256 = 0x0012, + TPM_ALG_ECC = 0x0023, + TPM_ALG_CFB = 0x0043, }; /* @@ -49,6 +61,11 @@ enum tpm_algorithms { */ #define TPM_MAX_HASHES 5 +enum tpm2_curves { + TPM2_ECC_NONE = 0x0000, + TPM2_ECC_NIST_P256 = 0x0003, +}; + struct tpm_digest { u16 alg_id; u8 digest[TPM_MAX_DIGEST_SIZE]; @@ -116,6 +133,20 @@ struct tpm_chip_seqops { const struct seq_operations *seqops; }; +/* fixed define for the curve we use which is NIST_P256 */ +#define EC_PT_SZ 32 + +/* + * fixed define for the size of a name. 
This is actually HASHALG size + * plus 2, so 32 for SHA256 + */ +#define TPM2_NAME_SIZE 34 + +/* + * The maximum size for an object context + */ +#define TPM2_MAX_CONTEXT_SIZE 4096 + struct tpm_chip { struct device dev; struct device devs; @@ -170,6 +201,18 @@ struct tpm_chip { /* active locality */ int locality; + +#ifdef CONFIG_TCG_TPM2_HMAC + /* details for communication security via sessions */ + + /* saved context for NULL seed */ + u8 null_key_context[TPM2_MAX_CONTEXT_SIZE]; + /* name of NULL seed */ + u8 null_key_name[TPM2_NAME_SIZE]; + u8 null_ec_key_x[EC_PT_SZ]; + u8 null_ec_key_y[EC_PT_SZ]; + struct tpm2_auth *auth; +#endif }; #define TPM_HEADER_SIZE 10 @@ -194,6 +237,7 @@ enum tpm2_timeouts { enum tpm2_structures { TPM2_ST_NO_SESSIONS = 0x8001, TPM2_ST_SESSIONS = 0x8002, + TPM2_ST_CREATION = 0x8021, }; /* Indicates from what layer of the software stack the error comes from */ @@ -204,6 +248,7 @@ enum tpm2_return_codes { TPM2_RC_SUCCESS = 0x0000, TPM2_RC_HASH = 0x0083, /* RC_FMT1 */ TPM2_RC_HANDLE = 0x008B, + TPM2_RC_INTEGRITY = 0x009F, TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */ TPM2_RC_FAILURE = 0x0101, TPM2_RC_DISABLED = 0x0120, @@ -231,6 +276,8 @@ enum tpm2_command_codes { TPM2_CC_CONTEXT_LOAD = 0x0161, TPM2_CC_CONTEXT_SAVE = 0x0162, TPM2_CC_FLUSH_CONTEXT = 0x0165, + TPM2_CC_READ_PUBLIC = 0x0173, + TPM2_CC_START_AUTH_SESS = 0x0176, TPM2_CC_VERIFY_SIGNATURE = 0x0177, TPM2_CC_GET_CAPABILITY = 0x017A, TPM2_CC_GET_RANDOM = 0x017B, @@ -243,9 +290,25 @@ enum tpm2_command_codes { }; enum tpm2_permanent_handles { + TPM2_RH_NULL = 0x40000007, TPM2_RS_PW = 0x40000009, }; +/* Most Significant Octet for key types */ +enum tpm2_mso_type { + TPM2_MSO_NVRAM = 0x01, + TPM2_MSO_SESSION = 0x02, + TPM2_MSO_POLICY = 0x03, + TPM2_MSO_PERMANENT = 0x40, + TPM2_MSO_VOLATILE = 0x80, + TPM2_MSO_PERSISTENT = 0x81, +}; + +static inline enum tpm2_mso_type tpm2_handle_mso(u32 handle) +{ + return handle >> 24; +} + enum tpm2_capabilities { TPM2_CAP_HANDLES = 1, TPM2_CAP_COMMANDS = 2, @@ -284,6 +347,7 @@ enum tpm_chip_flags { TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7), TPM_CHIP_FLAG_SUSPENDED = BIT(8), TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9), + TPM_CHIP_FLAG_DISABLE = BIT(10), }; #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) @@ -297,28 +361,46 @@ struct tpm_header { }; } __packed; -/* A string buffer type for constructing TPM commands. This is based on the - * ideas of string buffer code in security/keys/trusted.h but is heap based - * in order to keep the stack usage minimal. - */ - enum tpm_buf_flags { + /* the capacity exceeded: */ TPM_BUF_OVERFLOW = BIT(0), + /* TPM2B format: */ + TPM_BUF_TPM2B = BIT(1), + /* read out of boundary: */ + TPM_BUF_BOUNDARY_ERROR = BIT(2), }; +/* + * A string buffer type for constructing TPM commands. 
+ */ struct tpm_buf { - unsigned int flags; + u32 flags; + u32 length; u8 *data; + u8 handles; }; enum tpm2_object_attributes { TPM2_OA_FIXED_TPM = BIT(1), + TPM2_OA_ST_CLEAR = BIT(2), TPM2_OA_FIXED_PARENT = BIT(4), + TPM2_OA_SENSITIVE_DATA_ORIGIN = BIT(5), TPM2_OA_USER_WITH_AUTH = BIT(6), + TPM2_OA_ADMIN_WITH_POLICY = BIT(7), + TPM2_OA_NO_DA = BIT(10), + TPM2_OA_ENCRYPTED_DUPLICATION = BIT(11), + TPM2_OA_RESTRICTED = BIT(16), + TPM2_OA_DECRYPT = BIT(17), + TPM2_OA_SIGN = BIT(18), }; enum tpm2_session_attributes { TPM2_SA_CONTINUE_SESSION = BIT(0), + TPM2_SA_AUDIT_EXCLUSIVE = BIT(1), + TPM2_SA_AUDIT_RESET = BIT(3), + TPM2_SA_DECRYPT = BIT(5), + TPM2_SA_ENCRYPT = BIT(6), + TPM2_SA_AUDIT = BIT(7), }; struct tpm2_hash { @@ -326,84 +408,19 @@ struct tpm2_hash { unsigned int tpm_id; }; -static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal) -{ - struct tpm_header *head = (struct tpm_header *)buf->data; - - head->tag = cpu_to_be16(tag); - head->length = cpu_to_be32(sizeof(*head)); - head->ordinal = cpu_to_be32(ordinal); -} - -static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal) -{ - buf->data = (u8 *)__get_free_page(GFP_KERNEL); - if (!buf->data) - return -ENOMEM; - - buf->flags = 0; - tpm_buf_reset(buf, tag, ordinal); - return 0; -} - -static inline void tpm_buf_destroy(struct tpm_buf *buf) -{ - free_page((unsigned long)buf->data); -} - -static inline u32 tpm_buf_length(struct tpm_buf *buf) -{ - struct tpm_header *head = (struct tpm_header *)buf->data; - - return be32_to_cpu(head->length); -} - -static inline u16 tpm_buf_tag(struct tpm_buf *buf) -{ - struct tpm_header *head = (struct tpm_header *)buf->data; - - return be16_to_cpu(head->tag); -} - -static inline void tpm_buf_append(struct tpm_buf *buf, - const unsigned char *new_data, - unsigned int new_len) -{ - struct tpm_header *head = (struct tpm_header *)buf->data; - u32 len = tpm_buf_length(buf); - - /* Return silently if overflow has already happened. */ - if (buf->flags & TPM_BUF_OVERFLOW) - return; - - if ((len + new_len) > PAGE_SIZE) { - WARN(1, "tpm_buf: overflow\n"); - buf->flags |= TPM_BUF_OVERFLOW; - return; - } - - memcpy(&buf->data[len], new_data, new_len); - head->length = cpu_to_be32(len + new_len); -} - -static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value) -{ - tpm_buf_append(buf, &value, 1); -} - -static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value) -{ - __be16 value2 = cpu_to_be16(value); - - tpm_buf_append(buf, (u8 *) &value2, 2); -} - -static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value) -{ - __be32 value2 = cpu_to_be32(value); - - tpm_buf_append(buf, (u8 *) &value2, 4); -} +int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal); +void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal); +int tpm_buf_init_sized(struct tpm_buf *buf); +void tpm_buf_reset_sized(struct tpm_buf *buf); +void tpm_buf_destroy(struct tpm_buf *buf); +u32 tpm_buf_length(struct tpm_buf *buf); +void tpm_buf_append(struct tpm_buf *buf, const u8 *new_data, u16 new_length); +void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value); +void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value); +void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value); +u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset); +u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset); +u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset); /* * Check if TPM device is in the firmware upgrade mode. 
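[The tpm_buf helpers in the hunk above change from header inlines into out-of-line functions with an explicit length field and TPM2B support. A minimal sketch of how a caller builds a command with this API, assuming the standard TPM 2.0 GetRandom layout (a two-byte bytesRequested count); the transmit step is elided because the transport call is not part of this header:

	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
	if (rc)
		return rc;
	tpm_buf_append_u16(&buf, 8);	/* bytesRequested */
	/* ... hand buf.data, sized by tpm_buf_length(&buf), to the transport ... */
	tpm_buf_destroy(&buf);
]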
@@ -415,7 +432,7 @@ static inline bool tpm_is_firmware_upgrade(struct tpm_chip *chip) static inline u32 tpm2_rc_value(u32 rc) { - return (rc & BIT(7)) ? rc & 0xff : rc; + return (rc & BIT(7)) ? rc & 0xbf : rc; } #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) @@ -429,10 +446,19 @@ extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digest); extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, struct tpm_digest *digests); -extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen); extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max); extern struct tpm_chip *tpm_default_chip(void); void tpm2_flush_context(struct tpm_chip *chip, u32 handle); + +static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle) +{ + /* simple authorization for empty auth */ + tpm_buf_append_u32(buf, 9); /* total length of auth */ + tpm_buf_append_u32(buf, handle); + tpm_buf_append_u16(buf, 0); /* nonce len */ + tpm_buf_append_u8(buf, 0); /* attributes */ + tpm_buf_append_u16(buf, 0); /* hmac len */ +} #else static inline int tpm_is_tpm2(struct tpm_chip *chip) { @@ -450,10 +476,6 @@ static inline int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, return -ENODEV; } -static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) -{ - return -ENODEV; -} static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max) { return -ENODEV; @@ -463,5 +485,77 @@ static inline struct tpm_chip *tpm_default_chip(void) { return NULL; } + +static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle) +{ +} #endif + +static inline struct tpm2_auth *tpm2_chip_auth(struct tpm_chip *chip) +{ +#ifdef CONFIG_TCG_TPM2_HMAC + return chip->auth; +#else + return NULL; +#endif +} + +void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, + u32 handle, u8 *name); +void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, + u8 attributes, u8 *passphrase, + int passphraselen); +static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip, + struct tpm_buf *buf, + u8 attributes, + u8 *passphrase, + int passphraselen) +{ + struct tpm_header *head; + int offset; + + if (tpm2_chip_auth(chip)) { + tpm_buf_append_hmac_session(chip, buf, attributes, passphrase, passphraselen); + } else { + offset = buf->handles * 4 + TPM_HEADER_SIZE; + head = (struct tpm_header *)buf->data; + + /* + * If the only sessions are optional, the command tag must change to + * TPM2_ST_NO_SESSIONS. 
+ */ + if (tpm_buf_length(buf) == offset) + head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); + } +} + +#ifdef CONFIG_TCG_TPM2_HMAC + +int tpm2_start_auth_session(struct tpm_chip *chip); +void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf); +int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, + int rc); +void tpm2_end_auth_session(struct tpm_chip *chip); +#else +#include <asm/unaligned.h> + +static inline int tpm2_start_auth_session(struct tpm_chip *chip) +{ + return 0; +} +static inline void tpm2_end_auth_session(struct tpm_chip *chip) +{ +} +static inline void tpm_buf_fill_hmac_session(struct tpm_chip *chip, + struct tpm_buf *buf) +{ +} +static inline int tpm_buf_check_hmac_response(struct tpm_chip *chip, + struct tpm_buf *buf, + int rc) +{ + return rc; +} +#endif /* CONFIG_TCG_TPM2_HMAC */ + #endif diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 6f9bdfb09d..9df3e29736 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -765,8 +765,11 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); void perf_event_detach_bpf_prog(struct perf_event *event); int perf_event_query_prog_array(struct perf_event *event, void __user *info); -int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); -int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog); + +struct bpf_raw_tp_link; +int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link); +int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link); + struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name); void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, @@ -794,11 +797,12 @@ perf_event_query_prog_array(struct perf_event *event, void __user *info) { return -EOPNOTSUPP; } -static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p) +struct bpf_raw_tp_link; +static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) { return -EOPNOTSUPP; } -static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p) +static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) { return -EOPNOTSUPP; } @@ -909,31 +913,31 @@ void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); void perf_event_free_bpf_prog(struct perf_event *event); -void bpf_trace_run1(struct bpf_prog *prog, u64 arg1); -void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2); -void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run1(struct bpf_raw_tp_link *link, u64 arg1); +void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2); +void bpf_trace_run3(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3); -void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run4(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4); -void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run5(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5); -void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2, +void 
bpf_trace_run6(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6); -void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run7(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7); -void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run8(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8); -void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run9(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9); -void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run10(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9, u64 arg10); -void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run11(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9, u64 arg10, u64 arg11); -void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run12(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12); void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h index d48cd92d23..24ea8ac049 100644 --- a/include/linux/trace_recursion.h +++ b/include/linux/trace_recursion.h @@ -135,7 +135,7 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip); # define do_ftrace_record_recursion(ip, pip) do { } while (0) #endif -#ifdef CONFIG_ARCH_WANTS_NO_INSTR +#ifdef CONFIG_FTRACE_VALIDATE_RCU_IS_WATCHING # define trace_warn_on_no_rcu(ip) \ ({ \ bool __ret = !rcu_is_watching(); \ diff --git a/include/linux/tty.h b/include/linux/tty.h index 2b2e6f0a54..2372f93572 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -145,15 +145,12 @@ struct tty_operations; * @count: count of open processes, reaching zero cancels all the work for * this tty and drops a @kref too (but does not free this tty) * @winsize: size of the terminal "window" (cf. @winsize_mutex) - * @flow: flow settings grouped together, see also @flow.unused + * @flow: flow settings grouped together * @flow.lock: lock for @flow members * @flow.stopped: tty stopped/started by stop_tty()/start_tty() * @flow.tco_stopped: tty stopped/started by %TCOOFF/%TCOON ioctls (it has * precedence over @flow.stopped) - * @flow.unused: alignment for Alpha, so that no members other than @flow.* are - * modified by the same 64b word store. The @flow's __aligned is - * there for the very same reason. - * @ctrl: control settings grouped together, see also @ctrl.unused + * @ctrl: control settings grouped together * @ctrl.lock: lock for @ctrl members * @ctrl.pgrp: process group of this tty (setpgrp(2)) * @ctrl.session: session of this tty (setsid(2)). Writes are protected by both @@ -161,7 +158,6 @@ struct tty_operations; * them. 
* @ctrl.pktstatus: packet mode status (bitwise OR of %TIOCPKT_ constants) * @ctrl.packet: packet mode enabled - * @ctrl.unused: alignment for Alpha, see @flow.unused for explanation * @hw_stopped: not controlled by the tty layer, under @driver's control for CTS * handling * @receive_room: bytes permitted to feed to @ldisc without any being lost @@ -216,8 +212,7 @@ struct tty_struct { spinlock_t lock; bool stopped; bool tco_stopped; - unsigned long unused[0]; - } __aligned(sizeof(unsigned long)) flow; + } flow; struct { struct pid *pgrp; @@ -225,8 +220,7 @@ struct tty_struct { spinlock_t lock; unsigned char pktstatus; bool packet; - unsigned long unused[0]; - } __aligned(sizeof(unsigned long)) ctrl; + } ctrl; bool hw_stopped; bool closing; diff --git a/include/linux/udp.h b/include/linux/udp.h index e398e1dbd2..3eb3f2b9a2 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -24,7 +24,7 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb) } #define UDP_HTABLE_SIZE_MIN_PERNET 128 -#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256) +#define UDP_HTABLE_SIZE_MIN (IS_ENABLED(CONFIG_BASE_SMALL) ? 128 : 256) #define UDP_HTABLE_SIZE_MAX 65536 static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask) diff --git a/include/linux/uio.h b/include/linux/uio.h index 00cebe2b70..7020adedfa 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -206,6 +206,16 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) } static __always_inline __must_check +bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i) +{ + size_t copied = copy_to_iter(addr, bytes, i); + if (likely(copied == bytes)) + return true; + iov_iter_revert(i, copied); + return false; +} + +static __always_inline __must_check bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) { size_t copied = copy_from_iter(addr, bytes, i); diff --git a/include/linux/usb.h b/include/linux/usb.h index 9e52179872..1913a13833 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -440,11 +440,6 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size, /* ----------------------------------------------------------------------- */ -/* USB device number allocation bitmap */ -struct usb_devmap { - unsigned long devicemap[128 / (8*sizeof(unsigned long))]; -}; - /* * Allocated per bus (tree of devices) we have: */ @@ -472,7 +467,7 @@ struct usb_bus { * round-robin allocation */ struct mutex devnum_next_mutex; /* devnum_next mutex */ - struct usb_devmap devmap; /* device address allocation map */ + DECLARE_BITMAP(devmap, 128); /* USB device number allocation bitmap */ struct usb_device *root_hub; /* Root hub */ struct usb_bus *hs_companion; /* Companion EHCI bus, if any */ diff --git a/include/linux/usb/onboard_dev.h b/include/linux/usb/onboard_dev.h new file mode 100644 index 0000000000..b79db6d193 --- /dev/null +++ b/include/linux/usb/onboard_dev.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_USB_ONBOARD_DEV_H +#define __LINUX_USB_ONBOARD_DEV_H + +struct usb_device; +struct list_head; + +#if IS_ENABLED(CONFIG_USB_ONBOARD_DEV) +void onboard_dev_create_pdevs(struct usb_device *parent_dev, struct list_head *pdev_list); +void onboard_dev_destroy_pdevs(struct list_head *pdev_list); +#else +static inline void onboard_dev_create_pdevs(struct usb_device *parent_dev, + struct list_head *pdev_list) {} +static inline void onboard_dev_destroy_pdevs(struct list_head *pdev_list) {} +#endif + +#endif /* __LINUX_USB_ONBOARD_DEV_H */ diff 
--git a/include/linux/usb/onboard_hub.h b/include/linux/usb/onboard_hub.h deleted file mode 100644 index d937323055..0000000000 --- a/include/linux/usb/onboard_hub.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef __LINUX_USB_ONBOARD_HUB_H -#define __LINUX_USB_ONBOARD_HUB_H - -struct usb_device; -struct list_head; - -#if IS_ENABLED(CONFIG_USB_ONBOARD_HUB) -void onboard_hub_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list); -void onboard_hub_destroy_pdevs(struct list_head *pdev_list); -#else -static inline void onboard_hub_create_pdevs(struct usb_device *parent_hub, - struct list_head *pdev_list) {} -static inline void onboard_hub_destroy_pdevs(struct list_head *pdev_list) {} -#endif - -#endif /* __LINUX_USB_ONBOARD_HUB_H */ diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 372898d9ee..67bfcda6c7 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -194,9 +194,4 @@ struct renesas_usbhs_platform_info { struct renesas_usbhs_driver_param driver_param; }; -/* - * macro for platform - */ -#define renesas_usbhs_get_info(pdev)\ - ((struct renesas_usbhs_platform_info *)(pdev)->dev.platform_data) #endif /* RENESAS_USB_H */ diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h index 46e73584b6..e6c14f2b1f 100644 --- a/include/linux/usb/tegra_usb_phy.h +++ b/include/linux/usb/tegra_usb_phy.h @@ -7,11 +7,12 @@ #define __TEGRA_USB_PHY_H #include <linux/clk.h> -#include <linux/gpio.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/usb/otg.h> +struct gpio_desc; + /* * utmi_pll_config_in_car_module: true if the UTMI PLL configuration registers * should be set up by clk-tegra, false if by the PHY code diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 26c4325aa3..96fea92087 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -126,6 +126,8 @@ struct virtio_admin_cmd { * @vqs: the list of virtqueues for this device. * @features: the features supported by both driver and device. * @priv: private pointer for the driver's use. + * @debugfs_dir: debugfs directory entry. + * @debugfs_filter_features: features to be filtered set by debugfs. 
*/ struct virtio_device { int index; @@ -141,6 +143,10 @@ struct virtio_device { struct list_head vqs; u64 features; void *priv; +#ifdef CONFIG_VIRTIO_DEBUG + struct dentry *debugfs_dir; + u64 debugfs_filter_features; +#endif }; #define dev_to_virtio(_dev) container_of_const(_dev, struct virtio_device, dev) @@ -237,4 +243,33 @@ void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t a void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr, unsigned long offset, size_t size, enum dma_data_direction dir); + +#ifdef CONFIG_VIRTIO_DEBUG +void virtio_debug_device_init(struct virtio_device *dev); +void virtio_debug_device_exit(struct virtio_device *dev); +void virtio_debug_device_filter_features(struct virtio_device *dev); +void virtio_debug_init(void); +void virtio_debug_exit(void); +#else +static inline void virtio_debug_device_init(struct virtio_device *dev) +{ +} + +static inline void virtio_debug_device_exit(struct virtio_device *dev) +{ +} + +static inline void virtio_debug_device_filter_features(struct virtio_device *dev) +{ +} + +static inline void virtio_debug_init(void) +{ +} + +static inline void virtio_debug_exit(void) +{ +} +#endif + #endif /* _LINUX_VIRTIO_H */ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 4dfa9b69ca..d1d7825318 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -56,6 +56,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, unsigned int thlen = 0; unsigned int p_off = 0; unsigned int ip_proto; + u64 ret, remainder, gso_size; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -98,6 +99,16 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16)); + if (hdr->gso_size) { + gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); + ret = div64_u64_rem(skb->len, gso_size, &remainder); + if (!(ret && (hdr->gso_size > needed) && + ((remainder > needed) || (remainder == 0)))) { + return -EINVAL; + } + skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG; + } + if (!pskb_may_pull(skb, needed)) return -EINVAL; diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 98ea90e904..e4a631ec43 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -2,6 +2,8 @@ #ifndef _LINUX_VMALLOC_H #define _LINUX_VMALLOC_H +#include <linux/alloc_tag.h> +#include <linux/sched.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/list.h> @@ -138,26 +140,54 @@ extern unsigned long vmalloc_nr_pages(void); static inline unsigned long vmalloc_nr_pages(void) { return 0; } #endif -extern void *vmalloc(unsigned long size) __alloc_size(1); -extern void *vzalloc(unsigned long size) __alloc_size(1); -extern void *vmalloc_user(unsigned long size) __alloc_size(1); -extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1); -extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1); -extern void *vmalloc_32(unsigned long size) __alloc_size(1); -extern void *vmalloc_32_user(unsigned long size) __alloc_size(1); -extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1); -extern void *__vmalloc_node_range(unsigned long size, unsigned long align, +extern void *vmalloc_noprof(unsigned long size) __alloc_size(1); +#define vmalloc(...) 
alloc_hooks(vmalloc_noprof(__VA_ARGS__)) + +extern void *vzalloc_noprof(unsigned long size) __alloc_size(1); +#define vzalloc(...) alloc_hooks(vzalloc_noprof(__VA_ARGS__)) + +extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1); +#define vmalloc_user(...) alloc_hooks(vmalloc_user_noprof(__VA_ARGS__)) + +extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1); +#define vmalloc_node(...) alloc_hooks(vmalloc_node_noprof(__VA_ARGS__)) + +extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1); +#define vzalloc_node(...) alloc_hooks(vzalloc_node_noprof(__VA_ARGS__)) + +extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1); +#define vmalloc_32(...) alloc_hooks(vmalloc_32_noprof(__VA_ARGS__)) + +extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1); +#define vmalloc_32_user(...) alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__)) + +extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1); +#define __vmalloc(...) alloc_hooks(__vmalloc_noprof(__VA_ARGS__)) + +extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller) __alloc_size(1); -void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, +#define __vmalloc_node_range(...) alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__)) + +void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) __alloc_size(1); -void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1); +#define __vmalloc_node(...) alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__)) + +void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1); +#define vmalloc_huge(...) alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__)) + +extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); +#define __vmalloc_array(...) alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__)) + +extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2); +#define vmalloc_array(...) alloc_hooks(vmalloc_array_noprof(__VA_ARGS__)) + +extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); +#define __vcalloc(...) alloc_hooks(__vcalloc_noprof(__VA_ARGS__)) -extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); -extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2); -extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); -extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2); +extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2); +#define vcalloc(...) 
alloc_hooks(vcalloc_noprof(__VA_ARGS__)) extern void vfree(const void *addr); extern void vfree_atomic(const void *addr); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 343906a98d..735eae6e27 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -487,14 +487,6 @@ static inline void node_stat_sub_folio(struct folio *folio, mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio)); } -static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, - int migratetype) -{ - __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); - if (is_migrate_cma(migratetype)) - __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); -} - extern const char * const vmstat_text[]; static inline const char *zone_stat_name(enum zone_stat_item item) diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 3684487d01..29dd5b91dd 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -5,10 +5,6 @@ #include <linux/context_tracking_state.h> #include <linux/sched.h> -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -#include <asm/vtime.h> -#endif - /* * Common vtime APIs */ @@ -18,7 +14,6 @@ extern void vtime_account_idle(struct task_struct *tsk); #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void arch_vtime_task_switch(struct task_struct *tsk); extern void vtime_user_enter(struct task_struct *tsk); extern void vtime_user_exit(struct task_struct *tsk); extern void vtime_guest_enter(struct task_struct *tsk); diff --git a/include/linux/wordpart.h b/include/linux/wordpart.h index f6f8f83b15..4ca1ba66d2 100644 --- a/include/linux/wordpart.h +++ b/include/linux/wordpart.h @@ -39,4 +39,11 @@ */ #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) +/* Set bits in the first 'n' bytes when loaded from memory */ +#ifdef __LITTLE_ENDIAN +# define aligned_byte_mask(n) ((1UL << 8*(n))-1) +#else +# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n))) +#endif + #endif // _LINUX_WORDPART_H diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 72031fa804..d9968bfc8e 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -51,20 +51,23 @@ enum work_bits { * data contains off-queue information when !WORK_STRUCT_PWQ. * * MSB - * [ pool ID ] [ OFFQ flags ] [ STRUCT flags ] - * 1 bit 4 or 5 bits + * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ] + * 16 bits 1 bit 4 or 5 bits */ WORK_OFFQ_FLAG_SHIFT = WORK_STRUCT_FLAG_BITS, - WORK_OFFQ_CANCELING_BIT = WORK_OFFQ_FLAG_SHIFT, + WORK_OFFQ_BH_BIT = WORK_OFFQ_FLAG_SHIFT, WORK_OFFQ_FLAG_END, WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT, + WORK_OFFQ_DISABLE_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS, + WORK_OFFQ_DISABLE_BITS = 16, + /* * When a work item is off queue, the high bits encode off-queue flags * and the last pool it was on. Cap pool ID to 31 bits and use the * highest number to indicate that no pool is associated. */ - WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS, + WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS, WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, }; @@ -96,7 +99,9 @@ enum wq_misc_consts { }; /* Convenience constants - of type 'unsigned long', not 'enum'! 
*/ -#define WORK_OFFQ_CANCELING (1ul << WORK_OFFQ_CANCELING_BIT) +#define WORK_OFFQ_BH (1ul << WORK_OFFQ_BH_BIT) +#define WORK_OFFQ_FLAG_MASK (((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT) +#define WORK_OFFQ_DISABLE_MASK (((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT) #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1) #define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT) #define WORK_STRUCT_PWQ_MASK (~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1)) @@ -180,6 +185,9 @@ struct workqueue_attrs { * Below fields aren't properties of a worker_pool. They only modify how * :c:func:`apply_workqueue_attrs` select pools and thus don't * participate in pool hash calculations or equality comparisons. + * + * If @affn_strict is set, @cpumask isn't a property of a worker_pool + * either. */ /** @@ -465,7 +473,7 @@ void workqueue_softirq_dead(unsigned int cpu); * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags * @max_active: max in-flight work items, 0 for default - * remaining args: args for @fmt + * @...: args for @fmt * * For a per-cpu workqueue, @max_active limits the number of in-flight work * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be @@ -559,6 +567,14 @@ extern bool flush_delayed_work(struct delayed_work *dwork); extern bool cancel_delayed_work(struct delayed_work *dwork); extern bool cancel_delayed_work_sync(struct delayed_work *dwork); +extern bool disable_work(struct work_struct *work); +extern bool disable_work_sync(struct work_struct *work); +extern bool enable_work(struct work_struct *work); + +extern bool disable_delayed_work(struct delayed_work *dwork); +extern bool disable_delayed_work_sync(struct delayed_work *dwork); +extern bool enable_delayed_work(struct delayed_work *dwork); + extern bool flush_rcu_work(struct rcu_work *rwork); extern void workqueue_set_max_active(struct workqueue_struct *wq, @@ -666,6 +682,32 @@ static inline bool schedule_work(struct work_struct *work) return queue_work(system_wq, work); } +/** + * enable_and_queue_work - Enable and queue a work item on a specific workqueue + * @wq: The target workqueue + * @work: The work item to be enabled and queued + * + * This function combines the operations of enable_work() and queue_work(), + * providing a convenient way to enable and queue a work item in a single call. + * It invokes enable_work() on @work and then queues it if the disable depth + * reached 0. Returns %true if the disable depth reached 0 and @work is queued, + * and %false otherwise. + * + * Note that @work is always queued when disable depth reaches zero. If the + * desired behavior is queueing only if certain events took place while @work is + * disabled, the user should implement the necessary state tracking and perform + * explicit conditional queueing after enable_work(). + */ +static inline bool enable_and_queue_work(struct workqueue_struct *wq, + struct work_struct *work) +{ + if (enable_work(work)) { + queue_work(wq, work); + return true; + } + return false; +} + /* * Detect attempt to flush system-wide workqueues at compile time when possible. * Warn attempt to flush system-wide workqueues at runtime. 
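[The workqueue hunks above replace the single off-queue canceling bit with a 16-bit disable depth and add disable_work()/enable_work() plus their delayed-work variants. A hedged sketch of the intended usage pattern; the drv context and its irq_work member are illustrative, not from the patch:

	/* teardown/suspend: wait for a running instance, then block queueing */
	disable_work_sync(&drv->irq_work);

	/* resume: drop the disable depth; queue at once if it reached zero */
	enable_and_queue_work(system_wq, &drv->irq_work);
]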
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 9845cb62e4..112d806ddb 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -355,6 +355,7 @@ int dirtytime_interval_handler(struct ctl_table *table, int write, void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); +unsigned long cgwb_calc_thresh(struct bdi_writeback *wb); void wb_update_bandwidth(struct bdi_writeback *wb); diff --git a/include/linux/xarray.h b/include/linux/xarray.h index cb571dfcf4..0b618ec041 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -12,14 +12,18 @@ #include <linux/bitmap.h> #include <linux/bug.h> #include <linux/compiler.h> +#include <linux/err.h> #include <linux/gfp.h> #include <linux/kconfig.h> -#include <linux/kernel.h> +#include <linux/limits.h> +#include <linux/lockdep.h> #include <linux/rcupdate.h> #include <linux/sched/mm.h> #include <linux/spinlock.h> #include <linux/types.h> +struct list_lru; + /* * The bottom two bits of the entry determine how the XArray interprets * the contents: @@ -1141,12 +1145,12 @@ static inline void xa_release(struct xarray *xa, unsigned long index) * doubled the number of slots per node, we'd get only 3 nodes per 4kB page. */ #ifndef XA_CHUNK_SHIFT -#define XA_CHUNK_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) +#define XA_CHUNK_SHIFT (IS_ENABLED(CONFIG_BASE_SMALL) ? 4 : 6) #endif #define XA_CHUNK_SIZE (1UL << XA_CHUNK_SHIFT) #define XA_CHUNK_MASK (XA_CHUNK_SIZE - 1) #define XA_MAX_MARKS 3 -#define XA_MARK_LONGS DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG) +#define XA_MARK_LONGS BITS_TO_LONGS(XA_CHUNK_SIZE) /* * @count is the count of every non-NULL element in the ->slots array @@ -1548,6 +1552,7 @@ void xas_create_range(struct xa_state *); #ifdef CONFIG_XARRAY_MULTI int xa_get_order(struct xarray *, unsigned long index); +int xas_get_order(struct xa_state *xas); void xas_split(struct xa_state *, void *entry, unsigned int order); void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t); #else @@ -1556,6 +1561,11 @@ static inline int xa_get_order(struct xarray *xa, unsigned long index) return 0; } +static inline int xas_get_order(struct xa_state *xas) +{ + return 0; +} + static inline void xas_split(struct xa_state *xas, void *entry, unsigned int order) { diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 3296438eec..a67d62b796 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -53,7 +53,7 @@ void *zpool_map_handle(struct zpool *pool, unsigned long handle, void zpool_unmap_handle(struct zpool *pool, unsigned long handle); -u64 zpool_get_total_size(struct zpool *pool); +u64 zpool_get_total_pages(struct zpool *pool); /** @@ -91,7 +91,7 @@ struct zpool_driver { enum zpool_mapmode mm); void (*unmap)(void *pool, unsigned long handle); - u64 (*total_size)(void *pool); + u64 (*total_pages)(void *pool); }; void zpool_register_driver(struct zpool_driver *driver); diff --git a/include/linux/zswap.h b/include/linux/zswap.h index 341aea4900..2a85b941db 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -7,7 +7,6 @@ struct lruvec; -extern u64 zswap_pool_total_size; extern atomic_t zswap_stored_pages; #ifdef CONFIG_ZSWAP @@ -27,6 +26,7 @@ struct zswap_lruvec_state { atomic_long_t nr_zswap_protected; }; +unsigned long zswap_total_pages(void); bool zswap_store(struct folio *folio); bool zswap_load(struct folio *folio); void zswap_invalidate(swp_entry_t swp); diff --git 
a/include/media/cec.h b/include/media/cec.h index cc3fcd0496..d131514032 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -173,7 +173,7 @@ struct cec_adap_ops { * case the transmit will finish, but will not retransmit * and be marked as ABORTED. * @xfer_timeout_ms: the transfer timeout in ms. - * If 0, then timeout after 2.1 ms. + * If 0, then timeout after 2100 ms. * @kthread_config: kthread used to configure a CEC adapter * @config_completion: used to signal completion of the config kthread * @kthread: main CEC processing thread @@ -187,6 +187,7 @@ struct cec_adap_ops { * in order to transmit or receive CEC messages. This is usually a HW * limitation. * @is_enabled: the CEC adapter is enabled + * @is_claiming_log_addrs: true if cec_claim_log_addrs() is running * @is_configuring: the CEC adapter is configuring (i.e. claiming LAs) * @must_reconfigure: while configuring, the PA changed, so reclaim LAs * @is_configured: the CEC adapter is configured (i.e. has claimed LAs) diff --git a/include/media/ipu6-pci-table.h b/include/media/ipu6-pci-table.h new file mode 100644 index 0000000000..0899d9d2f9 --- /dev/null +++ b/include/media/ipu6-pci-table.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024 Intel Corporation + */ + +#ifndef __IPU6_PCI_TBL_H__ +#define __IPU6_PCI_TBL_H__ + +#include <linux/pci.h> + +#define PCI_DEVICE_ID_INTEL_IPU6 0x9a19 +#define PCI_DEVICE_ID_INTEL_IPU6SE 0x4e19 +#define PCI_DEVICE_ID_INTEL_IPU6EP_ADLP 0x465d +#define PCI_DEVICE_ID_INTEL_IPU6EP_ADLN 0x462e +#define PCI_DEVICE_ID_INTEL_IPU6EP_RPLP 0xa75d +#define PCI_DEVICE_ID_INTEL_IPU6EP_MTL 0x7d19 + +static const struct pci_device_id ipu6_pci_tbl[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6SE) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_ADLP) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_ADLN) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_RPLP) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IPU6EP_MTL) }, + { } +}; + +#endif /* __IPU6_PCI_TBL_H__ */ diff --git a/include/media/media-device.h b/include/media/media-device.h index 2c146d0b2b..53d2a16a70 100644 --- a/include/media/media-device.h +++ b/include/media/media-device.h @@ -429,6 +429,9 @@ void __media_device_usb_init(struct media_device *mdev, const char *driver_name); #else +static inline void media_device_init(struct media_device *mdev) +{ +} static inline int media_device_register(struct media_device *mdev) { return 0; @@ -436,6 +439,9 @@ static inline int media_device_register(struct media_device *mdev) static inline void media_device_unregister(struct media_device *mdev) { } +static inline void media_device_cleanup(struct media_device *mdev) +{ +} static inline int media_device_register_entity(struct media_device *mdev, struct media_entity *entity) { diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h index 9bd326d311..f26c323e9c 100644 --- a/include/media/v4l2-async.h +++ b/include/media/v4l2-async.h @@ -310,7 +310,9 @@ void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier); * * @sd: pointer to &struct v4l2_subdev */ -int v4l2_async_register_subdev(struct v4l2_subdev *sd); +#define v4l2_async_register_subdev(sd) \ + __v4l2_async_register_subdev(sd, THIS_MODULE) +int __v4l2_async_register_subdev(struct v4l2_subdev *sd, struct module *module); /** * v4l2_async_register_subdev_sensor - registers a sensor sub-device to the diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h index 
f6f111fae3..dd897a362f 100644 --- a/include/media/v4l2-device.h +++ b/include/media/v4l2-device.h @@ -156,8 +156,11 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev); * An error is returned if the module is no longer loaded on any attempts * to register it. */ -int __must_check v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, - struct v4l2_subdev *sd); +#define v4l2_device_register_subdev(v4l2_dev, sd) \ + __v4l2_device_register_subdev(v4l2_dev, sd, THIS_MODULE) +int __must_check __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, + struct v4l2_subdev *sd, + struct module *module); /** * v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device. diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index edb733f216..bdbb7e5423 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h @@ -163,6 +163,8 @@ struct v4l2_fh; * :ref:`VIDIOC_CREATE_BUFS <vidioc_create_bufs>` ioctl * @vidioc_prepare_buf: pointer to the function that implements * :ref:`VIDIOC_PREPARE_BUF <vidioc_prepare_buf>` ioctl + * @vidioc_remove_bufs: pointer to the function that implements + * :ref:`VIDIOC_REMOVE_BUFS <vidioc_remove_bufs>` ioctl * @vidioc_overlay: pointer to the function that implements * :ref:`VIDIOC_OVERLAY <vidioc_overlay>` ioctl * @vidioc_g_fbuf: pointer to the function that implements @@ -422,6 +424,8 @@ struct v4l2_ioctl_ops { struct v4l2_create_buffers *b); int (*vidioc_prepare_buf)(struct file *file, void *fh, struct v4l2_buffer *b); + int (*vidioc_remove_bufs)(struct file *file, void *fh, + struct v4l2_remove_buffers *d); int (*vidioc_overlay)(struct file *file, void *fh, unsigned int i); int (*vidioc_g_fbuf)(struct file *file, void *fh, diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 7f1af1f7f9..0af330cf91 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -867,6 +867,8 @@ int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb); int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh, struct v4l2_create_buffers *create); +int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *d); int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf); int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh, diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index a9e6b81462..e30c463d90 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -452,14 +452,6 @@ enum v4l2_subdev_pre_streamon_flags { * * @g_pixelaspect: callback to return the pixelaspect ratio. * - * @s_dv_timings: Set custom dv timings in the sub device. This is used - * when sub device is capable of setting detailed timing information - * in the hardware to generate/detect the video signal. - * - * @g_dv_timings: Get custom dv timings in the sub device. - * - * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS() ioctl handler code. - * * @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev * can adjust @size to a lower value and must not write more data to the * buffer starting at @data than the original value of @size. 
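[The v4l2_device_register_subdev() and v4l2_async_register_subdev() changes above use the same macro-wrapping pattern: expanding THIS_MODULE at the call site records the sub-device driver's own module, without the header having to pull in the include chain that defines THIS_MODULE. A sketch of what a caller now compiles to; the variable names are illustrative:

	/* in a sub-device driver's probe path,
	 * v4l2_device_register_subdev(v4l2_dev, sd) expands to
	 * __v4l2_device_register_subdev(v4l2_dev, sd, THIS_MODULE),
	 * so the module recorded at registration is the caller's,
	 * not the V4L2 core's */
	err = v4l2_device_register_subdev(v4l2_dev, sd);
]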
@@ -490,12 +482,6 @@ struct v4l2_subdev_video_ops { int (*g_input_status)(struct v4l2_subdev *sd, u32 *status); int (*s_stream)(struct v4l2_subdev *sd, int enable); int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect); - int (*s_dv_timings)(struct v4l2_subdev *sd, - struct v4l2_dv_timings *timings); - int (*g_dv_timings)(struct v4l2_subdev *sd, - struct v4l2_dv_timings *timings); - int (*query_dv_timings)(struct v4l2_subdev *sd, - struct v4l2_dv_timings *timings); int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf, unsigned int *size); int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags); @@ -728,12 +714,14 @@ struct v4l2_subdev_stream_configs { /** * struct v4l2_subdev_krouting - subdev routing table * + * @len_routes: length of routes array, in routes * @num_routes: number of routes * @routes: &struct v4l2_subdev_route * * This structure contains the routing table for a subdev. */ struct v4l2_subdev_krouting { + unsigned int len_routes; unsigned int num_routes; struct v4l2_subdev_route *routes; }; @@ -791,6 +779,14 @@ struct v4l2_subdev_state { * * @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code. * + * @s_dv_timings: Set custom dv timings in the sub device. This is used + * when sub device is capable of setting detailed timing information + * in the hardware to generate/detect the video signal. + * + * @g_dv_timings: Get custom dv timings in the sub device. + * + * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS() ioctl handler code. + * * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP() ioctl handler * code. * @@ -864,6 +860,12 @@ struct v4l2_subdev_pad_ops { struct v4l2_subdev_frame_interval *interval); int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid); + int (*s_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*g_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); + int (*query_dv_timings)(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_dv_timings *timings); int (*dv_timings_cap)(struct v4l2_subdev *sd, struct v4l2_dv_timings_cap *cap); int (*enum_dv_timings)(struct v4l2_subdev *sd, @@ -1725,6 +1727,46 @@ static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state) } /** + * v4l2_subdev_lock_states - Lock two sub-device states + * @state1: One subdevice state + * @state2: The other subdevice state + * + * Locks the state of two sub-devices. + * + * The states must be unlocked with v4l2_subdev_unlock_states() after use. + * + * This differs from calling v4l2_subdev_lock_state() on both states so that if + * the states share the same lock, the lock is acquired only once (so no + * deadlock occurs). The caller is responsible for ensuring the locks will + * always be acquired in the same order. + */ +static inline void v4l2_subdev_lock_states(struct v4l2_subdev_state *state1, + struct v4l2_subdev_state *state2) +{ + mutex_lock(state1->lock); + if (state1->lock != state2->lock) + mutex_lock(state2->lock); +} + +/** + * v4l2_subdev_unlock_states() - Unlock two sub-device states + * @state1: One subdevice state + * @state2: The other subdevice state + * + * Unlocks the state of two sub-devices. + * + * This differs from calling v4l2_subdev_unlock_state() on both states so that + * if the states share the same lock, the lock is released only once. 
+ */ +static inline void v4l2_subdev_unlock_states(struct v4l2_subdev_state *state1, + struct v4l2_subdev_state *state2) +{ + mutex_unlock(state1->lock); + if (state1->lock != state2->lock) + mutex_unlock(state2->lock); +} + +/** * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state * is unlocked and returns it * @sd: The subdevice diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 8b86996b27..955237ac50 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -346,8 +346,8 @@ struct vb2_buffer { * describes the requested number of planes and sizes\[\] * contains the requested plane sizes. In this case * \*num_buffers are being allocated additionally to - * q->num_buffers. If either \*num_planes or the requested - * sizes are invalid callback must return %-EINVAL. + * the buffers already allocated. If either \*num_planes + * or the requested sizes are invalid callback must return %-EINVAL. * @wait_prepare: release any locks taken while calling vb2 functions; * it is called before an ioctl needs to wait for a new * buffer to arrive; required to avoid a deadlock in @@ -549,9 +549,21 @@ struct vb2_buf_ops { * @start_streaming can be called. Used when a DMA engine * cannot be started unless at least this number of buffers * have been queued into the driver. - * VIDIOC_REQBUFS will ensure at least @min_queued_buffers + * VIDIOC_REQBUFS will ensure at least @min_queued_buffers + 1 * buffers will be allocated. Note that VIDIOC_CREATE_BUFS will not * modify the requested buffer count. + * @min_reqbufs_allocation: the minimum number of buffers to be allocated when + * calling VIDIOC_REQBUFS. Note that VIDIOC_CREATE_BUFS will *not* + * modify the requested buffer count and does not use this field. + * Drivers can set this if there has to be a certain number of + * buffers available for the hardware to work effectively. + * This allows calling VIDIOC_REQBUFS with a buffer count of 1 and + * it will be automatically adjusted to a workable buffer count. + * If set, then @min_reqbufs_allocation must be larger than + * @min_queued_buffers + 1. + * If this field is > 3, then it is highly recommended that the + * driver implements the V4L2_CID_MIN_BUFFERS_FOR_CAPTURE/OUTPUT + * control. * @alloc_devs: &struct device memory type/allocator-specific per-plane device */ /* @@ -559,8 +571,9 @@ struct vb2_buf_ops { * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped * @memory: current memory type used * @dma_dir: DMA mapping direction. - * @bufs: videobuf2 buffer structures - * @num_buffers: number of allocated/used buffers + * @bufs: videobuf2 buffer structures. If it is non-NULL then + * bufs_bitmap is also non-NULL. + * @bufs_bitmap: bitmap tracking whether each bufs[] entry is used * @max_num_buffers: upper limit of number of allocated/used buffers. * If set to 0 v4l2 core will change it VB2_MAX_FRAME * for backward compatibility. @@ -582,6 +595,7 @@ struct vb2_buf_ops { * released. Used to prevent destroying the queue by other threads. * @is_multiplanar: set if buffer type is multiplanar * @is_output: set if buffer type is output + * @is_busy: set if at least one buffer has been allocated at some time. * @copy_timestamp: set if vb2-core should set timestamps * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the * last decoded buffer was already dequeued. 
Set for capture queues @@ -621,6 +635,7 @@ struct vb2_queue { u32 timestamp_flags; gfp_t gfp_flags; u32 min_queued_buffers; + u32 min_reqbufs_allocation; struct device *alloc_devs[VB2_MAX_PLANES]; @@ -629,7 +644,7 @@ struct vb2_queue { unsigned int memory; enum dma_data_direction dma_dir; struct vb2_buffer **bufs; - unsigned int num_buffers; + unsigned long *bufs_bitmap; unsigned int max_num_buffers; struct list_head queued_list; @@ -647,6 +662,7 @@ struct vb2_queue { unsigned int waiting_in_dqbuf:1; unsigned int is_multiplanar:1; unsigned int is_output:1; + unsigned int is_busy:1; unsigned int copy_timestamp:1; unsigned int last_buffer_dequeued:1; @@ -811,6 +827,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, * @count: requested buffer count. * @requested_planes: number of planes requested. * @requested_sizes: array with the size of the planes. + * @first_index: index of the first created buffer, all allocated buffers have + * indices in the range [first_index..first_index+count-1] * * Videobuf2 core helper to implement VIDIOC_CREATE_BUFS() operation. It is * called internally by VB2 by an API-specific handler, like @@ -827,7 +845,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, unsigned int flags, unsigned int *count, unsigned int requested_planes, - const unsigned int requested_sizes[]); + const unsigned int requested_sizes[], + unsigned int *first_index); /** * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace @@ -852,6 +871,16 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb); /** + * vb2_core_remove_bufs() - + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @start: first index of the range of buffers to remove. + * @count: number of buffers to remove. + * + * Return: returns zero on success; an error code otherwise. + */ +int vb2_core_remove_bufs(struct vb2_queue *q, unsigned int start, unsigned int count); + +/** * vb2_core_qbuf() - Queue a buffer from userspace * * @q: pointer to &struct vb2_queue with videobuf2 queue. 
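To make the interplay of the two minimum-buffer fields documented above concrete, here is a hedged sketch of a capture driver's queue setup (the my_* name is invented; assumes a DMA engine that needs two queued buffers before start_streaming() can run):

#include <media/videobuf2-core.h>

static int my_init_queue(struct vb2_queue *q)
{
	/* HW needs 2 buffers queued before streaming can start... */
	q->min_queued_buffers = 2;
	/*
	 * ...so REQBUFS must allocate enough that userspace still has a
	 * buffer to dequeue: 4 satisfies the documented rule that
	 * min_reqbufs_allocation be larger than min_queued_buffers + 1.
	 */
	q->min_reqbufs_allocation = 4;
	return vb2_queue_init(q);
}

Note how the bufs_bitmap introduced below replaces the old contiguous num_buffers count: once VIDIOC_REMOVE_BUFS can free arbitrary index ranges, vb2_get_num_buffers() has to count occupied slots with bitmap_weight() rather than assume indices 0..num_buffers-1 are all in use.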
@@ -1155,7 +1184,10 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q) */ static inline unsigned int vb2_get_num_buffers(struct vb2_queue *q) { - return q->num_buffers; + if (q->bufs_bitmap) + return bitmap_weight(q->bufs_bitmap, q->max_num_buffers); + + return 0; } /** @@ -1166,7 +1198,7 @@ static inline unsigned int vb2_get_num_buffers(struct vb2_queue *q) */ static inline bool vb2_is_busy(struct vb2_queue *q) { - return vb2_get_num_buffers(q) > 0; + return !!q->is_busy; } /** @@ -1264,7 +1296,7 @@ static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q, if (index >= q->max_num_buffers) return NULL; - if (index < q->num_buffers) + if (test_bit(index, q->bufs_bitmap)) return q->bufs[index]; return NULL; } diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h index 5a84588785..77ce8238ab 100644 --- a/include/media/videobuf2-v4l2.h +++ b/include/media/videobuf2-v4l2.h @@ -334,6 +334,8 @@ int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i); int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i); int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p); +int vb2_ioctl_remove_bufs(struct file *file, void *priv, + struct v4l2_remove_buffers *p); /* struct v4l2_file_operations helpers */ diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 78ebcf782c..4f785098c6 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -207,6 +207,8 @@ int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err); int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err); +struct netfs_io_subrequest; +void p9_client_write_subreq(struct netfs_io_subrequest *subreq); int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset); int p9dirent_read(struct p9_client *clnt, char *buf, int len, struct p9_dirent *dirent); diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 3dee0b2721..b6eedf7650 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -17,14 +17,31 @@ static inline struct unix_sock *unix_get_socket(struct file *filp) } #endif -extern spinlock_t unix_gc_lock; extern unsigned int unix_tot_inflight; - -void unix_inflight(struct user_struct *user, struct file *fp); -void unix_notinflight(struct user_struct *user, struct file *fp); +void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver); +void unix_del_edges(struct scm_fp_list *fpl); +void unix_update_edges(struct unix_sock *receiver); +int unix_prepare_fpl(struct scm_fp_list *fpl); +void unix_destroy_fpl(struct scm_fp_list *fpl); void unix_gc(void); void wait_for_unix_gc(struct scm_fp_list *fpl); +struct unix_vertex { + struct list_head edges; + struct list_head entry; + struct list_head scc_entry; + unsigned long out_degree; + unsigned long index; + unsigned long scc_index; +}; + +struct unix_edge { + struct unix_sock *predecessor; + struct unix_sock *successor; + struct list_head vertex_entry; + struct list_head stack_entry; +}; + struct sock *unix_peer_get(struct sock *sk); #define UNIX_HASH_MOD (256 - 1) @@ -50,6 +67,7 @@ struct unix_skb_parms { struct scm_stat { atomic_t nr_fds; + unsigned long nr_unix_fds; }; #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) @@ -62,12 +80,9 @@ struct unix_sock { struct path path; struct mutex iolock, bindlock; struct sock *peer; - struct list_head link; - unsigned long inflight; 
+ struct sock *listener; + struct unix_vertex *vertex; spinlock_t lock; - unsigned long gc_flags; -#define UNIX_GC_CANDIDATE 0 -#define UNIX_GC_MAYBE_CYCLE 1 struct socket_wq peer_wq; wait_queue_entry_t peer_wake; struct scm_stat scm_stat; diff --git a/include/net/ax25.h b/include/net/ax25.h index c2a85fd3f5..cb622d84cd 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -139,7 +139,9 @@ enum { AX25_VALUES_N2, /* Default N2 value */ AX25_VALUES_PACLEN, /* AX.25 MTU */ AX25_VALUES_PROTOCOL, /* Std AX.25, DAMA Slave, DAMA Master */ +#ifdef CONFIG_AX25_DAMA_SLAVE AX25_VALUES_DS_TIMEOUT, /* DAMA Slave timeout */ +#endif AX25_MAX_VALUES /* THIS MUST REMAIN THE LAST ENTRY OF THIS LIST */ }; diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index d46320f7fd..e372a88e8c 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -444,7 +444,6 @@ enum { #define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ #define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */ #define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */ -#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */ /* HCI data types */ #define HCI_COMMAND_PKT 0x01 @@ -2050,7 +2049,7 @@ struct hci_cp_le_set_cig_params { __le16 c_latency; __le16 p_latency; __u8 num_cis; - struct hci_cis_params cis[]; + struct hci_cis_params cis[] __counted_by(num_cis); } __packed; struct hci_rp_le_set_cig_params { @@ -2122,7 +2121,7 @@ struct hci_cp_le_big_create_sync { __u8 mse; __le16 timeout; __u8 num_bis; - __u8 bis[]; + __u8 bis[] __counted_by(num_bis); } __packed; #define HCI_OP_LE_BIG_TERM_SYNC 0x206c diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 17b2a7baa4..b15f51ae3b 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -91,8 +91,6 @@ struct discovery_state { s8 rssi; u16 uuid_count; u8 (*uuids)[16]; - unsigned long scan_start; - unsigned long scan_duration; unsigned long name_resolve_timeout; }; @@ -890,8 +888,6 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev) hdev->discovery.uuid_count = 0; kfree(hdev->discovery.uuids); hdev->discovery.uuids = NULL; - hdev->discovery.scan_start = 0; - hdev->discovery.scan_duration = 0; } bool hci_discovery_active(struct hci_dev *hdev); @@ -2220,8 +2216,22 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); /* These LE scan and inquiry parameters were chosen according to LE General * Discovery Procedure specification. 
*/ -#define DISCOV_LE_SCAN_WIN 0x12 -#define DISCOV_LE_SCAN_INT 0x12 +#define DISCOV_LE_SCAN_WIN 0x0012 /* 11.25 msec */ +#define DISCOV_LE_SCAN_INT 0x0012 /* 11.25 msec */ +#define DISCOV_LE_SCAN_INT_FAST 0x0060 /* 60 msec */ +#define DISCOV_LE_SCAN_WIN_FAST 0x0030 /* 30 msec */ +#define DISCOV_LE_SCAN_INT_CONN 0x0060 /* 60 msec */ +#define DISCOV_LE_SCAN_WIN_CONN 0x0060 /* 60 msec */ +#define DISCOV_LE_SCAN_INT_SLOW1 0x0800 /* 1.28 sec */ +#define DISCOV_LE_SCAN_WIN_SLOW1 0x0012 /* 11.25 msec */ +#define DISCOV_LE_SCAN_INT_SLOW2 0x1000 /* 2.56 sec */ +#define DISCOV_LE_SCAN_WIN_SLOW2 0x0024 /* 22.5 msec */ +#define DISCOV_CODED_SCAN_INT_FAST 0x0120 /* 180 msec */ +#define DISCOV_CODED_SCAN_WIN_FAST 0x0090 /* 90 msec */ +#define DISCOV_CODED_SCAN_INT_SLOW1 0x1800 /* 3.84 sec */ +#define DISCOV_CODED_SCAN_WIN_SLOW1 0x0036 /* 33.75 msec */ +#define DISCOV_CODED_SCAN_INT_SLOW2 0x3000 /* 7.68 sec */ +#define DISCOV_CODED_SCAN_WIN_SLOW2 0x006c /* 67.5 msec */ #define DISCOV_LE_TIMEOUT 10240 /* msec */ #define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */ #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 3434cfc26b..5cfdc81349 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -463,18 +463,24 @@ struct l2cap_le_credits { #define L2CAP_ECRED_MAX_CID 5 struct l2cap_ecred_conn_req { - __le16 psm; - __le16 mtu; - __le16 mps; - __le16 credits; + /* New members must be added within the struct_group() macro below. */ + __struct_group(l2cap_ecred_conn_req_hdr, hdr, __packed, + __le16 psm; + __le16 mtu; + __le16 mps; + __le16 credits; + ); __le16 scid[]; } __packed; struct l2cap_ecred_conn_rsp { - __le16 mtu; - __le16 mps; - __le16 credits; - __le16 result; + /* New members must be added within the struct_group() macro below. */ + struct_group_tagged(l2cap_ecred_conn_rsp_hdr, hdr, + __le16 mtu; + __le16 mps; + __le16 credits; + __le16 result; + ); __le16 dcid[]; }; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 1e09329acc..cbf1664dc5 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1149,6 +1149,8 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef) * @band_mask: which bands to check on * @prohibited_flags: which channels to not consider usable, * %IEEE80211_CHAN_DISABLED is always taken into account + * + * Return: %true if usable channels found, %false otherwise */ bool cfg80211_any_usable_channels(struct wiphy *wiphy, unsigned long band_mask, @@ -1575,6 +1577,8 @@ struct cfg80211_csa_settings { * @beacon_next: beacon data to be used after the color change * @count: number of beacons until the color change * @color: the color used after the change + * @link_id: defines the link on which color change is expected during MLO. + * 0 in case of non-MLO. */ struct cfg80211_color_change_settings { struct cfg80211_beacon_data beacon_color_change; @@ -1583,6 +1587,7 @@ struct cfg80211_color_change_settings { struct cfg80211_beacon_data beacon_next; u8 count; u8 color; + u8 link_id; }; /** @@ -1833,9 +1838,11 @@ enum cfg80211_station_type { * * Utility function for the @change_station driver method. Call this function * with the appropriate station type looking up the station (and checking that - * it exists). It will verify whether the station change is acceptable, and if - * not will return an error code. Note that it may modify the parameters for - * backward compatibility reasons, so don't use them before calling this. + * it exists). 
It will verify whether the station change is acceptable. + * + * Return: 0 if the change is acceptable, otherwise an error code. Note that + * it may modify the parameters for backward compatibility reasons, so don't + * use them before calling this. */ int cfg80211_check_station_change(struct wiphy *wiphy, struct station_parameters *params, @@ -2229,7 +2236,7 @@ struct cfg80211_sar_capa { * @mac_addr: the mac address of the station of interest * @sinfo: pointer to the structure to fill with the information * - * Returns 0 on success and sinfo is filled with the available information + * Return: 0 on success and sinfo is filled with the available information * otherwise returns a negative error code and the content of sinfo has to be * considered undefined. */ @@ -5334,6 +5341,8 @@ struct wiphy_iftype_ext_capab { * cfg80211_get_iftype_ext_capa - lookup interface type extended capability * @wiphy: the wiphy to look up from * @type: the interface type to look up + * + * Return: The extended capability for the given interface @type, may be %NULL */ const struct wiphy_iftype_ext_capab * cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type); @@ -5904,6 +5913,10 @@ int wiphy_register(struct wiphy *wiphy); /** * get_wiphy_regdom - get custom regdomain for the given wiphy * @wiphy: the wiphy to get the regdomain from + * + * Context: Requires any of RTNL, wiphy mutex or RCU protection. + * + * Return: pointer to the regulatory domain associated with the wiphy */ const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy); @@ -6421,6 +6434,8 @@ ieee80211_get_channel(struct wiphy *wiphy, int freq) * * The Preferred Scanning Channels (PSC) are defined in * Draft IEEE P802.11ax/D5.0, 26.17.2.3.3 + * + * Return: %true if channel is a PSC, %false otherwise */ static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan) { @@ -6450,8 +6465,8 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband, * ieee80211_mandatory_rates - get mandatory rates for a given band * @sband: the band to look for rates in * - * This function returns a bitmap of the mandatory rates for the given - * band, bits are set according to the rate position in the bitrates array. + * Return: a bitmap of the mandatory rates for the given band, bits + * are set according to the rate position in the bitrates array. */ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband); @@ -6665,6 +6680,8 @@ bool ieee80211_get_8023_tunnel_proto(const void *hdr, __be16 *proto); * header to the ethernet header (if present). * * @skb: The 802.3 frame with embedded mesh header + * + * Return: 0 on success. Non-zero on error. */ int ieee80211_strip_8023_mesh_hdr(struct sk_buff *skb); @@ -7043,6 +7060,8 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy, * * You can use this to map the regulatory request initiator enum to a * proper string representation. + * + * Return: pointer to string representation of the initiator */ const char *reg_initiator_name(enum nl80211_reg_initiator initiator); @@ -7051,6 +7070,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator); * @wiphy: wiphy for which pre-CAC capability is checked. * * Pre-CAC is allowed only in some regdomains (notably ETSI).
+ * + * Return: %true if allowed, %false otherwise */ bool regulatory_pre_cac_allowed(struct wiphy *wiphy); @@ -7185,6 +7206,8 @@ static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid, * cfg80211_is_element_inherited - returns if element ID should be inherited * @element: element to check * @non_inherit_element: non inheritance element + * + * Return: %true if should be inherited, %false otherwise */ bool cfg80211_is_element_inherited(const struct element *element, const struct element *non_inherit_element); @@ -7197,6 +7220,8 @@ bool cfg80211_is_element_inherited(const struct element *element, * @sub_elem: current MBSSID subelement (profile) * @merged_ie: location of the merged profile * @max_copy_len: max merged profile length + * + * Return: the number of bytes merged */ size_t cfg80211_merge_profile(const u8 *ie, size_t ielen, const struct element *mbssid_elem, @@ -7224,7 +7249,7 @@ enum cfg80211_bss_frame_type { * @ielen: length of IEs * @band: enum nl80211_band of the channel * - * Returns the channel number, or -1 if none could be determined. + * Return: the channel number, or -1 if none could be determined. */ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen, enum nl80211_band band); @@ -7302,6 +7327,8 @@ cfg80211_inform_bss(struct wiphy *wiphy, * @bss_type: type of BSS, see &enum ieee80211_bss_type * @privacy: privacy filter, see &enum ieee80211_privacy * @use_for: indicates which use is intended + * + * Return: Reference-counted BSS on success. %NULL on error. */ struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, @@ -7322,6 +7349,8 @@ struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy, * @privacy: privacy filter, see &enum ieee80211_privacy * * This version implies regular usage, %NL80211_BSS_USE_FOR_NORMAL. + * + * Return: Reference-counted BSS on success. %NULL on error. */ static inline struct cfg80211_bss * cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, @@ -7706,8 +7735,9 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb); * cfg80211_vendor_cmd_get_sender - get the current sender netlink ID * @wiphy: the wiphy * - * Return the current netlink port ID in a vendor command handler. - * Valid to call only there. + * Return: the current netlink port ID in a vendor command handler. + * + * Context: May only be called from a vendor command handler */ unsigned int cfg80211_vendor_cmd_get_sender(struct wiphy *wiphy); @@ -8260,6 +8290,8 @@ void cfg80211_tx_mgmt_expired(struct wireless_dev *wdev, u64 cookie, * * @sinfo: the station information * @gfp: allocation flags + * + * Return: 0 on success. Non-zero on error. */ int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp); @@ -8777,13 +8809,13 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, * also checks if IR-relaxation conditions apply, to allow beaconing under * more permissive conditions. * - * Requires the wiphy mutex to be held. + * Context: Requires the wiphy mutex to be held. 
*/ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype); -/* +/** * cfg80211_ch_switch_notify - update wdev channel and notify userspace * @dev: the device which switched channels * @chandef: the new channel definition @@ -8796,7 +8828,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev, struct cfg80211_chan_def *chandef, unsigned int link_id); -/* +/** * cfg80211_ch_switch_started_notify - notify channel switch start * @dev: the device on which the channel switch started * @chandef: the future channel definition @@ -8819,7 +8851,7 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev, * @operating_class: the operating class to convert * @band: band pointer to fill * - * Returns %true if the conversion was successful, %false otherwise. + * Return: %true if the conversion was successful, %false otherwise. */ bool ieee80211_operating_class_to_band(u8 operating_class, enum nl80211_band *band); @@ -8831,7 +8863,7 @@ bool ieee80211_operating_class_to_band(u8 operating_class, * @chan: the ieee80211_channel to convert * @chandef: a pointer to the resulting chandef * - * Returns %true if the conversion was successful, %false otherwise. + * Return: %true if the conversion was successful, %false otherwise. */ bool ieee80211_operating_class_to_chandef(u8 operating_class, struct ieee80211_channel *chan, @@ -8843,7 +8875,7 @@ bool ieee80211_operating_class_to_chandef(u8 operating_class, * @chandef: the chandef to convert * @op_class: a pointer to the resulting operating class * - * Returns %true if the conversion was successful, %false otherwise. + * Return: %true if the conversion was successful, %false otherwise. */ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, u8 *op_class); @@ -8853,7 +8885,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, * * @chandef: the chandef to convert * - * Returns the center frequency of chandef (1st segment) in KHz. + * Return: the center frequency of chandef (1st segment) in KHz. */ static inline u32 ieee80211_chandef_to_khz(const struct cfg80211_chan_def *chandef) @@ -8861,7 +8893,7 @@ ieee80211_chandef_to_khz(const struct cfg80211_chan_def *chandef) return MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset; } -/* +/** * cfg80211_tdls_oper_request - request userspace to perform TDLS operation * @dev: the device on which the operation is requested * @peer: the MAC address of the peer device @@ -8880,11 +8912,11 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp); -/* +/** * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units) * @rate: given rate_info to calculate bitrate from * - * return 0 if MCS index >= 32 + * Return: calculated bitrate */ u32 cfg80211_calculate_bitrate(struct rate_info *rate); @@ -8898,7 +8930,7 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate); * when the driver wishes to unregister the wdev, e.g. when the hardware device * is unbound from the driver. * - * Requires the RTNL and wiphy mutex to be held. + * Context: Requires the RTNL and wiphy mutex to be held. */ void cfg80211_unregister_wdev(struct wireless_dev *wdev); @@ -8911,7 +8943,9 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev); * held. Otherwise, both register_netdevice() and register_netdev() are usable * instead as well. * - * Requires the RTNL and wiphy mutex to be held. 
+ * Context: Requires the RTNL and wiphy mutex to be held. + * + * Return: 0 on success. Non-zero on error. */ int cfg80211_register_netdevice(struct net_device *dev); @@ -8924,7 +8958,7 @@ int cfg80211_register_netdevice(struct net_device *dev); * is held. Otherwise, both unregister_netdevice() and unregister_netdev() are * usable instead as well. * - * Requires the RTNL and wiphy mutex to be held. + * Context: Requires the RTNL and wiphy mutex to be held. */ static inline void cfg80211_unregister_netdevice(struct net_device *dev) { @@ -9000,9 +9034,9 @@ int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len, * correctly, if not the result of using this function will not * be ordered correctly either, i.e. it does no reordering. * - * The function returns the offset where the next part of the - * buffer starts, which may be @ielen if the entire (remainder) - * of the buffer should be used. + * Return: The offset where the next part of the buffer starts, which + * may be @ielen if the entire (remainder) of the buffer should be + * used. */ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, @@ -9030,9 +9064,9 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen, * correctly, if not the result of using this function will not * be ordered correctly either, i.e. it does no reordering. * - * The function returns the offset where the next part of the - * buffer starts, which may be @ielen if the entire (remainder) - * of the buffer should be used. + * Return: The offset where the next part of the buffer starts, which + * may be @ielen if the entire (remainder) of the buffer should be + * used. */ static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, size_t offset) @@ -9096,6 +9130,8 @@ unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy); * This function can be called by the driver to check whether a * combination of interfaces and their types are allowed according to * the interface combinations. + * + * Return: 0 if combinations are allowed. Non-zero on error. */ int cfg80211_check_combinations(struct wiphy *wiphy, struct iface_combination_params *params); @@ -9111,6 +9147,8 @@ int cfg80211_check_combinations(struct wiphy *wiphy, * This function can be called by the driver to check what possible * combinations it fits in at a given moment, e.g. for channel switching * purposes. + * + * Return: 0 on success. Non-zero on error. */ int cfg80211_iter_combinations(struct wiphy *wiphy, struct iface_combination_params *params, @@ -9118,7 +9156,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, void *data), void *data); -/* +/** * cfg80211_stop_iface - trigger interface disconnection * * @wiphy: the wiphy @@ -9173,6 +9211,8 @@ static inline void wiphy_ext_feature_set(struct wiphy *wiphy, * * The extended features are flagged in multiple bytes (see * &struct wiphy.@ext_features) + * + * Return: %true if extended feature flag is set, %false otherwise */ static inline bool wiphy_ext_feature_isset(struct wiphy *wiphy, @@ -9294,6 +9334,8 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev, * Check whether the interface is allowed to operate; additionally, this API * can be used to check iftype against the software interfaces when * check_swif is '1'. 
+ * + * Return: %true if allowed, %false otherwise */ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype, bool is_4addr, u8 check_swif); @@ -9386,60 +9428,78 @@ void cfg80211_bss_flush(struct wiphy *wiphy); * @cmd: the actual event we want to notify * @count: the number of TBTTs until the color change happens * @color_bitmap: representations of the colors that the local BSS is aware of + * @link_id: valid link_id in case of MLO or 0 for non-MLO. + * + * Return: 0 on success. Non-zero on error. */ int cfg80211_bss_color_notify(struct net_device *dev, enum nl80211_commands cmd, u8 count, - u64 color_bitmap); + u64 color_bitmap, u8 link_id); /** * cfg80211_obss_color_collision_notify - notify about bss color collision * @dev: network device * @color_bitmap: representations of the colors that the local BSS is aware of + * @link_id: valid link_id in case of MLO or 0 for non-MLO. + * + * Return: 0 on success. Non-zero on error. */ static inline int cfg80211_obss_color_collision_notify(struct net_device *dev, - u64 color_bitmap) + u64 color_bitmap, + u8 link_id) { return cfg80211_bss_color_notify(dev, NL80211_CMD_OBSS_COLOR_COLLISION, - 0, color_bitmap); + 0, color_bitmap, link_id); } /** * cfg80211_color_change_started_notify - notify color change start * @dev: the device on which the color is switched * @count: the number of TBTTs until the color change happens + * @link_id: valid link_id in case of MLO or 0 for non-MLO. * * Inform the userspace about the color change that has started. + * + * Return: 0 on success. Non-zero on error. */ static inline int cfg80211_color_change_started_notify(struct net_device *dev, - u8 count) + u8 count, u8 link_id) { return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_STARTED, - count, 0); + count, 0, link_id); } /** * cfg80211_color_change_aborted_notify - notify color change abort * @dev: the device on which the color is switched + * @link_id: valid link_id in case of MLO or 0 for non-MLO. * * Inform the userspace about the color change that has aborted. + * + * Return: 0 on success. Non-zero on error. */ -static inline int cfg80211_color_change_aborted_notify(struct net_device *dev) +static inline int cfg80211_color_change_aborted_notify(struct net_device *dev, + u8 link_id) { return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_ABORTED, - 0, 0); + 0, 0, link_id); } /** * cfg80211_color_change_notify - notify color change completion * @dev: the device on which the color was switched + * @link_id: valid link_id in case of MLO or 0 for non-MLO. * * Inform the userspace about the color change that has completed. + * + * Return: 0 on success. Non-zero on error. 
*/ -static inline int cfg80211_color_change_notify(struct net_device *dev) +static inline int cfg80211_color_change_notify(struct net_device *dev, + u8 link_id) { return cfg80211_bss_color_notify(dev, NL80211_CMD_COLOR_CHANGE_COMPLETED, - 0, 0); + 0, 0, link_id); } /** @@ -9477,6 +9537,8 @@ void cfg80211_schedule_channels_check(struct wireless_dev *wdev); * @ppos: read position * @handler: the read handler to call (under wiphy lock) * @data: additional data to pass to the read handler + * + * Return: the number of characters read, or a negative errno */ ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file, char *buf, size_t bufsize, @@ -9499,6 +9561,8 @@ ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file, * @count: read count * @handler: the write handler to call (under wiphy lock) * @data: additional data to pass to the write handler + * + * Return: the number of characters written, or a negative errno */ ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file, char *buf, size_t bufsize, diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h index 53dd7d988a..c9111bb2f5 100644 --- a/include/net/cipso_ipv4.h +++ b/include/net/cipso_ipv4.h @@ -183,7 +183,8 @@ int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr); int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, - const struct netlbl_lsm_secattr *secattr); + const struct netlbl_lsm_secattr *secattr, + bool sk_locked); void cipso_v4_sock_delattr(struct sock *sk); int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr); int cipso_v4_req_setattr(struct request_sock *req, @@ -214,7 +215,8 @@ static inline int cipso_v4_getattr(const unsigned char *cipso, static inline int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, - const struct netlbl_lsm_secattr *secattr) + const struct netlbl_lsm_secattr *secattr, + bool sk_locked) { return -ENOSYS; } diff --git a/include/net/devlink.h b/include/net/devlink.h index 9ac394bdfb..35eb0f8843 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -483,7 +483,8 @@ struct devlink_param { int (*get)(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx); int (*set)(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx); + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack); int (*validate)(struct devlink *devlink, u32 id, union devlink_param_value val, struct netlink_ext_ack *extack); @@ -599,12 +600,14 @@ enum devlink_param_generic_id { .validate = _validate, \ } -/* Part number, identifier of board design */ +/* Identifier of board design */ #define DEVLINK_INFO_VERSION_GENERIC_BOARD_ID "board.id" /* Revision of board design */ #define DEVLINK_INFO_VERSION_GENERIC_BOARD_REV "board.rev" /* Maker of the board */ #define DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE "board.manufacture" +/* Part number of the board and its components */ +#define DEVLINK_INFO_VERSION_GENERIC_BOARD_PART_NUMBER "board.part_number" /* Part number, identifier of asic design */ #define DEVLINK_INFO_VERSION_GENERIC_ASIC_ID "asic.id" @@ -1602,6 +1605,14 @@ void devlink_free(struct devlink *devlink); * capability. Should be used by device drivers to * enable/disable ipsec_packet capability of a * function managed by the devlink port. + * @port_fn_max_io_eqs_get: Callback used to get port function's maximum number + * of event queues. 
Should be used by device drivers to + * report the maximum event queues of a function + * managed by the devlink port. + * @port_fn_max_io_eqs_set: Callback used to set port function's maximum number + * of event queues. Should be used by device drivers to + * configure maximum number of event queues + * of a function managed by the devlink port. * * Note: Driver should return -EOPNOTSUPP if it doesn't support * port function (@port_fn_*) handling for a particular port. @@ -1651,6 +1662,12 @@ struct devlink_port_ops { int (*port_fn_ipsec_packet_set)(struct devlink_port *devlink_port, bool enable, struct netlink_ext_ack *extack); + int (*port_fn_max_io_eqs_get)(struct devlink_port *devlink_port, + u32 *max_eqs, + struct netlink_ext_ack *extack); + int (*port_fn_max_io_eqs_set)(struct devlink_port *devlink_port, + u32 max_eqs, + struct netlink_ext_ack *extack); }; void devlink_port_init(struct devlink *devlink, diff --git a/include/net/dsa.h b/include/net/dsa.h index 7c0da9effe..b60e7e410a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -24,9 +24,6 @@ struct dsa_8021q_context; struct tc_action; -struct phy_device; -struct fixed_phy_status; -struct phylink_link_state; #define DSA_TAG_PROTO_NONE_VALUE 0 #define DSA_TAG_PROTO_BRCM_VALUE 1 @@ -327,6 +324,12 @@ struct dsa_port { }; }; +static inline struct dsa_port * +dsa_phylink_to_port(struct phylink_config *config) +{ + return container_of(config, struct dsa_port, pl_config); +} + /* TODO: ideally DSA ports would have a single dp->link_dp member, * and no dst->rtable nor this struct dsa_link would be needed, * but this would require some more complex tree walking, @@ -430,6 +433,11 @@ struct dsa_switch { */ u32 fdb_isolation:1; + /* Drivers that have global DSCP mapping settings must set this to + * true to automatically apply the settings to all ports. + */ + u32 dscp_prio_mapping_is_global:1; + /* Listener for switch fabric events */ struct notifier_block nb; @@ -452,6 +460,11 @@ struct dsa_switch { const struct dsa_switch_ops *ops; /* + * Allow a DSA switch driver to override the phylink MAC ops + */ + const struct phylink_mac_ops *phylink_mac_ops; + + /* * User mii_bus and devices for the individual ports. 
*/ u32 phys_mii_mask; @@ -578,6 +591,10 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p) dsa_switch_for_each_port((_dp), (_ds)) \ if (dsa_port_is_user((_dp))) +#define dsa_switch_for_each_user_port_continue_reverse(_dp, _ds) \ + dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \ + if (dsa_port_is_user((_dp))) + #define dsa_switch_for_each_cpu_port(_dp, _ds) \ dsa_switch_for_each_port((_dp), (_ds)) \ if (dsa_port_is_cpu((_dp))) @@ -858,14 +875,6 @@ struct dsa_switch_ops { int regnum, u16 val); /* - * Link state adjustment (called from libphy) - */ - void (*adjust_link)(struct dsa_switch *ds, int port, - struct phy_device *phydev); - void (*fixed_link_update)(struct dsa_switch *ds, int port, - struct fixed_phy_status *st); - - /* * PHYLINK integration */ void (*phylink_get_caps)(struct dsa_switch *ds, int port, @@ -955,6 +964,10 @@ struct dsa_switch_ops { u8 prio); int (*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp, u8 prio); + int (*port_set_apptrust)(struct dsa_switch *ds, int port, + const u8 *sel, int nsel); + int (*port_get_apptrust)(struct dsa_switch *ds, int port, u8 *sel, + int *nsel); /* * Suspend and resume @@ -1247,7 +1260,8 @@ struct dsa_switch_ops { int dsa_devlink_param_get(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx); int dsa_devlink_param_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx); + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack); int dsa_devlink_params_register(struct dsa_switch *ds, const struct devlink_param *params, size_t params_count); diff --git a/include/net/dscp.h b/include/net/dscp.h new file mode 100644 index 0000000000..ba40540868 --- /dev/null +++ b/include/net/dscp.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> */ + +#ifndef __DSCP_H__ +#define __DSCP_H__ + +/* + * DSCP Pools and Codepoint Space Division: + * + * The Differentiated Services (Diffserv) architecture defines a method for + * classifying and managing network traffic using the DS field in IPv4 and IPv6 + * packet headers. This field can carry one of 64 distinct DSCP (Differentiated + * Services Code Point) values, which are divided into three pools based on + * their Least Significant Bits (LSB) patterns and intended usage. Each pool has + * a specific registration procedure for assigning DSCP values: + * + * Pool 1 (Standards Action Pool): + * - Codepoint Space: xxxxx0 + * This pool includes DSCP values ending in '0' (binary), allocated via + * Standards Action. It is intended for globally recognized traffic classes, + * ensuring interoperability across the internet. This pool encompasses + * well-known DSCP values such as CS0-CS7, AFxx, EF, and VOICE-ADMIT. + * + * Pool 2 (Experimental/Local Use Pool): + * - Codepoint Space: xxxx11 + * Reserved for DSCP values ending in '11' (binary), this pool is designated + * for Experimental or Local Use. It allows for private or temporary traffic + * marking schemes not intended for standardized global use, facilitating + * testing and network-specific configurations without impacting + * interoperability. + * + * Pool 3 (Preferential Standardization Pool): + * - Codepoint Space: xxxx01 + * Initially reserved for experimental or local use, this pool now serves as + * a secondary standardization resource should Pool 1 become exhausted. 
DSCP + * values ending in '01' (binary) are assigned via Standards Action, with a + * focus on adopting new, standardized traffic classes as the need arises. + * + * For pool updates see: + * https://www.iana.org/assignments/dscp-registry/dscp-registry.xhtml + */ + +/* Pool 1: Standardized DSCP values as per [RFC8126] */ +#define DSCP_CS0 0 /* 000000, [RFC2474] */ +/* CS0 is sometimes called default (DF) */ +#define DSCP_DF 0 /* 000000, [RFC2474] */ +#define DSCP_CS1 8 /* 001000, [RFC2474] */ +#define DSCP_CS2 16 /* 010000, [RFC2474] */ +#define DSCP_CS3 24 /* 011000, [RFC2474] */ +#define DSCP_CS4 32 /* 100000, [RFC2474] */ +#define DSCP_CS5 40 /* 101000, [RFC2474] */ +#define DSCP_CS6 48 /* 110000, [RFC2474] */ +#define DSCP_CS7 56 /* 111000, [RFC2474] */ +#define DSCP_AF11 10 /* 001010, [RFC2597] */ +#define DSCP_AF12 12 /* 001100, [RFC2597] */ +#define DSCP_AF13 14 /* 001110, [RFC2597] */ +#define DSCP_AF21 18 /* 010010, [RFC2597] */ +#define DSCP_AF22 20 /* 010100, [RFC2597] */ +#define DSCP_AF23 22 /* 010110, [RFC2597] */ +#define DSCP_AF31 26 /* 011010, [RFC2597] */ +#define DSCP_AF32 28 /* 011100, [RFC2597] */ +#define DSCP_AF33 30 /* 011110, [RFC2597] */ +#define DSCP_AF41 34 /* 100010, [RFC2597] */ +#define DSCP_AF42 36 /* 100100, [RFC2597] */ +#define DSCP_AF43 38 /* 100110, [RFC2597] */ +#define DSCP_EF 46 /* 101110, [RFC3246] */ +#define DSCP_VOICE_ADMIT 44 /* 101100, [RFC5865] */ + +/* Pool 3: Standardized assignments, previously available for experimental/local + * use + */ +#define DSCP_LE 1 /* 000001, [RFC8622] */ + +#define DSCP_MAX 64 + +#endif /* __DSCP_H__ */ diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h index df6622a5fe..b4a55d2d5e 100644 --- a/include/net/dst_cache.h +++ b/include/net/dst_cache.h @@ -76,7 +76,7 @@ struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, */ static inline void dst_cache_reset(struct dst_cache *dst_cache) { - dst_cache->reset_ts = jiffies; + WRITE_ONCE(dst_cache->reset_ts, jiffies); } /** diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 1b7fae4c6b..4160731dcb 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -198,7 +198,7 @@ static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr, __be32 daddr, __u8 tos, __u8 ttl, __be16 tp_dst, - __be16 flags, + const unsigned long *flags, __be64 tunnel_id, int md_size) { @@ -215,7 +215,7 @@ static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr, } static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, - __be16 flags, + const unsigned long *flags, __be64 tunnel_id, int md_size) { @@ -230,7 +230,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad __u8 tos, __u8 ttl, __be16 tp_dst, __be32 label, - __be16 flags, + const unsigned long *flags, __be64 tunnel_id, int md_size) { @@ -243,7 +243,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad info = &tun_dst->u.tun_info; info->mode = IP_TUNNEL_INFO_IPV6; - info->key.tun_flags = flags; + ip_tunnel_flags_copy(info->key.tun_flags, flags); info->key.tun_id = tunnel_id; info->key.tp_src = 0; info->key.tp_dst = tp_dst; @@ -259,7 +259,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad } static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb, - __be16 flags, + const unsigned long *flags, __be64 tunnel_id, int md_size) { diff --git a/include/net/espintcp.h b/include/net/espintcp.h index 0335bbd765..c70efd704b 100644 ---
a/include/net/espintcp.h +++ b/include/net/espintcp.h @@ -32,7 +32,7 @@ struct espintcp_ctx { static inline struct espintcp_ctx *espintcp_getctx(const struct sock *sk) { - struct inet_connection_sock *icsk = inet_csk(sk); + const struct inet_connection_sock *icsk = inet_csk(sk); /* RCU is only needed for diag */ return (__force void *)icsk->icsk_ulp_data; diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 1a7131d6cb..9ab376d1a6 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -97,7 +97,7 @@ struct flow_dissector_key_enc_opts { * here but seems difficult to #include */ u8 len; - __be16 dst_opt_type; + u32 dst_opt_type; }; struct flow_dissector_key_keyid { diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 314087a5e1..ec9f80509f 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -345,7 +345,7 @@ static inline bool flow_action_has_entries(const struct flow_action *action) * flow_offload_has_one_action() - check if exactly one action is present * @action: tc filter flow offload action * - * Returns true if exactly one action is present. + * Return: true if exactly one action is present. */ static inline bool flow_offload_has_one_action(const struct flow_action *action) { @@ -449,6 +449,61 @@ static inline bool flow_rule_match_key(const struct flow_rule *rule, return dissector_uses_key(rule->match.dissector, key); } +/** + * flow_rule_is_supp_control_flags() - check for supported control flags + * @supp_flags: control flags supported by driver + * @ctrl_flags: control flags present in rule + * @extack: The netlink extended ACK for reporting errors. + * + * Return: true if only supported control flags are set, false otherwise. + */ +static inline bool flow_rule_is_supp_control_flags(const u32 supp_flags, + const u32 ctrl_flags, + struct netlink_ext_ack *extack) +{ + if (likely((ctrl_flags & ~supp_flags) == 0)) + return true; + + NL_SET_ERR_MSG_FMT_MOD(extack, + "Unsupported match on control.flags %#x", + ctrl_flags); + + return false; +} + +/** + * flow_rule_has_control_flags() - check for presence of any control flags + * @ctrl_flags: control flags present in rule + * @extack: The netlink extended ACK for reporting errors. + * + * Return: true if control flags are set, false otherwise. + */ +static inline bool flow_rule_has_control_flags(const u32 ctrl_flags, + struct netlink_ext_ack *extack) +{ + return !flow_rule_is_supp_control_flags(0, ctrl_flags, extack); +} + +/** + * flow_rule_match_has_control_flags() - match and check for any control flags + * @rule: The flow_rule under evaluation. + * @extack: The netlink extended ACK for reporting errors. + * + * Return: true if control flags are set, false otherwise. 
+ */ +static inline bool flow_rule_match_has_control_flags(struct flow_rule *rule, + struct netlink_ext_ack *extack) +{ + struct flow_match_control match; + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) + return false; + + flow_rule_match_control(rule, &match); + + return flow_rule_has_control_flags(match.mask->flags, extack); +} + struct flow_stats { u64 pkts; u64 bytes; diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 9ece6e5a3e..9ab49bfeae 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -2,12 +2,20 @@ #ifndef __NET_GENERIC_NETLINK_H #define __NET_GENERIC_NETLINK_H -#include <linux/genetlink.h> +#include <linux/net.h> #include <net/netlink.h> #include <net/net_namespace.h> +#include <uapi/linux/genetlink.h> #define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN) +/* Non-parallel generic netlink requests are serialized by a global lock. */ +void genl_lock(void); +void genl_unlock(void); + +#define MODULE_ALIAS_GENL_FAMILY(family) \ + MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) + /* Binding to multicast group requires %CAP_NET_ADMIN */ #define GENL_MCAST_CAP_NET_ADMIN BIT(0) /* Binding to multicast group requires %CAP_SYS_ADMIN */ diff --git a/include/net/gre.h b/include/net/gre.h index 4e209708b7..ccd2932032 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -49,67 +49,61 @@ static inline bool netif_is_ip6gretap(const struct net_device *dev) !strcmp(dev->rtnl_link_ops->kind, "ip6gretap"); } -static inline int gre_calc_hlen(__be16 o_flags) +static inline int gre_calc_hlen(const unsigned long *o_flags) { int addend = 4; - if (o_flags & TUNNEL_CSUM) + if (test_bit(IP_TUNNEL_CSUM_BIT, o_flags)) addend += 4; - if (o_flags & TUNNEL_KEY) + if (test_bit(IP_TUNNEL_KEY_BIT, o_flags)) addend += 4; - if (o_flags & TUNNEL_SEQ) + if (test_bit(IP_TUNNEL_SEQ_BIT, o_flags)) addend += 4; return addend; } -static inline __be16 gre_flags_to_tnl_flags(__be16 flags) +static inline void gre_flags_to_tnl_flags(unsigned long *dst, __be16 flags) { - __be16 tflags = 0; - - if (flags & GRE_CSUM) - tflags |= TUNNEL_CSUM; - if (flags & GRE_ROUTING) - tflags |= TUNNEL_ROUTING; - if (flags & GRE_KEY) - tflags |= TUNNEL_KEY; - if (flags & GRE_SEQ) - tflags |= TUNNEL_SEQ; - if (flags & GRE_STRICT) - tflags |= TUNNEL_STRICT; - if (flags & GRE_REC) - tflags |= TUNNEL_REC; - if (flags & GRE_VERSION) - tflags |= TUNNEL_VERSION; - - return tflags; + IP_TUNNEL_DECLARE_FLAGS(res) = { }; + + __assign_bit(IP_TUNNEL_CSUM_BIT, res, flags & GRE_CSUM); + __assign_bit(IP_TUNNEL_ROUTING_BIT, res, flags & GRE_ROUTING); + __assign_bit(IP_TUNNEL_KEY_BIT, res, flags & GRE_KEY); + __assign_bit(IP_TUNNEL_SEQ_BIT, res, flags & GRE_SEQ); + __assign_bit(IP_TUNNEL_STRICT_BIT, res, flags & GRE_STRICT); + __assign_bit(IP_TUNNEL_REC_BIT, res, flags & GRE_REC); + __assign_bit(IP_TUNNEL_VERSION_BIT, res, flags & GRE_VERSION); + + ip_tunnel_flags_copy(dst, res); } -static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags) +static inline __be16 gre_tnl_flags_to_gre_flags(const unsigned long *tflags) { __be16 flags = 0; - if (tflags & TUNNEL_CSUM) + if (test_bit(IP_TUNNEL_CSUM_BIT, tflags)) flags |= GRE_CSUM; - if (tflags & TUNNEL_ROUTING) + if (test_bit(IP_TUNNEL_ROUTING_BIT, tflags)) flags |= GRE_ROUTING; - if (tflags & TUNNEL_KEY) + if (test_bit(IP_TUNNEL_KEY_BIT, tflags)) flags |= GRE_KEY; - if (tflags & TUNNEL_SEQ) + if (test_bit(IP_TUNNEL_SEQ_BIT, tflags)) flags |= GRE_SEQ; - if (tflags & TUNNEL_STRICT) + if 
(test_bit(IP_TUNNEL_STRICT_BIT, tflags)) flags |= GRE_STRICT; - if (tflags & TUNNEL_REC) + if (test_bit(IP_TUNNEL_REC_BIT, tflags)) flags |= GRE_REC; - if (tflags & TUNNEL_VERSION) + if (test_bit(IP_TUNNEL_VERSION_BIT, tflags)) flags |= GRE_VERSION; return flags; } static inline void gre_build_header(struct sk_buff *skb, int hdr_len, - __be16 flags, __be16 proto, + const unsigned long *flags, __be16 proto, __be32 key, __be32 seq) { + IP_TUNNEL_DECLARE_FLAGS(cond) = { }; struct gre_base_hdr *greh; skb_push(skb, hdr_len); @@ -120,18 +114,22 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len, greh->flags = gre_tnl_flags_to_gre_flags(flags); greh->protocol = proto; - if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) { + __set_bit(IP_TUNNEL_KEY_BIT, cond); + __set_bit(IP_TUNNEL_CSUM_BIT, cond); + __set_bit(IP_TUNNEL_SEQ_BIT, cond); + + if (ip_tunnel_flags_intersect(flags, cond)) { __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); - if (flags & TUNNEL_SEQ) { + if (test_bit(IP_TUNNEL_SEQ_BIT, flags)) { *ptr = seq; ptr--; } - if (flags & TUNNEL_KEY) { + if (test_bit(IP_TUNNEL_KEY_BIT, flags)) { *ptr = key; ptr--; } - if (flags & TUNNEL_CSUM && + if (test_bit(IP_TUNNEL_CSUM_BIT, flags) && !(skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) { *ptr = 0; diff --git a/include/net/gro.h b/include/net/gro.h index c1d4ca0463..b9b58c1f8d 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -36,15 +36,14 @@ struct napi_gro_cb { /* This is non-zero if the packet cannot be merged with the new skb. */ u16 flush; - /* Save the IP ID here and check when we get to the transport layer */ - u16 flush_id; - /* Number of segments aggregated. */ u16 count; /* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */ u16 proto; + u16 pad; + /* Used in napi_gro_cb::free */ #define NAPI_GRO_FREE 1 #define NAPI_GRO_FREE_STOLEN_HEAD 2 @@ -75,8 +74,8 @@ struct napi_gro_cb { /* Used in GRE, set in fou/gue_gro_receive */ u8 is_fou:1; - /* Used to determine if flush_id can be ignored */ - u8 is_atomic:1; + /* Used to determine if ipid_offset can be ignored */ + u8 ip_fixedid:1; /* Number of gro_receive callbacks this packet already went through */ u8 recursion_counter:4; @@ -181,12 +180,17 @@ static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen, return ptr; } +static inline int skb_gro_receive_network_offset(const struct sk_buff *skb) +{ + return NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark]; +} + static inline void *skb_gro_network_header(const struct sk_buff *skb) { if (skb_gro_may_pull(skb, skb_gro_offset(skb))) - return skb_gro_header_fast(skb, skb_network_offset(skb)); + return skb_gro_header_fast(skb, skb_gro_receive_network_offset(skb)); - return skb_network_header(skb); + return skb->data + skb_gro_receive_network_offset(skb); } static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb, @@ -437,7 +441,71 @@ static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb, skb_gro_len(skb), proto, 0)); } +static inline int inet_gro_flush(const struct iphdr *iph, const struct iphdr *iph2, + struct sk_buff *p, bool outer) +{ + const u32 id = ntohl(*(__be32 *)&iph->id); + const u32 id2 = ntohl(*(__be32 *)&iph2->id); + const u16 ipid_offset = (id >> 16) - (id2 >> 16); + const u16 count = NAPI_GRO_CB(p)->count; + const u32 df = id & IP_DF; + int flush; + + /* All fields must match except length and checksum. 
*/ + flush = (iph->ttl ^ iph2->ttl) | (iph->tos ^ iph2->tos) | (df ^ (id2 & IP_DF)); + + if (flush | (outer && df)) + return flush; + + /* When we receive our second frame we can make a decision on if we + * continue this flow as an atomic flow with a fixed ID or if we use + * an incrementing ID. + */ + if (count == 1 && df && !ipid_offset) + NAPI_GRO_CB(p)->ip_fixedid = true; + + return ipid_offset ^ (count * !NAPI_GRO_CB(p)->ip_fixedid); +} + +static inline int ipv6_gro_flush(const struct ipv6hdr *iph, const struct ipv6hdr *iph2) +{ + /* <Version:4><Traffic_Class:8><Flow_Label:20> */ + __be32 first_word = *(__be32 *)iph ^ *(__be32 *)iph2; + + /* Flush if Traffic Class fields are different. */ + return !!((first_word & htonl(0x0FF00000)) | + (__force __be32)(iph->hop_limit ^ iph2->hop_limit)); +} + +static inline int __gro_receive_network_flush(const void *th, const void *th2, + struct sk_buff *p, const u16 diff, + bool outer) +{ + const void *nh = th - diff; + const void *nh2 = th2 - diff; + + if (((struct iphdr *)nh)->version == 6) + return ipv6_gro_flush(nh, nh2); + else + return inet_gro_flush(nh, nh2, p, outer); +} + +static inline int gro_receive_network_flush(const void *th, const void *th2, + struct sk_buff *p) +{ + const bool encap_mark = NAPI_GRO_CB(p)->encap_mark; + int off = skb_transport_offset(p); + int flush; + + flush = __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->network_offset, encap_mark); + if (encap_mark) + flush |= __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->inner_network_offset, false); + + return flush; +} + int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); +int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb); /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */ static inline void gro_normal_list(struct napi_struct *napi) diff --git a/include/net/gtp.h b/include/net/gtp.h index 2a503f035d..c0253c8702 100644 --- a/include/net/gtp.h +++ b/include/net/gtp.h @@ -78,4 +78,9 @@ static inline bool netif_is_gtp(const struct net_device *dev) #define GTP1_F_EXTHDR 0x04 #define GTP1_F_MASK 0x07 +struct gtp_ext_hdr { + __u8 len; + __u8 data[]; +}; + #endif diff --git a/include/net/hotdata.h b/include/net/hotdata.h index 003667a1ef..30e9570beb 100644 --- a/include/net/hotdata.h +++ b/include/net/hotdata.h @@ -38,6 +38,9 @@ struct net_hotdata { int max_backlog; int dev_tx_weight; int dev_rx_weight; + int sysctl_max_skb_frags; + int sysctl_skb_defer_max; + int sysctl_mem_pcpu_rsv; }; #define inet_ehash_secret net_hotdata.tcp_protocol.secret diff --git a/include/net/ieee8021q.h b/include/net/ieee8021q.h new file mode 100644 index 0000000000..8bfe903dd3 --- /dev/null +++ b/include/net/ieee8021q.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> */ + +#ifndef _NET_IEEE8021Q_H +#define _NET_IEEE8021Q_H + +#include <linux/errno.h> + +/** + * enum ieee8021q_traffic_type - 802.1Q traffic type priority values (802.1Q-2022) + * + * @IEEE8021Q_TT_BK: Background + * @IEEE8021Q_TT_BE: Best Effort (default). According to 802.1Q-2022, BE is 0 + * but has higher priority than BK which is 1. 
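The return expression of inet_gro_flush() above is worth unpacking: for an incrementing-ID flow the new segment's IP ID must lead the held packet's by exactly count, so ipid_offset ^ count is zero on a match, while a fixed-ID flow (DF set, zero offset on the second segment) zeroes the multiplier via ip_fixedid and then tolerates only a zero offset. A hedged equivalent of just that check (hypothetical helper, not in the header):

/* true when the new segment's IP ID allows merging, mirroring
 * "ipid_offset ^ (count * !ip_fixedid)" evaluating to zero
 */
static bool example_ipid_may_merge(u16 id_new, u16 id_held, u16 count,
				   bool ip_fixedid)
{
	u16 ipid_offset = id_new - id_held;

	return ipid_offset == (ip_fixedid ? 0 : count);
}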
+ * @IEEE8021Q_TT_EE: Excellent Effort + * @IEEE8021Q_TT_CA: Critical Applications + * @IEEE8021Q_TT_VI: Video, < 100 ms latency and jitter + * @IEEE8021Q_TT_VO: Voice, < 10 ms latency and jitter + * @IEEE8021Q_TT_IC: Internetwork Control + * @IEEE8021Q_TT_NC: Network Control + */ +enum ieee8021q_traffic_type { + IEEE8021Q_TT_BK = 0, + IEEE8021Q_TT_BE = 1, + IEEE8021Q_TT_EE = 2, + IEEE8021Q_TT_CA = 3, + IEEE8021Q_TT_VI = 4, + IEEE8021Q_TT_VO = 5, + IEEE8021Q_TT_IC = 6, + IEEE8021Q_TT_NC = 7, + + /* private: */ + IEEE8021Q_TT_MAX, +}; + +#define SIMPLE_IETF_DSCP_TO_IEEE8021Q_TT(dscp) ((dscp >> 3) & 0x7) + +#if IS_ENABLED(CONFIG_NET_IEEE8021Q_HELPERS) + +int ietf_dscp_to_ieee8021q_tt(u8 dscp); +int ieee8021q_tt_to_tc(enum ieee8021q_traffic_type tt, unsigned int num_queues); + +#else + +static inline int ietf_dscp_to_ieee8021q_tt(u8 dscp) +{ + return -EOPNOTSUPP; +} + +static inline int ieee8021q_tt_to_tc(enum ieee8021q_traffic_type tt, + unsigned int num_queues) +{ + return -EOPNOTSUPP; +} + +#endif +#endif /* _NET_IEEE8021Q_H */ diff --git a/include/net/inet_common.h b/include/net/inet_common.h index f50a644d87..c17a6585d0 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -29,8 +29,8 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags, int is_sendmsg); int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); -int inet_accept(struct socket *sock, struct socket *newsock, int flags, - bool kern); +int inet_accept(struct socket *sock, struct socket *newsock, + struct proto_accept_arg *arg); void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk); int inet_send_prepare(struct sock *sk); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 146ece8563..c0deaafebf 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -147,10 +147,7 @@ struct inet_connection_sock { #define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */ #define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */ -static inline struct inet_connection_sock *inet_csk(const struct sock *sk) -{ - return (struct inet_connection_sock *)sk; -} +#define inet_csk(ptr) container_of_const(ptr, struct inet_connection_sock, icsk_inet.sk) static inline void *inet_csk_ca(const struct sock *sk) { @@ -253,7 +250,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk, return (unsigned long)min_t(u64, when, max_when); } -struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern); +struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg); int inet_csk_get_port(struct sock *sk, unsigned short snum); @@ -284,7 +281,7 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk) static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) { - return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog; + return inet_csk_reqsk_queue_len(sk) >= READ_ONCE(sk->sk_max_ack_backlog); } bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index f28da08a37..2a536eea94 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -111,7 +111,7 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); -void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family); +void 
inet_twsk_purge(struct inet_hashinfo *hashinfo); static inline struct net *twsk_net(const struct inet_timewait_sock *twsk) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 73524fa0c0..6cb867ce48 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -340,7 +340,7 @@ static inline void fib6_info_release(struct fib6_info *f6i) { if (f6i && refcount_dec_and_test(&f6i->fib6_ref)) { DEBUG_NET_WARN_ON_ONCE(!hlist_unhashed(&f6i->gc_link)); - call_rcu(&f6i->rcu, fib6_info_destroy_rcu); + call_rcu_hurry(&f6i->rcu, fib6_info_destroy_rcu); } } diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index a18ed24fed..6dbdf60b34 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -127,18 +127,26 @@ void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args, static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i, const struct in6_addr *daddr, - unsigned int prefs, + unsigned int prefs, int l3mdev_index, struct in6_addr *saddr) { + struct net_device *l3mdev; + struct net_device *dev; + bool same_vrf; int err = 0; - if (f6i && f6i->fib6_prefsrc.plen) { + rcu_read_lock(); + + l3mdev = dev_get_by_index_rcu(net, l3mdev_index); + if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev) + dev = f6i ? fib6_info_nh_dev(f6i) : NULL; + same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev; + if (f6i && f6i->fib6_prefsrc.plen && same_vrf) *saddr = f6i->fib6_prefsrc.addr; - } else { - struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL; + else + err = ipv6_dev_get_saddr(net, same_vrf ? dev : l3mdev, daddr, prefs, saddr); - err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr); - } + rcu_read_unlock(); return err; } diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 74b369bddf..399592405c 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -30,8 +30,8 @@ struct __ip6_tnl_parm { struct in6_addr laddr; /* local tunnel end-point address */ struct in6_addr raddr; /* remote tunnel end-point address */ - __be16 i_flags; - __be16 o_flags; + IP_TUNNEL_DECLARE_FLAGS(i_flags); + IP_TUNNEL_DECLARE_FLAGS(o_flags); __be32 i_key; __be32 o_key; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9b2f69ba5e..c29639b432 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -173,6 +173,7 @@ struct fib_result { unsigned char type; unsigned char scope; u32 tclassid; + dscp_t dscp; struct fib_nh_common *nhc; struct fib_info *fi; struct fib_table *table; diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index cc666da626..1db2417b8f 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -36,6 +36,24 @@ (sizeof_field(struct ip_tunnel_key, u) - \ sizeof_field(struct ip_tunnel_key, u.ipv4)) +#define __ipt_flag_op(op, ...) \ + op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM) + +#define IP_TUNNEL_DECLARE_FLAGS(...) \ + __ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__) + +#define ip_tunnel_flags_zero(...) __ipt_flag_op(bitmap_zero, __VA_ARGS__) +#define ip_tunnel_flags_copy(...) __ipt_flag_op(bitmap_copy, __VA_ARGS__) +#define ip_tunnel_flags_and(...) __ipt_flag_op(bitmap_and, __VA_ARGS__) +#define ip_tunnel_flags_or(...) __ipt_flag_op(bitmap_or, __VA_ARGS__) + +#define ip_tunnel_flags_empty(...) \ + __ipt_flag_op(bitmap_empty, __VA_ARGS__) +#define ip_tunnel_flags_intersect(...) \ + __ipt_flag_op(bitmap_intersects, __VA_ARGS__) +#define ip_tunnel_flags_subset(...) 
\ + __ipt_flag_op(bitmap_subset, __VA_ARGS__) + struct ip_tunnel_key { __be64 tun_id; union { @@ -48,11 +66,11 @@ struct ip_tunnel_key { struct in6_addr dst; } ipv6; } u; - __be16 tun_flags; - u8 tos; /* TOS for IPv4, TC for IPv6 */ - u8 ttl; /* TTL for IPv4, HL for IPv6 */ + IP_TUNNEL_DECLARE_FLAGS(tun_flags); __be32 label; /* Flow Label for IPv6 */ u32 nhid; + u8 tos; /* TOS for IPv4, TC for IPv6 */ + u8 ttl; /* TTL for IPv4, HL for IPv6 */ __be16 tp_src; __be16 tp_dst; __u8 flow_flags; @@ -110,6 +128,17 @@ struct ip_tunnel_prl_entry { struct metadata_dst; +/* Kernel-side variant of ip_tunnel_parm */ +struct ip_tunnel_parm_kern { + char name[IFNAMSIZ]; + IP_TUNNEL_DECLARE_FLAGS(i_flags); + IP_TUNNEL_DECLARE_FLAGS(o_flags); + __be32 i_key; + __be32 o_key; + int link; + struct iphdr iph; +}; + struct ip_tunnel { struct ip_tunnel __rcu *next; struct hlist_node hash_node; @@ -136,7 +165,7 @@ struct ip_tunnel { struct dst_cache dst_cache; - struct ip_tunnel_parm parms; + struct ip_tunnel_parm_kern parms; int mlink; int encap_hlen; /* Encap header length (FOU,GUE) */ @@ -157,7 +186,7 @@ struct ip_tunnel { }; struct tnl_ptk_info { - __be16 flags; + IP_TUNNEL_DECLARE_FLAGS(flags); __be16 proto; __be32 key; __be32 seq; @@ -179,11 +208,80 @@ struct ip_tunnel_net { int type; }; +static inline void ip_tunnel_set_options_present(unsigned long *flags) +{ + IP_TUNNEL_DECLARE_FLAGS(present) = { }; + + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present); + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_GTP_OPT_BIT, present); + __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present); + + ip_tunnel_flags_or(flags, flags, present); +} + +static inline void ip_tunnel_clear_options_present(unsigned long *flags) +{ + IP_TUNNEL_DECLARE_FLAGS(present) = { }; + + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present); + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_GTP_OPT_BIT, present); + __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present); + + __ipt_flag_op(bitmap_andnot, flags, flags, present); +} + +static inline bool ip_tunnel_is_options_present(const unsigned long *flags) +{ + IP_TUNNEL_DECLARE_FLAGS(present) = { }; + + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present); + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_GTP_OPT_BIT, present); + __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present); + + return ip_tunnel_flags_intersect(flags, present); +} + +static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags) +{ + IP_TUNNEL_DECLARE_FLAGS(supp) = { }; + + bitmap_set(supp, 0, BITS_PER_TYPE(__be16)); + __set_bit(IP_TUNNEL_VTI_BIT, supp); + + return ip_tunnel_flags_subset(flags, supp); +} + +static inline void ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags) +{ + ip_tunnel_flags_zero(dst); + + bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16)); + __assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI); +} + +static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags) +{ + __be16 ret; + + ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16))); + if (test_bit(IP_TUNNEL_VTI_BIT, flags)) + ret |= VTI_ISVTI; + + return ret; +} + static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, __be32 saddr, __be32 daddr, u8 tos, u8 ttl, __be32 label, __be16 tp_src, __be16 tp_dst, - __be64 tun_id, __be16 tun_flags) + __be64 tun_id, + const unsigned long *tun_flags) { key->tun_id = tun_id; 
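/*
 * Hedged aside, not part of this commit: the __be16 compatibility
 * helpers above exist so the ioctl paths can translate between the
 * legacy struct ip_tunnel_parm and ip_tunnel_parm_kern, roughly:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(f) = { };
 *
 *	ip_tunnel_flags_from_be16(f, legacy_i_flags);
 *	if (ip_tunnel_flags_is_be16_compat(f))
 *		legacy_i_flags = ip_tunnel_flags_to_be16(f);
 *
 * VTI_ISVTI is carried as IP_TUNNEL_VTI_BIT above the low 16 bits, and
 * any flag outside that set makes is_be16_compat() return false.
 */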
key->u.ipv4.src = saddr; @@ -193,7 +291,7 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, key->tos = tos; key->ttl = ttl; key->label = label; - key->tun_flags = tun_flags; + ip_tunnel_flags_copy(key->tun_flags, tun_flags); /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of * the upper tunnel are used. @@ -214,12 +312,8 @@ ip_tunnel_dst_cache_usable(const struct sk_buff *skb, { if (skb->mark) return false; - if (!info) - return true; - if (info->key.tun_flags & TUNNEL_NOCACHE) - return false; - return true; + return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags); } static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info @@ -291,14 +385,18 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, const u8 protocol); void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const u8 proto, int tunnel_hlen); -int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); +int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, + int cmd); +bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp, + const void __user *data); +bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp); int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd); int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, - int link, __be16 flags, + int link, const unsigned long *flags, __be32 remote, __be32 local, __be32 key); @@ -307,16 +405,16 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst, bool log_ecn_error); int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm *p, __u32 fwmark); + struct ip_tunnel_parm_kern *p, __u32 fwmark); int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm *p, __u32 fwmark); + struct ip_tunnel_parm_kern *p, __u32 fwmark); void ip_tunnel_setup(struct net_device *dev, unsigned int net_id); bool ip_tunnel_netlink_encap_parms(struct nlattr *data[], struct ip_tunnel_encap *encap); void ip_tunnel_netlink_parms(struct nlattr *data[], - struct ip_tunnel_parm *parms); + struct ip_tunnel_parm_kern *parms); extern const struct header_ops ip_tunnel_header_ops; __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb); @@ -548,12 +646,13 @@ static inline void ip_tunnel_info_opts_get(void *to, static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, const void *from, int len, - __be16 flags) + const unsigned long *flags) { info->options_len = len; if (len > 0) { memcpy(ip_tunnel_info_opts(info), from, len); - info->key.tun_flags |= flags; + ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags, + flags); } } @@ -597,7 +696,7 @@ static inline void ip_tunnel_info_opts_get(void *to, static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, const void *from, int len, - __be16 flags) + const unsigned long *flags) { info->options_len = 0; } diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h index 5cd7871127..4d114e6d6d 100644 --- a/include/net/iucv/iucv.h +++ b/include/net/iucv/iucv.h @@ -82,7 +82,12 @@ struct iucv_array { } __attribute__ ((aligned (8))); extern const struct bus_type iucv_bus; -extern struct device *iucv_root; + +struct 
device_driver; + +struct device *iucv_alloc_device(const struct attribute_group **attrs, + struct device_driver *driver, void *priv, + const char *fmt, ...) __printf(4, 5); /* * struct iucv_path diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h new file mode 100644 index 0000000000..f29ea3e34c --- /dev/null +++ b/include/net/libeth/rx.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef __LIBETH_RX_H +#define __LIBETH_RX_H + +#include <linux/if_vlan.h> + +#include <net/page_pool/helpers.h> +#include <net/xdp.h> + +/* Rx buffer management */ + +/* Space reserved in front of each frame */ +#define LIBETH_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +/* Maximum headroom for worst-case calculations */ +#define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM +/* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */ +#define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN) + +/* Always use order-0 pages */ +#define LIBETH_RX_PAGE_ORDER 0 +/* Pick a sane buffer stride and align to a cacheline boundary */ +#define LIBETH_RX_BUF_STRIDE SKB_DATA_ALIGN(128) +/* HW-writeable space in one buffer: truesize - headroom/tailroom, aligned */ +#define LIBETH_RX_PAGE_LEN(hr) \ + ALIGN_DOWN(SKB_MAX_ORDER(hr, LIBETH_RX_PAGE_ORDER), \ + LIBETH_RX_BUF_STRIDE) + +/** + * struct libeth_fqe - structure representing an Rx buffer (fill queue element) + * @page: page holding the buffer + * @offset: offset from the page start (to the headroom) + * @truesize: total space occupied by the buffer (w/ headroom and tailroom) + * + * Depending on the MTU, API switches between one-page-per-frame and shared + * page model (to conserve memory on bigger-page platforms). In case of the + * former, @offset is always 0 and @truesize is always ```PAGE_SIZE```. + */ +struct libeth_fqe { + struct page *page; + u32 offset; + u32 truesize; +} __aligned_largest; + +/** + * struct libeth_fq - structure representing a buffer (fill) queue + * @fp: hotpath part of the structure + * @pp: &page_pool for buffer management + * @fqes: array of Rx buffers + * @truesize: size to allocate per buffer, w/overhead + * @count: number of descriptors/buffers the queue has + * @buf_len: HW-writeable length per each buffer + * @nid: ID of the closest NUMA node with memory + */ +struct libeth_fq { + struct_group_tagged(libeth_fq_fp, fp, + struct page_pool *pp; + struct libeth_fqe *fqes; + + u32 truesize; + u32 count; + ); + + /* Cold fields */ + u32 buf_len; + int nid; +}; + +int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi); +void libeth_rx_fq_destroy(struct libeth_fq *fq); + +/** + * libeth_rx_alloc - allocate a new Rx buffer + * @fq: fill queue to allocate for + * @i: index of the buffer within the queue + * + * Return: DMA address to be passed to HW for Rx on successful allocation, + * ```DMA_MAPPING_ERROR``` otherwise. + */ +static inline dma_addr_t libeth_rx_alloc(const struct libeth_fq_fp *fq, u32 i) +{ + struct libeth_fqe *buf = &fq->fqes[i]; + + buf->truesize = fq->truesize; + buf->page = page_pool_dev_alloc(fq->pp, &buf->offset, &buf->truesize); + if (unlikely(!buf->page)) + return DMA_MAPPING_ERROR; + + return page_pool_get_dma_addr(buf->page) + buf->offset + + fq->pp->p.offset; +} + +void libeth_rx_recycle_slow(struct page *page); + +/** + * libeth_rx_sync_for_cpu - synchronize or recycle buffer post DMA + * @fqe: buffer to process + * @len: frame length from the descriptor + * + * Process the buffer after it's written by HW. 
The regular path is to + * synchronize DMA for CPU, but in case of no data it will be immediately + * recycled back to its PP. + * + * Return: true when there's data to process, false otherwise. + */ +static inline bool libeth_rx_sync_for_cpu(const struct libeth_fqe *fqe, + u32 len) +{ + struct page *page = fqe->page; + + /* Very rare, but possible case. The most common reason: + * the last fragment contained FCS only, which was then + * stripped by the HW. + */ + if (unlikely(!len)) { + libeth_rx_recycle_slow(page); + return false; + } + + page_pool_dma_sync_for_cpu(page->pp, page, fqe->offset, len); + + return true; +} + +/* Converting abstract packet type numbers into a software structure with + * the packet parameters to do O(1) lookup on Rx. + */ + +enum { + LIBETH_RX_PT_OUTER_L2 = 0U, + LIBETH_RX_PT_OUTER_IPV4, + LIBETH_RX_PT_OUTER_IPV6, +}; + +enum { + LIBETH_RX_PT_NOT_FRAG = 0U, + LIBETH_RX_PT_FRAG, +}; + +enum { + LIBETH_RX_PT_TUNNEL_IP_NONE = 0U, + LIBETH_RX_PT_TUNNEL_IP_IP, + LIBETH_RX_PT_TUNNEL_IP_GRENAT, + LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC, + LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC_VLAN, +}; + +enum { + LIBETH_RX_PT_TUNNEL_END_NONE = 0U, + LIBETH_RX_PT_TUNNEL_END_IPV4, + LIBETH_RX_PT_TUNNEL_END_IPV6, +}; + +enum { + LIBETH_RX_PT_INNER_NONE = 0U, + LIBETH_RX_PT_INNER_UDP, + LIBETH_RX_PT_INNER_TCP, + LIBETH_RX_PT_INNER_SCTP, + LIBETH_RX_PT_INNER_ICMP, + LIBETH_RX_PT_INNER_TIMESYNC, +}; + +#define LIBETH_RX_PT_PAYLOAD_NONE PKT_HASH_TYPE_NONE +#define LIBETH_RX_PT_PAYLOAD_L2 PKT_HASH_TYPE_L2 +#define LIBETH_RX_PT_PAYLOAD_L3 PKT_HASH_TYPE_L3 +#define LIBETH_RX_PT_PAYLOAD_L4 PKT_HASH_TYPE_L4 + +struct libeth_rx_pt { + u32 outer_ip:2; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:3; + enum pkt_hash_types payload_layer:2; + + u32 pad:2; + enum xdp_rss_hash_type hash_type:16; +}; + +void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt); + +/** + * libeth_rx_pt_get_ip_ver - get IP version from a packet type structure + * @pt: packet type params + * + * Wrapper to compile out the IPv6 code from the drivers when not supported + * by the kernel. + * + * Return: @pt.outer_ip or stub for IPv6 when not compiled-in. + */ +static inline u32 libeth_rx_pt_get_ip_ver(struct libeth_rx_pt pt) +{ +#if !IS_ENABLED(CONFIG_IPV6) + switch (pt.outer_ip) { + case LIBETH_RX_PT_OUTER_IPV4: + return LIBETH_RX_PT_OUTER_IPV4; + default: + return LIBETH_RX_PT_OUTER_L2; + } +#else + return pt.outer_ip; +#endif +} + +/* libeth_has_*() can be used to quickly check whether the HW metadata is + * available to avoid further expensive processing such as descriptor reads. + * They already check for the corresponding netdev feature to be enabled, + * thus can be used as drop-in replacements. + */ + +static inline bool libeth_rx_pt_has_checksum(const struct net_device *dev, + struct libeth_rx_pt pt) +{ + /* Non-zero _INNER* is only possible when _OUTER_IPV* is set, + * it is enough to check only for the L4 type. 
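A hedged sketch of the buffer lifecycle these helpers define (the descriptor layout is hypothetical; only libeth calls from this header are assumed):

#include <net/libeth/rx.h>

/* refill one fill-queue slot and hand the DMA address to HW */
static int example_refill_one(struct libeth_fq *fq, u32 i, __le64 *desc)
{
	dma_addr_t dma = libeth_rx_alloc(&fq->fp, i);

	if (dma == DMA_MAPPING_ERROR)
		return -ENOMEM;

	*desc = cpu_to_le64(dma);	/* descriptor format is HW-specific */

	return 0;
}

/* on completion, sync for CPU (or recycle a zero-length buffer) */
static bool example_complete_one(const struct libeth_fqe *fqe, u32 len)
{
	return libeth_rx_sync_for_cpu(fqe, len);
}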
+ */ + return likely(pt.inner_prot > LIBETH_RX_PT_INNER_NONE && + (dev->features & NETIF_F_RXCSUM)); +} + +static inline bool libeth_rx_pt_has_hash(const struct net_device *dev, + struct libeth_rx_pt pt) +{ + return likely(pt.payload_layer > LIBETH_RX_PT_PAYLOAD_NONE && + (dev->features & NETIF_F_RXHASH)); +} + +/** + * libeth_rx_pt_set_hash - fill in skb hash value basing on the PT + * @skb: skb to fill the hash in + * @hash: 32-bit hash value from the descriptor + * @pt: packet type + */ +static inline void libeth_rx_pt_set_hash(struct sk_buff *skb, u32 hash, + struct libeth_rx_pt pt) +{ + skb_set_hash(skb, hash, pt.payload_layer); +} + +#endif /* __LIBETH_RX_H */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index baaff7bc09..45ad37adbe 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -361,7 +361,7 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response * status changed. * @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed. - * @BSS_CHANGED_MLD_TTLM: TID to link mapping was changed + * @BSS_CHANGED_MLD_TTLM: negotiated TID to link mapping was changed */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -1924,10 +1924,12 @@ enum ieee80211_neg_ttlm_res { * @active_links: The bitmap of active links, or 0 for non-MLO. * The driver shouldn't change this directly, but use the * API calls meant for that purpose. - * @dormant_links: bitmap of valid but disabled links, or 0 for non-MLO. - * Must be a subset of valid_links. + * @dormant_links: subset of the valid links that are disabled/suspended + * due to advertised or negotiated TTLM respectively. + * 0 for non-MLO. * @suspended_links: subset of dormant_links representing links that are - * suspended. + * suspended due to negotiated TTLM, and could be activated in the + * future by tearing down the TTLM negotiation. * 0 for non-MLO. * @neg_ttlm: negotiated TID to link mapping info. * see &struct ieee80211_neg_ttlm. @@ -2052,7 +2054,7 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) * This can be used by mac80211 drivers with direct cfg80211 APIs * (like the vendor commands) that get a wdev. * - * Note that this function may return %NULL if the given wdev isn't + * Return: pointer to the wdev, or %NULL if the given wdev isn't * associated with a vif that the driver knows about (e.g. monitor * or AP_VLAN interfaces.) */ @@ -2065,6 +2067,8 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * This can be used by mac80211 drivers with direct cfg80211 APIs * (like the vendor commands) that needs to get the wdev for a vif. * This can also be useful to get the netdev associated to a vif. + * + * Return: pointer to the wdev */ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif); @@ -2123,8 +2127,8 @@ static inline bool lockdep_vif_wiphy_mutex_held(struct ieee80211_vif *vif) * @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key. * @IEEE80211_KEY_FLAG_NO_AUTO_TX: Key needs explicit Tx activation. 
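A hedged sketch of the O(1) packet-type lookup the libeth_rx_pt helpers above are built for; example_pt_table and the checksum policy are hypothetical driver details:

#include <net/libeth/rx.h>

static struct libeth_rx_pt example_pt_table[256];	/* filled at probe */

static void example_fill_rx_meta(struct sk_buff *skb,
				 const struct net_device *dev,
				 u32 ptype, u32 rss_hash)
{
	struct libeth_rx_pt pt = example_pt_table[ptype & 0xff];

	if (libeth_rx_pt_has_hash(dev, pt))
		libeth_rx_pt_set_hash(skb, rss_hash, pt);

	if (libeth_rx_pt_has_checksum(dev, pt))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}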
* @IEEE80211_KEY_FLAG_GENERATE_MMIE: This flag should be set by the driver - * for a AES_CMAC key to indicate that it requires sequence number - * generation only + * for a AES_CMAC or a AES_GMAC key to indicate that it requires sequence + * number generation only * @IEEE80211_KEY_FLAG_SPP_AMSDU: SPP A-MSDUs can be used with this key * (set by mac80211 from the sta->spp_amsdu flag) */ @@ -2780,6 +2784,8 @@ struct ieee80211_txq { * * @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT * and connecting with a lower bandwidth instead + * @IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ: HW requires disabling puncturing in + * EHT in 5 GHz and connecting with a lower bandwidth instead * * @IEEE80211_HW_HANDLES_QUIET_CSA: HW/driver handles quieting for CSA, so * no need to stop queues. This really should be set by a driver that @@ -2844,6 +2850,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_DETECTS_COLOR_COLLISION, IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX, IEEE80211_HW_DISALLOW_PUNCTURING, + IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ, IEEE80211_HW_HANDLES_QUIET_CSA, /* keep last, obviously */ @@ -5600,7 +5607,7 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id); * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @link_id: valid link_id during MLO or 0 for non-MLO * - * This function returns whether the countdown reached zero. + * Return: %true if the countdown reached 1, %false otherwise */ bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif, unsigned int link_id); @@ -5608,12 +5615,13 @@ bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif, /** * ieee80211_color_change_finish - notify mac80211 about color change * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @link_id: valid link_id during MLO or 0 for non-MLO * * After a color change announcement was scheduled and the counter in this * announcement hits 1, this function must be called by the driver to * notify mac80211 that the color can be changed */ -void ieee80211_color_change_finish(struct ieee80211_vif *vif); +void ieee80211_color_change_finish(struct ieee80211_vif *vif, u8 link_id); /** * ieee80211_proberesp_get - retrieve a Probe Response template @@ -5945,8 +5953,8 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf); * key(s) will be available. These will be needed by mac80211 for proper * RX processing, so this function allows setting them. * - * The function returns the newly allocated key structure, which will - * have similar contents to the passed key configuration but point to + * Return: the newly allocated key structure, which will have + * similar contents to the passed key configuration but point to * mac80211-owned memory. In case of errors, the function returns an * ERR_PTR(), use IS_ERR() etc. * @@ -6345,6 +6353,8 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, * may be %NULL if the link ID is not needed * * Obtain the STA by link address, must use RCU protection. + * + * Return: pointer to STA if found, otherwise %NULL. */ struct ieee80211_sta * ieee80211_find_sta_by_link_addrs(struct ieee80211_hw *hw, @@ -6474,8 +6484,8 @@ void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid, * @hw: pointer obtained from ieee80211_alloc_hw() * @txq: pointer obtained from station or virtual interface * - * Return true if the AQL's airtime limit has not been reached and the txq can - * continue to send more packets to the device. Otherwise return false. 
+ * Return: %true if the AQL's airtime limit has not been reached and the txq can + * continue to send more packets to the device. Otherwise return %false. */ bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq); @@ -6978,6 +6988,8 @@ bool rate_usable_index_exists(struct ieee80211_supported_band *sband, * @hw: pointer as obtained from ieee80211_alloc_hw() * @pubsta: &struct ieee80211_sta pointer to the target destination. * @rates: new tx rate set to be used for this station. + * + * Return: 0 on success. An error code otherwise. */ int rate_control_set_rates(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, @@ -7138,6 +7150,8 @@ void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif, * @band: the band to transmit on * @sta: optional pointer to get the station to send the frame to * + * Return: %true if the skb was prepared, %false otherwise + * * Note: must be called under RCU lock */ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, @@ -7154,6 +7168,8 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, * * @skb: packet injected by userspace * @dev: the &struct device of this 802.11 device + * + * Return: %true if the radiotap header was parsed, %false otherwise */ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb, struct net_device *dev); @@ -7263,7 +7279,7 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); * @txq: pointer obtained from station or virtual interface, or from * ieee80211_next_txq() * - * Returns the skb if successful, %NULL if no frame was available. + * Return: the skb if successful, %NULL if no frame was available. * * Note that this must be called in an rcu_read_lock() critical section, * which can only be released after the SKB was handled. Some pointers in @@ -7289,6 +7305,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, * @hw: pointer as obtained from ieee80211_alloc_hw() * @txq: pointer obtained from station or virtual interface, or from * ieee80211_next_txq() + * + * Return: the skb if successful, %NULL if no frame was available. */ static inline struct sk_buff *ieee80211_tx_dequeue_ni(struct ieee80211_hw *hw, struct ieee80211_txq *txq) @@ -7320,7 +7338,7 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw, * @hw: pointer as obtained from ieee80211_alloc_hw() * @ac: AC number to return packets from. * - * Returns the next txq if successful, %NULL if no queue is eligible. If a txq + * Return: the next txq if successful, %NULL if no queue is eligible. If a txq * is returned, it should be returned with ieee80211_return_txq() after the * driver has finished scheduling it. */ @@ -7403,6 +7421,8 @@ ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, * * @hw: pointer as obtained from ieee80211_alloc_hw() * @txq: pointer obtained from station or virtual interface + * + * Return: %true if transmission is allowed, %false otherwise */ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); @@ -7463,6 +7483,8 @@ void ieee80211_nan_func_match(struct ieee80211_vif *vif, * @status: &struct ieee80211_rx_status containing the transmission rate * information. * @len: frame length in bytes + * + * Return: the airtime estimate */ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, struct ieee80211_rx_status *status, @@ -7477,23 +7499,13 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, * @hw: pointer as obtained from ieee80211_alloc_hw() * @info: &struct ieee80211_tx_info of the frame. 
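The next_txq()/return_txq() contract documented above, as a hedged driver-side loop; mydrv_tx_one() is hypothetical, and the schedule_start/end calls are assumed from elsewhere in this header:

#include <net/mac80211.h>

static void mydrv_tx_one(struct ieee80211_hw *hw, struct sk_buff *skb);

static void example_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	rcu_read_lock();	/* ieee80211_tx_dequeue() requires RCU */
	ieee80211_txq_schedule_start(hw, ac);

	while ((txq = ieee80211_next_txq(hw, ac))) {
		struct sk_buff *skb;

		while ((skb = ieee80211_tx_dequeue(hw, txq)))
			mydrv_tx_one(hw, skb);	/* hypothetical HW submit */

		ieee80211_return_txq(hw, txq, false);
	}

	ieee80211_txq_schedule_end(hw, ac);
	rcu_read_unlock();
}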
* @len: frame length in bytes + * + * Return: the airtime estimate */ u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw, struct ieee80211_tx_info *info, int len); /** - * ieee80211_set_hw_80211_encap - enable hardware encapsulation offloading. - * - * This function is used to notify mac80211 that a vif can be passed raw 802.3 - * frames. The driver needs to then handle the 802.11 encapsulation inside the - * hardware or firmware. - * - * @vif: &struct ieee80211_vif pointer from the add_interface callback. - * @enable: indicate if the feature should be turned on or off - */ -bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable); - -/** * ieee80211_get_fils_discovery_tmpl - Get FILS discovery template. * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. @@ -7522,6 +7534,7 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw, /** * ieee80211_obss_color_collision_notify - notify userland about a BSS color * collision. + * @link_id: valid link_id during MLO or 0 for non-MLO * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is @@ -7529,7 +7542,7 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw, */ void ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif, - u64 color_bitmap); + u64 color_bitmap, u8 link_id); /** * ieee80211_is_tx_data - check if frame is a data frame @@ -7538,6 +7551,8 @@ ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif, * hardware encapsulation enabled are data frames. * * @skb: the frame to be transmitted. + * + * Return: %true if @skb is a data frame, %false otherwise */ static inline bool ieee80211_is_tx_data(struct sk_buff *skb) { @@ -7573,6 +7588,8 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb) * - change_sta_links(0x10) for each affected STA (the AP) * - assign_vif_chanctx(link_id=4) * - change_vif_links(0x10) + * + * Return: 0 on success. An error code otherwise. */ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links); @@ -7589,6 +7606,15 @@ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links); void ieee80211_set_active_links_async(struct ieee80211_vif *vif, u16 active_links); +/** + * ieee80211_send_teardown_neg_ttlm - tear down a negotiated TTLM request + * @vif: the interface on which the tear down request should be sent. + * + * This function can be used to tear down a previously accepted negotiated + * TTLM request. + */ +void ieee80211_send_teardown_neg_ttlm(struct ieee80211_vif *vif); + /* for older drivers - let's not document these ... 
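A hedged sketch of a driver constraining the active link set through the API whose callback sequence is documented above (the link choice is hypothetical):

#include <net/mac80211.h>

static int example_use_single_link(struct ieee80211_vif *vif, u8 link_id)
{
	/* must name a subset of vif->valid_links; mac80211 then runs the
	 * documented change_vif_links()/assign_vif_chanctx() sequence
	 */
	return ieee80211_set_active_links(vif, BIT(link_id));
}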
*/ int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx); diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 4eeedf1471..f207a6e104 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -670,6 +670,7 @@ struct mana_cfg_rx_steer_req_v2 { u8 hashkey[MANA_HASH_KEY_SIZE]; u8 cqe_coalescing_enable; u8 reserved2[7]; + mana_handle_t indir_tab[] __counted_by(num_indir_entries); }; /* HW DATA */ struct mana_cfg_rx_steer_resp { @@ -795,4 +796,6 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, u32 doorbell_pg_id); void mana_uncfg_vport(struct mana_port_context *apc); + +struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index); #endif /* _MANA_H */ diff --git a/include/net/mptcp.h b/include/net/mptcp.h index fb996124b3..0bc4ab03f4 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -97,6 +97,9 @@ struct mptcp_out_options { }; #define MPTCP_SCHED_NAME_MAX 16 +#define MPTCP_SCHED_MAX 128 +#define MPTCP_SCHED_BUF_MAX (MPTCP_SCHED_NAME_MAX * MPTCP_SCHED_MAX) + #define MPTCP_SUBFLOWS_MAX 8 struct mptcp_sched_data { diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h index 1ec4085853..a8a7e48dfa 100644 --- a/include/net/netdev_queues.h +++ b/include/net/netdev_queues.h @@ -9,11 +9,41 @@ struct netdev_queue_stats_rx { u64 bytes; u64 packets; u64 alloc_fail; + + u64 hw_drops; + u64 hw_drop_overruns; + + u64 csum_unnecessary; + u64 csum_none; + u64 csum_bad; + + u64 hw_gro_packets; + u64 hw_gro_bytes; + u64 hw_gro_wire_packets; + u64 hw_gro_wire_bytes; + + u64 hw_drop_ratelimits; }; struct netdev_queue_stats_tx { u64 bytes; u64 packets; + + u64 hw_drops; + u64 hw_drop_errors; + + u64 csum_none; + u64 needs_csum; + + u64 hw_gso_packets; + u64 hw_gso_bytes; + u64 hw_gso_wire_packets; + u64 hw_gso_wire_bytes; + + u64 hw_drop_ratelimits; + + u64 stop; + u64 wake; }; /** @@ -61,6 +91,37 @@ struct netdev_stat_ops { }; /** + * struct netdev_queue_mgmt_ops - netdev ops for queue management + * + * @ndo_queue_mem_size: Size of the struct that describes a queue's memory. + * + * @ndo_queue_mem_alloc: Allocate memory for an RX queue at the specified index. + * The new memory is written at the specified address. + * + * @ndo_queue_mem_free: Free memory from an RX queue. + * + * @ndo_queue_start: Start an RX queue with the specified memory and at the + * specified index. + * + * @ndo_queue_stop: Stop the RX queue at the specified index. The stopped + * queue's memory is written at the specified address. + */ +struct netdev_queue_mgmt_ops { + size_t ndo_queue_mem_size; + int (*ndo_queue_mem_alloc)(struct net_device *dev, + void *per_queue_mem, + int idx); + void (*ndo_queue_mem_free)(struct net_device *dev, + void *per_queue_mem); + int (*ndo_queue_start)(struct net_device *dev, + void *per_queue_mem, + int idx); + int (*ndo_queue_stop)(struct net_device *dev, + void *per_queue_mem, + int idx); +}; + +/** * DOC: Lockless queue stopping / waking helpers. 
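A hedged skeleton showing the shape of a driver implementation of the queue-management ops above; every mydrv_* symbol is hypothetical:

#include <net/netdev_queues.h>

struct mydrv_rxq_mem {
	void *ring;			/* per-queue HW state */
};

static int mydrv_queue_mem_alloc(struct net_device *dev,
				 void *per_queue_mem, int idx)
{
	struct mydrv_rxq_mem *qmem = per_queue_mem;

	qmem->ring = NULL;		/* allocate rings/buffers for idx */
	return 0;
}

static void mydrv_queue_mem_free(struct net_device *dev, void *per_queue_mem)
{
	/* undo mydrv_queue_mem_alloc() */
}

static int mydrv_queue_start(struct net_device *dev, void *per_queue_mem,
			     int idx)
{
	return 0;			/* point HW queue idx at new memory */
}

static int mydrv_queue_stop(struct net_device *dev, void *per_queue_mem,
			    int idx)
{
	return 0;			/* quiesce idx, save old memory */
}

static const struct netdev_queue_mgmt_ops mydrv_queue_mgmt_ops = {
	.ndo_queue_mem_size	= sizeof(struct mydrv_rxq_mem),
	.ndo_queue_mem_alloc	= mydrv_queue_mem_alloc,
	.ndo_queue_mem_free	= mydrv_queue_mem_free,
	.ndo_queue_start	= mydrv_queue_start,
	.ndo_queue_stop		= mydrv_queue_stop,
};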
* * The netif_txq_maybe_stop() and __netif_txq_completed_wake() diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 2164fa350f..188d41da1a 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -416,7 +416,7 @@ struct nft_expr_info; int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla, struct nft_expr_info *info); -int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src); +int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src, gfp_t gfp); void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr); int nft_expr_dump(struct sk_buff *skb, unsigned int attr, const struct nft_expr *expr, bool reset); @@ -940,7 +940,7 @@ struct nft_expr_ops { struct nft_regs *regs, const struct nft_pktinfo *pkt); int (*clone)(struct nft_expr *dst, - const struct nft_expr *src); + const struct nft_expr *src, gfp_t gfp); unsigned int size; int (*init)(const struct nft_ctx *ctx, diff --git a/include/net/netlabel.h b/include/net/netlabel.h index f3ab0b8a4b..654bc777d2 100644 --- a/include/net/netlabel.h +++ b/include/net/netlabel.h @@ -274,15 +274,17 @@ struct netlbl_calipso_ops { * on success, NULL on failure. * */ -static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags) +static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc_noprof(gfp_t flags) { struct netlbl_lsm_cache *cache; - cache = kzalloc(sizeof(*cache), flags); + cache = kzalloc_noprof(sizeof(*cache), flags); if (cache) refcount_set(&cache->refcount, 1); return cache; } +#define netlbl_secattr_cache_alloc(...) \ + alloc_hooks(netlbl_secattr_cache_alloc_noprof(__VA_ARGS__)) /** * netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct @@ -311,10 +313,11 @@ static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache) * on failure. * */ -static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc(gfp_t flags) +static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc_noprof(gfp_t flags) { - return kzalloc(sizeof(struct netlbl_lsm_catmap), flags); + return kzalloc_noprof(sizeof(struct netlbl_lsm_catmap), flags); } +#define netlbl_catmap_alloc(...) alloc_hooks(netlbl_catmap_alloc_noprof(__VA_ARGS__)) /** * netlbl_catmap_free - Free a LSM secattr catmap @@ -376,10 +379,11 @@ static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr) * pointer on success, or NULL on failure. * */ -static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc(gfp_t flags) +static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc_noprof(gfp_t flags) { - return kzalloc(sizeof(struct netlbl_lsm_secattr), flags); + return kzalloc_noprof(sizeof(struct netlbl_lsm_secattr), flags); } +#define netlbl_secattr_alloc(...) 
alloc_hooks(netlbl_secattr_alloc_noprof(__VA_ARGS__)) /** * netlbl_secattr_free - Frees a netlbl_lsm_secattr struct @@ -470,7 +474,8 @@ void netlbl_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state); int netlbl_enabled(void); int netlbl_sock_setattr(struct sock *sk, u16 family, - const struct netlbl_lsm_secattr *secattr); + const struct netlbl_lsm_secattr *secattr, + bool sk_locked); void netlbl_sock_delattr(struct sock *sk); int netlbl_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr); @@ -487,6 +492,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb, u16 family, struct netlbl_lsm_secattr *secattr); void netlbl_skbuff_err(struct sk_buff *skb, u16 family, int error, int gateway); +bool netlbl_sk_lock_check(struct sock *sk); /* * LSM label mapping cache operations @@ -614,7 +620,8 @@ static inline int netlbl_enabled(void) } static inline int netlbl_sock_setattr(struct sock *sk, u16 family, - const struct netlbl_lsm_secattr *secattr) + const struct netlbl_lsm_secattr *secattr, + bool sk_locked) { return -ENOSYS; } @@ -673,6 +680,11 @@ static inline struct audit_buffer *netlbl_audit_start(int type, { return NULL; } + +static inline bool netlbl_sk_lock_check(struct sock *sk) +{ + return true; +} #endif /* CONFIG_NETLABEL */ const struct netlbl_calipso_ops * diff --git a/include/net/netlink.h b/include/net/netlink.h index c19ff921b6..e78ce008e0 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -41,7 +41,8 @@ * nlmsg_get_pos() return current position in message * nlmsg_trim() trim part of message * nlmsg_cancel() cancel message construction - * nlmsg_free() free a netlink message + * nlmsg_consume() free a netlink message (expected) + * nlmsg_free() free a netlink message (drop) * * Message Sending: * nlmsg_multicast() multicast message to several groups @@ -157,7 +158,11 @@ * nla_parse() parse and validate stream of attrs * nla_parse_nested() parse nested attributes * nla_for_each_attr() loop over all attributes + * nla_for_each_attr_type() loop over all attributes with the + * given type * nla_for_each_nested() loop over the nested attributes + * nla_for_each_nested_type() loop over the nested attributes with + * the given type *========================================================================= */ @@ -1078,7 +1083,7 @@ static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh) } /** - * nlmsg_free - free a netlink message + * nlmsg_free - drop a netlink message * @skb: socket buffer of netlink message */ static inline void nlmsg_free(struct sk_buff *skb) @@ -1087,6 +1092,15 @@ static inline void nlmsg_free(struct sk_buff *skb) } /** + * nlmsg_consume - free a netlink message + * @skb: socket buffer of netlink message + */ +static inline void nlmsg_consume(struct sk_buff *skb) +{ + consume_skb(skb); +} + +/** * nlmsg_multicast_filtered - multicast a netlink message with filter function * @sk: netlink socket to spread messages to * @skb: netlink message as socket buffer @@ -1891,10 +1905,11 @@ static inline struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla) * @src: netlink attribute to duplicate from * @gfp: GFP mask */ -static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp) +static inline void *nla_memdup_noprof(const struct nlattr *src, gfp_t gfp) { - return kmemdup(nla_data(src), nla_len(src), gfp); + return kmemdup_noprof(nla_data(src), nla_len(src), gfp); } +#define nla_memdup(...) 
alloc_hooks(nla_memdup_noprof(__VA_ARGS__)) /** * nla_nest_start_noflag - Start a new level of nested attributes @@ -2071,6 +2086,18 @@ static inline int nla_total_size_64bit(int payload) pos = nla_next(pos, &(rem))) /** + * nla_for_each_attr_type - iterate over a stream of attributes + * @pos: loop counter, set to current attribute + * @type: required attribute type for @pos + * @head: head of attribute stream + * @len: length of attribute stream + * @rem: initialized to len, holds bytes currently remaining in stream + */ +#define nla_for_each_attr_type(pos, type, head, len, rem) \ + nla_for_each_attr(pos, head, len, rem) \ + if (nla_type(pos) == type) + +/** * nla_for_each_nested - iterate over nested attributes * @pos: loop counter, set to current attribute * @nla: attribute containing the nested attributes @@ -2080,6 +2107,17 @@ static inline int nla_total_size_64bit(int payload) nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem) /** + * nla_for_each_nested_type - iterate over nested attributes + * @pos: loop counter, set to current attribute + * @type: required attribute type for @pos + * @nla: attribute containing the nested attributes + * @rem: initialized to len, holds bytes currently remaining in stream + */ +#define nla_for_each_nested_type(pos, type, nla, rem) \ + nla_for_each_nested(pos, nla, rem) \ + if (nla_type(pos) == type) + +/** * nla_is_last - Test if attribute is last in stream * @nla: attribute to test * @rem: bytes remaining in stream diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 7ca315ad50..68463aebcc 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -267,7 +267,7 @@ static inline bool nexthop_get(struct nexthop *nh) static inline void nexthop_put(struct nexthop *nh) { if (refcount_dec_and_test(&nh->refcnt)) - call_rcu(&nh->rcu, nexthop_free_rcu); + call_rcu_hurry(&nh->rcu, nexthop_free_rcu); } static inline bool nexthop_cmp(const struct nexthop *nh1, diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h index 1d397c1a00..873631c79a 100644 --- a/include/net/page_pool/helpers.h +++ b/include/net/page_pool/helpers.h @@ -52,13 +52,15 @@ #ifndef _NET_PAGE_POOL_HELPERS_H #define _NET_PAGE_POOL_HELPERS_H +#include <linux/dma-mapping.h> + #include <net/page_pool/types.h> #ifdef CONFIG_PAGE_POOL_STATS /* Deprecated driver-facing API, use netlink instead */ int page_pool_ethtool_stats_get_count(void); u8 *page_pool_ethtool_stats_get_strings(u8 *data); -u64 *page_pool_ethtool_stats_get(u64 *data, void *stats); +u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats); bool page_pool_get_stats(const struct page_pool *pool, struct page_pool_stats *stats); @@ -73,7 +75,7 @@ static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data) return data; } -static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats) +static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats) { return data; } @@ -204,8 +206,8 @@ static inline void *page_pool_dev_alloc_va(struct page_pool *pool, * Get the stored dma direction. A driver might decide to store this locally * and avoid the extra cache line from page_pool to determine the direction. */ -static -inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) +static inline enum dma_data_direction +page_pool_get_dma_dir(const struct page_pool *pool) { return pool->p.dma_dir; } @@ -370,7 +372,7 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va, * Fetch the DMA address of the page. 
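A usage sketch for the typed attribute iterators added above; the attribute type and u32 payload are hypothetical:

#include <net/netlink.h>

static u32 example_sum_attrs_of_type(struct nlattr *head, int len, int type)
{
	struct nlattr *pos;
	u32 sum = 0;
	int rem;

	/* visits only attributes whose nla_type() matches */
	nla_for_each_attr_type(pos, type, head, len, rem)
		sum += nla_get_u32(pos);

	return sum;
}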
The page pool to which the page belongs * must have been created with PP_FLAG_DMA_MAP. */ -static inline dma_addr_t page_pool_get_dma_addr(struct page *page) +static inline dma_addr_t page_pool_get_dma_addr(const struct page *page) { dma_addr_t ret = page->dma_addr; @@ -395,6 +397,28 @@ static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr) return false; } +/** + * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW + * @pool: &page_pool the @page belongs to + * @page: page to sync + * @offset: offset from page start to "hard" start if using PP frags + * @dma_sync_size: size of the data written to the page + * + * Can be used as a shorthand to sync Rx pages before accessing them in the + * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``. + * Note that this version performs DMA sync unconditionally, even if the + * associated PP doesn't perform sync-for-device. + */ +static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool, + const struct page *page, + u32 offset, u32 dma_sync_size) +{ + dma_sync_single_range_for_cpu(pool->p.dev, + page_pool_get_dma_addr(page), + offset + pool->p.offset, dma_sync_size, + page_pool_get_dma_dir(pool)); +} + static inline bool page_pool_put(struct page_pool *pool) { return refcount_dec_and_test(&pool->user_cnt); diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index 5e43a08d32..7e8477057f 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -45,20 +45,21 @@ struct pp_alloc_cache { /** * struct page_pool_params - page pool parameters - * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV + * @fast: params accessed frequently on hotpath * @order: 2^order pages on allocation * @pool_size: size of the ptr_ring * @nid: NUMA node id to allocate from pages from * @dev: device, for DMA pre-mapping purposes - * @netdev: netdev this pool will serve (leave as NULL if none or multiple) * @napi: NAPI which is the sole consumer of pages, otherwise NULL * @dma_dir: DMA mapping direction * @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV + * @slow: params with slowpath access only (initialization and Netlink) + * @netdev: netdev this pool will serve (leave as NULL if none or multiple) + * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL */ struct page_pool_params { struct_group_tagged(page_pool_params_fast, fast, - unsigned int flags; unsigned int order; unsigned int pool_size; int nid; @@ -70,6 +71,7 @@ struct page_pool_params { ); struct_group_tagged(page_pool_params_slow, slow, struct net_device *netdev; + unsigned int flags; /* private: used by test code only */ void (*init_callback)(struct page *page, void *arg); void *init_arg; @@ -130,12 +132,28 @@ struct page_pool { struct page_pool_params_fast p; int cpuid; - bool has_init_callback; + u32 pages_state_hold_cnt; + bool has_init_callback:1; /* slow::init_callback is set */ + bool dma_map:1; /* Perform DMA mapping */ + bool dma_sync:1; /* Perform DMA sync */ +#ifdef CONFIG_PAGE_POOL_STATS + bool system:1; /* This is a global percpu pool */ +#endif + + /* The following block must stay within one cacheline. On 32-bit + * systems, sizeof(long) == sizeof(int), so that the block size is + * ``3 * sizeof(long)``. On 64-bit systems, the actual size is + * ``2 * sizeof(long) + sizeof(int)``. The closest pow-2 to both of + * them is ``4 * sizeof(long)``, so just use that one for simplicity. 
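A hedged sketch of the intended call site for page_pool_dma_sync_for_cpu() above (an Rx completion path; parsing details omitted):

#include <net/page_pool/helpers.h>

static void example_rx_complete(const struct page_pool *pool,
				const struct page *page, u32 offset, u32 len)
{
	/* make the HW-written bytes CPU-visible before touching them */
	page_pool_dma_sync_for_cpu(pool, page, offset, len);

	/* frame data follows the pool's headroom at this offset */
}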
+ * Having it aligned to a cacheline boundary may be excessive and + * doesn't bring any benefit. + */ + __cacheline_group_begin(frag) __aligned(4 * sizeof(long)); long frag_users; struct page *frag_page; unsigned int frag_offset; - u32 pages_state_hold_cnt; + __cacheline_group_end(frag); struct delayed_work release_dw; void (*disconnect)(void *pool); @@ -213,7 +231,7 @@ struct xdp_mem_info; #ifdef CONFIG_PAGE_POOL void page_pool_destroy(struct page_pool *pool); void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), - struct xdp_mem_info *mem); + const struct xdp_mem_info *mem); void page_pool_put_page_bulk(struct page_pool *pool, void **data, int count); #else @@ -223,7 +241,7 @@ static inline void page_pool_destroy(struct page_pool *pool) static inline void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), - struct xdp_mem_info *mem) + const struct xdp_mem_info *mem) { } diff --git a/include/net/pfcp.h b/include/net/pfcp.h new file mode 100644 index 0000000000..af14f970b8 --- /dev/null +++ b/include/net/pfcp.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PFCP_H_ +#define _PFCP_H_ + +#include <uapi/linux/if_ether.h> +#include <net/dst_metadata.h> +#include <linux/netdevice.h> +#include <uapi/linux/ipv6.h> +#include <net/udp_tunnel.h> +#include <uapi/linux/udp.h> +#include <uapi/linux/ip.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/bits.h> + +#define PFCP_PORT 8805 + +/* PFCP protocol header */ +struct pfcphdr { + u8 flags; + u8 message_type; + __be16 message_length; +}; + +/* PFCP header flags */ +#define PFCP_SEID_FLAG BIT(0) +#define PFCP_MP_FLAG BIT(1) + +#define PFCP_VERSION_MASK GENMASK(4, 0) + +#define PFCP_HLEN (sizeof(struct udphdr) + sizeof(struct pfcphdr)) + +/* PFCP node related messages */ +struct pfcphdr_node { + u8 seq_number[3]; + u8 reserved; +}; + +/* PFCP session related messages */ +struct pfcphdr_session { + __be64 seid; + u8 seq_number[3]; +#ifdef __LITTLE_ENDIAN_BITFIELD + u8 message_priority:4, + reserved:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:4, + message_priority:4; +#else +#error "Please fix <asm/byteorder.h>" +#endif +}; + +struct pfcp_metadata { + u8 type; + __be64 seid; +} __packed; + +enum { + PFCP_TYPE_NODE = 0, + PFCP_TYPE_SESSION = 1, +}; + +#define PFCP_HEADROOM (sizeof(struct iphdr) + sizeof(struct udphdr) + \ + sizeof(struct pfcphdr) + sizeof(struct ethhdr)) +#define PFCP6_HEADROOM (sizeof(struct ipv6hdr) + sizeof(struct udphdr) + \ + sizeof(struct pfcphdr) + sizeof(struct ethhdr)) + +static inline struct pfcphdr *pfcp_hdr(struct sk_buff *skb) +{ + return (struct pfcphdr *)(udp_hdr(skb) + 1); +} + +static inline struct pfcphdr_node *pfcp_hdr_node(struct sk_buff *skb) +{ + return (struct pfcphdr_node *)(pfcp_hdr(skb) + 1); +} + +static inline struct pfcphdr_session *pfcp_hdr_session(struct sk_buff *skb) +{ + return (struct pfcphdr_session *)(pfcp_hdr(skb) + 1); +} + +static inline bool netif_is_pfcp(const struct net_device *dev) +{ + return dev->rtnl_link_ops && + !strcmp(dev->rtnl_link_ops->kind, "pfcp"); +} + +#endif diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a4ee43f493..41297bd38d 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -74,6 +74,15 @@ static inline bool tcf_block_non_null_shared(struct tcf_block *block) return block && block->index; } +#ifdef CONFIG_NET_CLS_ACT +DECLARE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key); + +static inline bool tcf_block_bypass_sw(struct tcf_block *block) +{ + return 
block && block->bypass_wanted; +} +#endif + static inline struct Qdisc *tcf_block_q(struct tcf_block *block) { WARN_ON(tcf_block_shared(block)); diff --git a/include/net/proto_memory.h b/include/net/proto_memory.h new file mode 100644 index 0000000000..a6ab2f4f5e --- /dev/null +++ b/include/net/proto_memory.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _PROTO_MEMORY_H +#define _PROTO_MEMORY_H + +#include <net/sock.h> +#include <net/hotdata.h> + +/* 1 MB per cpu, in page units */ +#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT)) + +static inline bool sk_has_memory_pressure(const struct sock *sk) +{ + return sk->sk_prot->memory_pressure != NULL; +} + +static inline bool +proto_memory_pressure(const struct proto *prot) +{ + if (!prot->memory_pressure) + return false; + return !!READ_ONCE(*prot->memory_pressure); +} + +static inline bool sk_under_global_memory_pressure(const struct sock *sk) +{ + return proto_memory_pressure(sk->sk_prot); +} + +static inline bool sk_under_memory_pressure(const struct sock *sk) +{ + if (!sk->sk_prot->memory_pressure) + return false; + + if (mem_cgroup_sockets_enabled && sk->sk_memcg && + mem_cgroup_under_socket_pressure(sk->sk_memcg)) + return true; + + return !!READ_ONCE(*sk->sk_prot->memory_pressure); +} + +static inline long +proto_memory_allocated(const struct proto *prot) +{ + return max(0L, atomic_long_read(prot->memory_allocated)); +} + +static inline long +sk_memory_allocated(const struct sock *sk) +{ + return proto_memory_allocated(sk->sk_prot); +} + +static inline void proto_memory_pcpu_drain(struct proto *proto) +{ + int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0); + + if (val) + atomic_long_add(val, proto->memory_allocated); +} + +static inline void +sk_memory_allocated_add(const struct sock *sk, int val) +{ + struct proto *proto = sk->sk_prot; + + val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val); + + if (unlikely(val >= READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv))) + proto_memory_pcpu_drain(proto); +} + +static inline void +sk_memory_allocated_sub(const struct sock *sk, int val) +{ + struct proto *proto = sk->sk_prot; + + val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val); + + if (unlikely(val <= -READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv))) + proto_memory_pcpu_drain(proto); +} + +#endif /* _PROTO_MEMORY_H */ diff --git a/include/net/red.h b/include/net/red.h index 425364de0d..802287d52c 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -233,10 +233,10 @@ static inline void red_set_parms(struct red_parms *p, int delta = qth_max - qth_min; u32 max_p_delta; - p->qth_min = qth_min << Wlog; - p->qth_max = qth_max << Wlog; - p->Wlog = Wlog; - p->Plog = Plog; + WRITE_ONCE(p->qth_min, qth_min << Wlog); + WRITE_ONCE(p->qth_max, qth_max << Wlog); + WRITE_ONCE(p->Wlog, Wlog); + WRITE_ONCE(p->Plog, Plog); if (delta <= 0) delta = 1; p->qth_delta = delta; @@ -244,7 +244,7 @@ static inline void red_set_parms(struct red_parms *p, max_P = red_maxp(Plog); max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */ } - p->max_P = max_P; + WRITE_ONCE(p->max_P, max_P); max_p_delta = max_P / delta; max_p_delta = max(max_p_delta, 1U); p->max_P_reciprocal = reciprocal_value(max_p_delta); @@ -257,7 +257,7 @@ static inline void red_set_parms(struct red_parms *p, p->target_min = qth_min + 2*delta; p->target_max = qth_min + 3*delta; - p->Scell_log = Scell_log; + WRITE_ONCE(p->Scell_log, Scell_log); p->Scell_max = (255 << Scell_log); if (stab) diff --git a/include/net/request_sock.h b/include/net/request_sock.h 
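Two properties of the per-CPU scheme above deserve a note: charges stay on the local CPU until the batch reaches sysctl_mem_pcpu_rsv, and unfolded per-CPU remainders can make the global counter transiently negative, which is why proto_memory_allocated() clamps with max(0L, ...). A hedged sketch:

#include <net/proto_memory.h>

static void example_charge(const struct sock *sk, int pages)
{
	/* usually only touches this CPU's counter; folds into
	 * proto->memory_allocated once the batch crosses the reserve
	 */
	sk_memory_allocated_add(sk, pages);
}

static long example_usage(const struct sock *sk)
{
	return sk_memory_allocated(sk);	/* clamped, never negative */
}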
index 004e651e60..ebcb8896bf 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -18,6 +18,7 @@ #include <linux/refcount.h> #include <net/sock.h> +#include <net/rstreason.h> struct request_sock; struct sk_buff; @@ -34,7 +35,8 @@ struct request_sock_ops { void (*send_ack)(const struct sock *sk, struct sk_buff *skb, struct request_sock *req); void (*send_reset)(const struct sock *sk, - struct sk_buff *skb); + struct sk_buff *skb, + enum sk_rst_reason reason); void (*destructor)(struct request_sock *req); void (*syn_ack_timeout)(const struct request_sock *req); }; @@ -127,12 +129,12 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb, } static inline struct request_sock * -reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, +reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener, bool attach_listener) { struct request_sock *req; - req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN); + req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN); if (!req) return NULL; req->rsk_listener = NULL; @@ -157,6 +159,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, return req; } +#define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__)) static inline void __reqsk_free(struct request_sock *req) { @@ -282,4 +285,16 @@ static inline int reqsk_queue_len_young(const struct request_sock_queue *queue) return atomic_read(&queue->young); } +/* RFC 7323 2.3 Using the Window Scale Option + * The window field (SEG.WND) of every outgoing segment, with the + * exception of <SYN> segments, MUST be right-shifted by + * Rcv.Wind.Shift bits. + * + * This means the SEG.WND carried in SYNACK can not exceed 65535. + * We use this property to harden TCP stack while in NEW_SYN_RECV state. + */ +static inline u32 tcp_synack_window(const struct request_sock *req) +{ + return min(req->rsk_rcv_wnd, 65535U); +} #endif /* _REQUEST_SOCK_H */ diff --git a/include/net/route.h b/include/net/route.h index af55401aa8..93833cfe9c 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -35,8 +35,6 @@ #include <linux/cache.h> #include <linux/security.h> -#define RTO_ONLINK 0x01 - static inline __u8 ip_sock_rt_scope(const struct sock *sk) { if (sock_flag(sk, SOCK_LOCALROUTE)) @@ -152,15 +150,22 @@ static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 return ip_route_output_flow(net, flp, NULL); } +/* Simplistic IPv4 route lookup function. + * This is only suitable for some particular use cases: since the flowi4 + * structure is only partially set, it may bypass some fib-rules. 
+ */ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr, - __be32 saddr, u8 tos, int oif) + __be32 saddr, u8 tos, int oif, + __u8 scope) { struct flowi4 fl4 = { .flowi4_oif = oif, .flowi4_tos = tos, + .flowi4_scope = scope, .daddr = daddr, .saddr = saddr, }; + return ip_route_output_key(net, &fl4); } diff --git a/include/net/rps.h b/include/net/rps.h index 7660243e90..a93401d23d 100644 --- a/include/net/rps.h +++ b/include/net/rps.h @@ -122,4 +122,32 @@ static inline void sock_rps_record_flow(const struct sock *sk) #endif } +static inline u32 rps_input_queue_tail_incr(struct softnet_data *sd) +{ +#ifdef CONFIG_RPS + return ++sd->input_queue_tail; +#else + return 0; +#endif +} + +static inline void rps_input_queue_tail_save(u32 *dest, u32 tail) +{ +#ifdef CONFIG_RPS + WRITE_ONCE(*dest, tail); +#endif +} + +static inline void rps_input_queue_head_add(struct softnet_data *sd, int val) +{ +#ifdef CONFIG_RPS + WRITE_ONCE(sd->input_queue_head, sd->input_queue_head + val); +#endif +} + +static inline void rps_input_queue_head_incr(struct softnet_data *sd) +{ + rps_input_queue_head_add(sd, 1); +} + #endif /* _NET_RPS_H */ diff --git a/include/net/rstreason.h b/include/net/rstreason.h new file mode 100644 index 0000000000..2575c85d7f --- /dev/null +++ b/include/net/rstreason.h @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _LINUX_RSTREASON_H +#define _LINUX_RSTREASON_H +#include <net/dropreason-core.h> +#include <uapi/linux/mptcp.h> + +#define DEFINE_RST_REASON(FN, FNe) \ FN(NOT_SPECIFIED) \ FN(NO_SOCKET) \ FN(TCP_INVALID_ACK_SEQUENCE) \ FN(TCP_RFC7323_PAWS) \ FN(TCP_TOO_OLD_ACK) \ FN(TCP_ACK_UNSENT_DATA) \ FN(TCP_FLAGS) \ FN(TCP_OLD_ACK) \ FN(TCP_ABORT_ON_DATA) \ FN(TCP_TIMEWAIT_SOCKET) \ FN(INVALID_SYN) \ FN(MPTCP_RST_EUNSPEC) \ FN(MPTCP_RST_EMPTCP) \ FN(MPTCP_RST_ERESOURCE) \ FN(MPTCP_RST_EPROHIBIT) \ FN(MPTCP_RST_EWQ2BIG) \ FN(MPTCP_RST_EBADPERF) \ FN(MPTCP_RST_EMIDDLEBOX) \ FN(ERROR) \ FNe(MAX) + +/** + * enum sk_rst_reason - the reasons for a socket reset + * + * The reasons for a socket reset, used by the DCCP/TCP/MPTCP protocols. + * + * There are three parts in order: + * 1) skb drop reasons: relying on drop reasons for cases such as a passive reset + * 2) independent reset reasons: such as active reset reasons + * 3) reset reasons in MPTCP: only for MPTCP use + */ +enum sk_rst_reason { + /* Refer to include/net/dropreason-core.h + * Rely on skb drop reasons because they indicate exactly why a RST + * could happen.
+ */ + /** @SK_RST_REASON_NOT_SPECIFIED: reset reason is not specified */ + SK_RST_REASON_NOT_SPECIFIED, + /** @SK_RST_REASON_NO_SOCKET: no valid socket that can be used */ + SK_RST_REASON_NO_SOCKET, + /** + * @SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE: Not acceptable ACK SEQ + * field because ack sequence is not in the window between snd_una + * and snd_nxt + */ + SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE, + /** + * @SK_RST_REASON_TCP_RFC7323_PAWS: PAWS check, corresponding to + * LINUX_MIB_PAWSESTABREJECTED, LINUX_MIB_PAWSACTIVEREJECTED + */ + SK_RST_REASON_TCP_RFC7323_PAWS, + /** @SK_RST_REASON_TCP_TOO_OLD_ACK: TCP ACK is too old */ + SK_RST_REASON_TCP_TOO_OLD_ACK, + /** + * @SK_RST_REASON_TCP_ACK_UNSENT_DATA: TCP ACK for data we haven't + * sent yet + */ + SK_RST_REASON_TCP_ACK_UNSENT_DATA, + /** @SK_RST_REASON_TCP_FLAGS: TCP flags invalid */ + SK_RST_REASON_TCP_FLAGS, + /** @SK_RST_REASON_TCP_OLD_ACK: TCP ACK is old, but in window */ + SK_RST_REASON_TCP_OLD_ACK, + /** + * @SK_RST_REASON_TCP_ABORT_ON_DATA: abort on data + * corresponding to LINUX_MIB_TCPABORTONDATA + */ + SK_RST_REASON_TCP_ABORT_ON_DATA, + + /* Here begin the independent reset reasons */ + /** @SK_RST_REASON_TCP_TIMEWAIT_SOCKET: happens on the timewait socket */ + SK_RST_REASON_TCP_TIMEWAIT_SOCKET, + /** + * @SK_RST_REASON_INVALID_SYN: received a bad SYN packet + * RFC 793 says if the state is not CLOSED/LISTEN/SYN-SENT then + * "fourth, check the SYN bit,...If the SYN is in the window it is + * an error, send a reset" + */ + SK_RST_REASON_INVALID_SYN, + + /* Copy from include/uapi/linux/mptcp.h. + * These reset fields will not be changed since they adhere to + * RFC 8684, so do not touch them. Each definition is listed + * below. + */ + /** + * @SK_RST_REASON_MPTCP_RST_EUNSPEC: Unspecified error. + * This is the default error; it implies that the subflow is no + * longer available. The presence of this option shows that the + * RST was generated by an MPTCP-aware device. + */ + SK_RST_REASON_MPTCP_RST_EUNSPEC, + /** + * @SK_RST_REASON_MPTCP_RST_EMPTCP: MPTCP-specific error. + * An error has been detected in the processing of MPTCP options. + * This is the usual reason code to return in the cases where a RST + * is being sent to close a subflow because of an invalid response. + */ + SK_RST_REASON_MPTCP_RST_EMPTCP, + /** + * @SK_RST_REASON_MPTCP_RST_ERESOURCE: Lack of resources. + * This code indicates that the sending host does not have enough + * resources to support the terminated subflow. + */ + SK_RST_REASON_MPTCP_RST_ERESOURCE, + /** + * @SK_RST_REASON_MPTCP_RST_EPROHIBIT: Administratively prohibited. + * This code indicates that the requested subflow is prohibited by + * the policies of the sending host. + */ + SK_RST_REASON_MPTCP_RST_EPROHIBIT, + /** + * @SK_RST_REASON_MPTCP_RST_EWQ2BIG: Too much outstanding data. + * This code indicates that there is an excessive amount of data + * that needs to be transmitted over the terminated subflow while + * having already been acknowledged over one or more other subflows. + * This may occur if a path has been unavailable for a short period + * and it is more efficient to reset and start again than it is to + * retransmit the queued data. + */ + SK_RST_REASON_MPTCP_RST_EWQ2BIG, + /** + * @SK_RST_REASON_MPTCP_RST_EBADPERF: Unacceptable performance. + * This code indicates that the performance of this subflow was + * too low compared to the other subflows of this Multipath TCP + * connection.
+ */ + SK_RST_REASON_MPTCP_RST_EBADPERF, + /** + * @SK_RST_REASON_MPTCP_RST_EMIDDLEBOX: Middlebox interference. + * Middlebox interference has been detected over this subflow, + * making MPTCP signaling invalid. For example, this may be sent + * if the checksum does not validate. + */ + SK_RST_REASON_MPTCP_RST_EMIDDLEBOX, + + /** @SK_RST_REASON_ERROR: unexpected error happens */ + SK_RST_REASON_ERROR, + + /** + * @SK_RST_REASON_MAX: Maximum of socket reset reasons. + * It shouldn't be used as a real 'reason'. + */ + SK_RST_REASON_MAX, +}; + +/* Convert skb drop reasons to enum sk_rst_reason type */ +static inline enum sk_rst_reason +sk_rst_convert_drop_reason(enum skb_drop_reason reason) +{ + switch (reason) { + case SKB_DROP_REASON_NOT_SPECIFIED: + return SK_RST_REASON_NOT_SPECIFIED; + case SKB_DROP_REASON_NO_SOCKET: + return SK_RST_REASON_NO_SOCKET; + case SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE: + return SK_RST_REASON_TCP_INVALID_ACK_SEQUENCE; + case SKB_DROP_REASON_TCP_RFC7323_PAWS: + return SK_RST_REASON_TCP_RFC7323_PAWS; + case SKB_DROP_REASON_TCP_TOO_OLD_ACK: + return SK_RST_REASON_TCP_TOO_OLD_ACK; + case SKB_DROP_REASON_TCP_ACK_UNSENT_DATA: + return SK_RST_REASON_TCP_ACK_UNSENT_DATA; + case SKB_DROP_REASON_TCP_FLAGS: + return SK_RST_REASON_TCP_FLAGS; + case SKB_DROP_REASON_TCP_OLD_ACK: + return SK_RST_REASON_TCP_OLD_ACK; + case SKB_DROP_REASON_TCP_ABORT_ON_DATA: + return SK_RST_REASON_TCP_ABORT_ON_DATA; + default: + /* If we don't have our own corresponding reason */ + return SK_RST_REASON_NOT_SPECIFIED; + } } +#endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 0014b9ee5e..79edd5b5e3 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -424,6 +424,7 @@ struct tcf_proto { */ spinlock_t lock; bool deleting; + bool counted; refcount_t refcnt; struct rcu_head rcu; struct hlist_node destroy_ht_node; @@ -473,6 +474,9 @@ struct tcf_block { struct flow_block flow_block; struct list_head owner_list; bool keep_dst; + bool bypass_wanted; + atomic_t filtercnt; /* Number of filters */ + atomic_t skipswcnt; /* Number of skip_sw filters */ atomic_t offloadcnt; /* Number of offloaded filters */ unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */ unsigned int lockeddevcnt; /* Number of devs that require rtnl lock.
*/ diff --git a/include/net/scm.h b/include/net/scm.h index 92276a2c55..0d35c7c77a 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -23,10 +23,20 @@ struct scm_creds { kgid_t gid; }; +#ifdef CONFIG_UNIX +struct unix_edge; +#endif + struct scm_fp_list { short count; short count_unix; short max; +#ifdef CONFIG_UNIX + bool inflight; + bool dead; + struct list_head vertices; + struct unix_edge *edges; +#endif struct user_struct *user; struct file *fp[SCM_MAX_FD]; }; diff --git a/include/net/smc.h b/include/net/smc.h index c9dcb30e3f..db84e4e350 100644 --- a/include/net/smc.h +++ b/include/net/smc.h @@ -26,9 +26,6 @@ struct smc_hashinfo { struct hlist_head ht; }; -int smc_hash_sk(struct sock *sk); -void smc_unhash_sk(struct sock *sk); - /* SMCD/ISM device driver interface */ struct smcd_dmb { u64 dmb_tok; @@ -50,7 +47,6 @@ struct smcd_dmb { #define ISM_ERROR 0xFFFF struct smcd_dev; -struct ism_client; struct smcd_gid { u64 gid; @@ -61,14 +57,8 @@ struct smcd_ops { int (*query_remote_gid)(struct smcd_dev *dev, struct smcd_gid *rgid, u32 vid_valid, u32 vid); int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb, - struct ism_client *client); + void *client); int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb); - int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id); - int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id); - int (*set_vlan_required)(struct smcd_dev *dev); - int (*reset_vlan_required)(struct smcd_dev *dev); - int (*signal_event)(struct smcd_dev *dev, struct smcd_gid *rgid, - u32 trigger_irq, u32 event_code, u64 info); int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx, bool sf, unsigned int offset, void *data, unsigned int size); @@ -76,11 +66,23 @@ struct smcd_ops { void (*get_local_gid)(struct smcd_dev *dev, struct smcd_gid *gid); u16 (*get_chid)(struct smcd_dev *dev); struct device* (*get_dev)(struct smcd_dev *dev); + + /* optional operations */ + int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id); + int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id); + int (*set_vlan_required)(struct smcd_dev *dev); + int (*reset_vlan_required)(struct smcd_dev *dev); + int (*signal_event)(struct smcd_dev *dev, struct smcd_gid *rgid, + u32 trigger_irq, u32 event_code, u64 info); + int (*support_dmb_nocopy)(struct smcd_dev *dev); + int (*attach_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb); + int (*detach_dmb)(struct smcd_dev *dev, u64 token); }; struct smcd_dev { const struct smcd_ops *ops; void *priv; + void *client; struct list_head list; spinlock_t lock; struct smc_connection **conn; diff --git a/include/net/sock.h b/include/net/sock.h index 944f71a8ab..953c8dc4e2 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1194,6 +1194,13 @@ static inline void sk_prot_clear_nulls(struct sock *sk, int size) size - offsetof(struct sock, sk_node.pprev)); } +struct proto_accept_arg { + int flags; + int err; + int is_empty; + bool kern; +}; + /* Networking protocol blocks we attach to sockets. 
* socket layer -> transport layer interface */ @@ -1208,8 +1215,8 @@ struct proto { int addr_len); int (*disconnect)(struct sock *sk, int flags); - struct sock * (*accept)(struct sock *sk, int flags, int *err, - bool kern); + struct sock * (*accept)(struct sock *sk, + struct proto_accept_arg *arg); int (*ioctl)(struct sock *sk, int cmd, int *karg); @@ -1371,75 +1378,6 @@ static inline int sk_under_cgroup_hierarchy(struct sock *sk, #endif } -static inline bool sk_has_memory_pressure(const struct sock *sk) -{ - return sk->sk_prot->memory_pressure != NULL; -} - -static inline bool sk_under_global_memory_pressure(const struct sock *sk) -{ - return sk->sk_prot->memory_pressure && - !!READ_ONCE(*sk->sk_prot->memory_pressure); -} - -static inline bool sk_under_memory_pressure(const struct sock *sk) -{ - if (!sk->sk_prot->memory_pressure) - return false; - - if (mem_cgroup_sockets_enabled && sk->sk_memcg && - mem_cgroup_under_socket_pressure(sk->sk_memcg)) - return true; - - return !!READ_ONCE(*sk->sk_prot->memory_pressure); -} - -static inline long -proto_memory_allocated(const struct proto *prot) -{ - return max(0L, atomic_long_read(prot->memory_allocated)); -} - -static inline long -sk_memory_allocated(const struct sock *sk) -{ - return proto_memory_allocated(sk->sk_prot); -} - -/* 1 MB per cpu, in page units */ -#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT)) -extern int sysctl_mem_pcpu_rsv; - -static inline void proto_memory_pcpu_drain(struct proto *proto) -{ - int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0); - - if (val) - atomic_long_add(val, proto->memory_allocated); -} - -static inline void -sk_memory_allocated_add(const struct sock *sk, int val) -{ - struct proto *proto = sk->sk_prot; - - val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val); - - if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv))) - proto_memory_pcpu_drain(proto); -} - -static inline void -sk_memory_allocated_sub(const struct sock *sk, int val) -{ - struct proto *proto = sk->sk_prot; - - val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val); - - if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv))) - proto_memory_pcpu_drain(proto); -} - #define SK_ALLOC_PERCPU_COUNTER_BATCH 16 static inline void sk_sockets_allocated_dec(struct sock *sk) @@ -1466,15 +1404,6 @@ proto_sockets_allocated_sum_positive(struct proto *prot) return percpu_counter_sum_positive(prot->sockets_allocated); } -static inline bool -proto_memory_pressure(struct proto *prot) -{ - if (!prot->memory_pressure) - return false; - return !!READ_ONCE(*prot->memory_pressure); -} - - #ifdef CONFIG_PROC_FS #define PROTO_INUSE_NR 64 /* should be enough for the first time */ struct prot_inuse { @@ -1882,7 +1811,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg, int sock_no_bind(struct socket *, struct sockaddr *, int); int sock_no_connect(struct socket *, struct sockaddr *, int, int); int sock_no_socketpair(struct socket *, struct socket *); -int sock_no_accept(struct socket *, struct socket *, int, bool); +int sock_no_accept(struct socket *, struct socket *, struct proto_accept_arg *); int sock_no_getname(struct socket *, struct sockaddr *, int); int sock_no_ioctl(struct socket *, unsigned int, unsigned long); int sock_no_listen(struct socket *, int); @@ -2508,6 +2437,12 @@ static inline void sk_wake_async(const struct sock *sk, int how, int band) } } +static inline void sk_wake_async_rcu(const struct sock *sk, int how, int band) +{ + if (unlikely(sock_flag(sk, SOCK_FASYNC))) + sock_wake_async(rcu_dereference(sk->sk_wq), how, band); +} + 
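A minimal sketch of the reworked accept() contract above, assuming a hypothetical caller (example_do_accept is not in the tree): the flags/kern inputs now travel in struct proto_accept_arg, and the error comes back in-band through arg.err instead of an int pointer, with arg.is_empty telling callers such as io_uring whether the queue was simply empty.

static struct sock *example_do_accept(struct sock *sk, int flags, bool kern)
{
	struct proto_accept_arg arg = {
		.flags = flags,
		.kern = kern,
	};
	struct sock *newsk;

	newsk = sk->sk_prot->accept(sk, &arg);	/* new single-argument form */
	if (!newsk)
		pr_debug("accept: err=%d is_empty=%d\n", arg.err, arg.is_empty);
	return newsk;
}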
/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might * need sizeof(sk_buff) + MTU + padding, unless net drivers perform copybreak. * Note: for send buffers, TCP works better if we can build two skbs at @@ -2824,12 +2759,10 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { skb = sk->sk_validate_xmit_skb(sk, dev, skb); -#ifdef CONFIG_TLS_DEVICE - } else if (unlikely(skb->decrypted)) { + } else if (unlikely(skb_is_decrypted(skb))) { pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n"); kfree_skb(skb); skb = NULL; -#endif } #endif diff --git a/include/net/tcp.h b/include/net/tcp.h index 2bcf30381d..32815a40de 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -52,6 +52,8 @@ extern struct inet_hashinfo tcp_hashinfo; DECLARE_PER_CPU(unsigned int, tcp_orphan_count); int tcp_orphan_count_sum(void); +DECLARE_PER_CPU(u32, tcp_tw_isn); + void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER) @@ -294,14 +296,6 @@ static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3) return seq3 - seq2 >= seq1 - seq2; } -static inline bool tcp_out_of_memory(struct sock *sk) -{ - if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && - sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) - return true; - return false; -} - static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb) { sk_wmem_queued_add(sk, -skb->truesize); @@ -314,7 +308,7 @@ static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb) void sk_forced_mem_schedule(struct sock *sk, int size); -bool tcp_check_oom(struct sock *sk, int shift); +bool tcp_check_oom(const struct sock *sk, int shift); extern struct proto tcp_prot; @@ -353,7 +347,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb); void tcp_rcv_space_adjust(struct sock *sk); int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); void tcp_twsk_destructor(struct sock *sk); -void tcp_twsk_purge(struct list_head *net_exit_list, int family); +void tcp_twsk_purge(struct list_head *net_exit_list); ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); @@ -392,7 +386,8 @@ enum tcp_tw_status { enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, - const struct tcphdr *th); + const struct tcphdr *th, + u32 *tw_isn); struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, struct request_sock *req, bool fastopen, bool *lost_race); @@ -667,7 +662,8 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, void tcp_send_probe0(struct sock *); int tcp_write_wakeup(struct sock *, int mib); void tcp_send_fin(struct sock *sk); -void tcp_send_active_reset(struct sock *sk, gfp_t priority); +void tcp_send_active_reset(struct sock *sk, gfp_t priority, + enum sk_rst_reason reason); int tcp_send_synack(struct sock *); void tcp_push_one(struct sock *, unsigned int mss_now); void __tcp_send_ack(struct sock *sk, u32 rcv_nxt); @@ -681,6 +677,7 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb, /* tcp_input.c */ void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); +void tcp_done_with_error(struct sock *sk, int err); void tcp_reset(struct sock *sk, struct sk_buff *skb); void tcp_fin(struct sock *sk); void tcp_check_space(struct sock *sk); @@ -742,7 +739,7 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss); void tcp_mtup_init(struct sock *sk); -static inline void tcp_bound_rto(const struct sock *sk) +static inline void tcp_bound_rto(struct sock *sk) { if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) inet_csk(sk)->icsk_rto = TCP_RTO_MAX; @@ -925,6 +922,19 @@ static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq) #define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR) +/* State flags for sacked in struct tcp_skb_cb */ +enum tcp_skb_cb_sacked_flags { + TCPCB_SACKED_ACKED = (1 << 0), /* SKB ACK'd by a SACK block */ + TCPCB_SACKED_RETRANS = (1 << 1), /* SKB retransmitted */ + TCPCB_LOST = (1 << 2), /* SKB is lost */ + TCPCB_TAGBITS = (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS | + TCPCB_LOST), /* All tag bits */ + TCPCB_REPAIRED = (1 << 4), /* SKB repaired (no skb_mstamp_ns) */ + TCPCB_EVER_RETRANS = (1 << 7), /* Ever retransmitted frame */ + TCPCB_RETRANS = (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS | + TCPCB_REPAIRED), +}; + /* This is what the send packet queuing engine uses to pass * TCP per-packet control information to the transmission code. * We also store the host-order sequence numbers in here too. @@ -935,13 +945,10 @@ struct tcp_skb_cb { __u32 seq; /* Starting sequence number */ __u32 end_seq; /* SEQ + FIN + SYN + datalen */ union { - /* Note : tcp_tw_isn is used in input path only - * (isn chosen by tcp_timewait_state_process()) - * + /* Note : * tcp_gso_segs/size are used in write queue only, * cf tcp_skb_pcount()/tcp_skb_mss() */ - __u32 tcp_tw_isn; struct { u16 tcp_gso_segs; u16 tcp_gso_size; @@ -950,15 +957,6 @@ struct tcp_skb_cb { __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ __u8 sacked; /* State flags for SACK. */ -#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */ -#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ -#define TCPCB_LOST 0x04 /* SKB is lost */ -#define TCPCB_TAGBITS 0x07 /* All tag bits */ -#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */ -#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ -#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \ - TCPCB_REPAIRED) - __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ __u8 txstamp_ack:1, /* Record TX timestamp for ack? */ eor:1, /* Is skb MSG_EOR marked? */ @@ -1167,7 +1165,7 @@ struct tcp_congestion_ops { /* call when packets are delivered to update cwnd and pacing rate, * after all the ca_state processing. 
(optional) */ - void (*cong_control)(struct sock *sk, const struct rate_sample *rs); + void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs); /* new value of cwnd after loss (required) */ @@ -2194,7 +2192,10 @@ void tcp_v4_destroy_sock(struct sock *sk); struct sk_buff *tcp_gso_segment(struct sk_buff *skb, netdev_features_t features); -struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb); +struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb); +struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th); +struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb, + struct tcphdr *th); INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff)); @@ -2283,7 +2284,8 @@ struct tcp_request_sock_ops { struct dst_entry *(*route_req)(const struct sock *sk, struct sk_buff *skb, struct flowi *fl, - struct request_sock *req); + struct request_sock *req, + u32 tw_isn); u32 (*init_seq)(const struct sk_buff *skb); u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb); int (*send_synack)(const struct sock *sk, struct dst_entry *dst, @@ -2705,10 +2707,10 @@ static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1); } -static inline void tcp_bpf_rtt(struct sock *sk) +static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt) { if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG)) - tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL); + tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt); } #if IS_ENABLED(CONFIG_SMC) diff --git a/include/net/tcx.h b/include/net/tcx.h index 0a5f40a91c..5ce0ce9e0c 100644 --- a/include/net/tcx.h +++ b/include/net/tcx.h @@ -75,9 +75,9 @@ tcx_entry_fetch(struct net_device *dev, bool ingress) return rcu_dereference_rtnl(dev->tcx_egress); } -static inline struct bpf_mprog_entry *tcx_entry_create(void) +static inline struct bpf_mprog_entry *tcx_entry_create_noprof(void) { - struct tcx_entry *tcx = kzalloc(sizeof(*tcx), GFP_KERNEL); + struct tcx_entry *tcx = kzalloc_noprof(sizeof(*tcx), GFP_KERNEL); if (tcx) { bpf_mprog_bundle_init(&tcx->bundle); @@ -85,6 +85,7 @@ static inline struct bpf_mprog_entry *tcx_entry_create(void) } return NULL; } +#define tcx_entry_create(...) 
alloc_hooks(tcx_entry_create_noprof(__VA_ARGS__)) static inline void tcx_entry_free(struct bpf_mprog_entry *entry) { diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h index 74d2b463cc..62b3e9f2ae 100644 --- a/include/net/timewait_sock.h +++ b/include/net/timewait_sock.h @@ -15,18 +15,9 @@ struct timewait_sock_ops { struct kmem_cache *twsk_slab; char *twsk_slab_name; unsigned int twsk_obj_size; - int (*twsk_unique)(struct sock *sk, - struct sock *sktw, void *twp); void (*twsk_destructor)(struct sock *sk); }; -static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) -{ - if (sk->sk_prot->twsk_prot->twsk_unique != NULL) - return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp); - return 0; -} - static inline void twsk_destructor(struct sock *sk) { if (sk->sk_prot->twsk_prot->twsk_destructor != NULL) diff --git a/include/net/tls.h b/include/net/tls.h index 33f657d3c0..3a33924db2 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -362,7 +362,7 @@ static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb) static inline struct tls_context *tls_get_ctx(const struct sock *sk) { - struct inet_connection_sock *icsk = inet_csk(sk); + const struct inet_connection_sock *icsk = inet_csk(sk); /* Use RCU on icsk_ulp_data only for sock diag code, * TLS data path doesn't need rcu_dereference(). diff --git a/include/net/udp.h b/include/net/udp.h index 488a6d2bab..c4e05b14b6 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -379,14 +379,7 @@ static inline bool udp_skb_is_linear(struct sk_buff *skb) static inline int copy_linear_skb(struct sk_buff *skb, int len, int off, struct iov_iter *to) { - int n; - - n = copy_to_iter(skb->data + off, len, to); - if (n == len) - return 0; - - iov_iter_revert(to, n); - return -EFAULT; + return copy_to_iter_full(skb->data + off, len, to) ? 
0 : -EFAULT; } /* diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index d716214fe0..a93dc51f63 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -179,8 +179,8 @@ struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb, struct dst_cache *dst_cache); struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, - __be16 flags, __be64 tunnel_id, - int md_size); + const unsigned long *flags, + __be64 tunnel_id, int md_size); #ifdef CONFIG_INET static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum) diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index c9aec9ab61..0a5dca2b2b 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -219,13 +219,10 @@ static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool return meta; } -static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool) +static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp) { struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); - if (!pool->dma_need_sync) - return; - xp_dma_sync_for_cpu(xskb); } @@ -402,7 +399,7 @@ static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool return NULL; } -static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool) +static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp) { } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index cb4841a9ff..7d4c223525 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -178,7 +178,10 @@ struct xfrm_state { struct hlist_node gclist; struct hlist_node bydst; }; - struct hlist_node bysrc; + union { + struct hlist_node dev_gclist; + struct hlist_node bysrc; + }; struct hlist_node byspi; struct hlist_node byseq; @@ -291,6 +294,7 @@ struct xfrm_state { /* Private data of this transformer, format is opaque, * interpreted by xfrm_type methods. 
*/ void *data; + u8 dir; }; static inline struct net *xs_net(struct xfrm_state *x) @@ -1587,7 +1591,7 @@ void xfrm_state_update_stats(struct net *net); static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) { struct xfrm_dev_offload *xdo = &x->xso; - struct net_device *dev = xdo->dev; + struct net_device *dev = READ_ONCE(xdo->dev); if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_state_update_stats) @@ -1945,13 +1949,16 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp, struct xfrm_user_offload *xuo, u8 dir, struct netlink_ext_ack *extack); bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x); +void xfrm_dev_state_delete(struct xfrm_state *x); +void xfrm_dev_state_free(struct xfrm_state *x); static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x) { struct xfrm_dev_offload *xso = &x->xso; + struct net_device *dev = READ_ONCE(xso->dev); - if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn) - xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); + if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn) + dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); } static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) @@ -1972,28 +1979,6 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) return false; } -static inline void xfrm_dev_state_delete(struct xfrm_state *x) -{ - struct xfrm_dev_offload *xso = &x->xso; - - if (xso->dev) - xso->dev->xfrmdev_ops->xdo_dev_state_delete(x); -} - -static inline void xfrm_dev_state_free(struct xfrm_state *x) -{ - struct xfrm_dev_offload *xso = &x->xso; - struct net_device *dev = xso->dev; - - if (dev && dev->xfrmdev_ops) { - if (dev->xfrmdev_ops->xdo_dev_state_free) - dev->xfrmdev_ops->xdo_dev_state_free(x); - xso->dev = NULL; - xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; - netdev_put(dev, &xso->dev_tracker); - } -} - static inline void xfrm_dev_policy_delete(struct xfrm_policy *x) { struct xfrm_dev_offload *xdo = &x->xdo; diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index 99dd7376df..bacb33f1e3 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -43,7 +43,6 @@ struct xsk_dma_map { refcount_t users; struct list_head list; /* Protected by the RTNL_LOCK */ u32 dma_pages_cnt; - bool dma_need_sync; }; struct xsk_buff_pool { @@ -82,7 +81,6 @@ struct xsk_buff_pool { u8 tx_metadata_len; /* inherited from umem */ u8 cached_need_wakeup; bool uses_need_wakeup; - bool dma_need_sync; bool unaligned; bool tx_sw_csum; void *addrs; @@ -155,21 +153,17 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb) return xskb->frame_dma; } -void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb); static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb) { - xp_dma_sync_for_cpu_slow(xskb); + dma_sync_single_for_cpu(xskb->pool->dev, xskb->dma, + xskb->pool->frame_len, + DMA_BIDIRECTIONAL); } -void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma, - size_t size); static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool, dma_addr_t dma, size_t size) { - if (!pool->dma_need_sync) - return; - - xp_dma_sync_for_device_slow(pool, dma, size); + dma_sync_single_for_device(pool->dev, dma, size, DMA_BIDIRECTIONAL); } /* Masks for xdp_umem_page flags. 
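The xsk_buff_pool.h hunk above removes the dma_need_sync fast path: xp_dma_sync_for_cpu() and xp_dma_sync_for_device() now call dma_sync_single_for_{cpu,device}() unconditionally and let the DMA core no-op when the mapping needs no sync. A hedged sketch of a driver Rx path under the matching xdp_sock_drv.h change (example_xsk_rx_one is invented; xsk_buff_set_size() and the one-argument xsk_buff_dma_sync_for_cpu() are the real helpers):

static void example_xsk_rx_one(struct xdp_buff *xdp, u32 len)
{
	xsk_buff_set_size(xdp, len);
	xsk_buff_dma_sync_for_cpu(xdp);	/* pool argument dropped in this release */
	/* ... run the XDP program; xsk_buff_free(xdp) on drop ... */
}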
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h index c011ea236e..7c47151d5c 100644 --- a/include/ras/ras_event.h +++ b/include/ras/ras_event.h @@ -61,7 +61,7 @@ TRACE_EVENT(extlog_mem_event, else __entry->pa_mask_lsb = ~0; __entry->fru_id = *fru_id; - __assign_str(fru_text, fru_text); + __assign_str(fru_text); cper_mem_err_pack(mem, &__entry->data); ), @@ -131,8 +131,8 @@ TRACE_EVENT(mc_event, TP_fast_assign( __entry->error_type = err_type; - __assign_str(msg, error_msg); - __assign_str(label, label); + __assign_str(msg); + __assign_str(label); __entry->error_count = error_count; __entry->mc_index = mc_index; __entry->top_layer = top_layer; @@ -141,7 +141,7 @@ TRACE_EVENT(mc_event, __entry->address = address; __entry->grain_bits = grain_bits; __entry->syndrome = syndrome; - __assign_str(driver_detail, driver_detail); + __assign_str(driver_detail); ), TP_printk("%d %s error%s:%s%s on %s (mc:%d location:%d:%d:%d address:0x%08lx grain:%d syndrome:0x%08lx%s%s)", @@ -239,7 +239,7 @@ TRACE_EVENT(non_standard_event, TP_fast_assign( memcpy(__entry->sec_type, sec_type, UUID_SIZE); memcpy(__entry->fru_id, fru_id, UUID_SIZE); - __assign_str(fru_text, fru_text); + __assign_str(fru_text); __entry->sev = sev; __entry->len = len; memcpy(__get_dynamic_array(buf), err, len); @@ -313,7 +313,7 @@ TRACE_EVENT(aer_event, ), TP_fast_assign( - __assign_str(dev_name, dev_name); + __assign_str(dev_name); __entry->status = status; __entry->severity = severity; __entry->tlp_header_valid = tlp_header_valid; diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 2e58d5e6ac..d678929441 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -11,6 +11,7 @@ #include <rdma/ib_verbs.h> #include <rdma/rdmavt_cq.h> #include <rdma/rvt-abi.h> +#include <linux/vmalloc.h> /* * Atomic bit definitions for r_aflags. 
*/ diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index 8b7c46daeb..0d69ded73b 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h @@ -14,6 +14,9 @@ #include <uapi/rdma/rdma_netlink.h> #include <linux/xarray.h> +/* Mark entry as containing driver specific details, it is used to provide QP subtype for now */ +#define RESTRACK_DD XA_MARK_1 + struct ib_device; struct sk_buff; @@ -116,8 +119,8 @@ struct rdma_restrack_entry { u32 id; }; -int rdma_restrack_count(struct ib_device *dev, - enum rdma_restrack_type type); +int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type, + bool show_details); /** * rdma_is_kernel_res() - check the owner of resource * @res: resource entry diff --git a/include/scsi/iser.h b/include/scsi/iser.h index 2e678fa74e..07a83bfa2b 100644 --- a/include/scsi/iser.h +++ b/include/scsi/iser.h @@ -63,7 +63,7 @@ struct iser_cm_hdr { * @rsvd: reserved * @write_stag: write rkey * @write_va: write virtual address - * @reaf_stag: read rkey + * @read_stag: read rkey * @read_va: read virtual address */ struct iser_ctrl { diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index eca6fd42d7..4a9b4169e0 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -44,11 +44,16 @@ * @LPORT_ST_DISABLED: Disabled * @LPORT_ST_FLOGI: Fabric login (FLOGI) sent * @LPORT_ST_DNS: Waiting for name server remote port to become ready - * @LPORT_ST_RPN_ID: Register port name by ID (RPN_ID) sent + * @LPORT_ST_RNN_ID: Register port name by ID (RNN_ID) sent + * @LPORT_ST_RSNN_NN: Waiting for host symbolic node name + * @LPORT_ST_RSPN_ID: Waiting for host symbolic port name * @LPORT_ST_RFT_ID: Register Fibre Channel types by ID (RFT_ID) sent * @LPORT_ST_RFF_ID: Register FC-4 Features by ID (RFF_ID) sent * @LPORT_ST_FDMI: Waiting for mgmt server rport to become ready - * @LPORT_ST_RHBA: + * @LPORT_ST_RHBA: Register HBA + * @LPORT_ST_RPA: Register Port Attributes + * @LPORT_ST_DHBA: Deregister HBA + * @LPORT_ST_DPRT: Deregister Port * @LPORT_ST_SCR: State Change Register (SCR) sent * @LPORT_ST_READY: Ready for use * @LPORT_ST_LOGO: Local port logout (LOGO) sent @@ -183,7 +188,7 @@ struct fc_rport_libfc_priv { * @r_a_tov: Resource allocation timeout value (in msec) * @rp_mutex: The mutex that protects the remote port * @retry_work: Handle for retries - * @event_callback: Callback when READY, FAILED or LOGO states complete + * @lld_event_callback: Callback when READY, FAILED or LOGO states complete * @prli_count: Count of open PRLI sessions in providers * @rcu: Structure used for freeing in an RCU-safe manner */ @@ -289,6 +294,7 @@ struct fc_seq_els_data { * @timer: The command timer * @tm_done: Completion indicator * @wait_for_comp: Indicator to wait for completion of the I/O (in jiffies) + * @timer_delay: FCP packet timer delay in jiffies * @data_len: The length of the data * @cdb_cmd: The CDB command * @xfer_len: The transfer length @@ -788,6 +794,8 @@ void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *); /** * fc_lport_test_ready() - Determine if a local port is in the READY state * @lport: The local port to test + * + * Returns: %true if local port is in the READY state, %false otherwise */ static inline int fc_lport_test_ready(struct fc_lport *lport) { @@ -830,6 +838,8 @@ static inline void fc_lport_state_enter(struct fc_lport *lport, /** * fc_lport_init_stats() - Allocate per-CPU statistics for a local port * @lport: The local port whose statistics are to be initialized + * + * Returns: %0 on success, %-ENOMEM on failure 
*/ static inline int fc_lport_init_stats(struct fc_lport *lport) { @@ -851,6 +861,8 @@ static inline void fc_lport_free_stats(struct fc_lport *lport) /** * lport_priv() - Return the private data from a local port * @lport: The local port whose private data is to be retrieved + * + * Returns: the local port's private data pointer */ static inline void *lport_priv(const struct fc_lport *lport) { diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 8300ef1a98..3c5899290a 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -157,7 +157,9 @@ struct fcoe_ctlr { /** * fcoe_ctlr_priv() - Return the private data from a fcoe_ctlr - * @cltr: The fcoe_ctlr whose private data will be returned + * @ctlr: The fcoe_ctlr whose private data will be returned + * + * Returns: pointer to the private data */ static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr) { @@ -174,7 +176,6 @@ static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr) * struct fcoe_fcf - Fibre-Channel Forwarder * @list: list linkage * @event_work: Work for FC Transport actions queue - * @event: The event to be processed * @fip: The controller that the FCF was discovered on * @fcf_dev: The associated fcoe_fcf_device instance * @time: system time (jiffies) when an advertisement was last received @@ -188,6 +189,7 @@ static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr) * @flogi_sent: current FLOGI sent to this FCF * @flags: flags received from advertisement * @fka_period: keep-alive period, in jiffies + * @fd_flags: no need for FKA from ENode * * A Fibre-Channel Forwarder (FCF) is the entity on the Ethernet that * passes FCoE frames on to an FC fabric. This structure represents @@ -222,6 +224,7 @@ struct fcoe_fcf { /** * struct fcoe_rport - VN2VN remote port + * @rdata: libfc remote port private data * @time: time of create or last beacon packet received from node * @fcoe_len: max FCoE frame size, not including VLAN or Ethernet headers * @flags: flags from probe or claim @@ -266,8 +269,10 @@ void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev); /** - * is_fip_mode() - returns true if FIP mode selected. + * is_fip_mode() - test if FIP mode selected. * @fip: FCoE controller. + * + * Returns: %true if FIP mode is selected */ static inline bool is_fip_mode(struct fcoe_ctlr *fip) { @@ -318,9 +323,10 @@ struct fcoe_transport { * @kthread: The thread context (used by bnx2fc) * @work: The work item (used by fcoe) * @fcoe_rx_list: The queue of pending packets to process - * @page: The memory page for calculating frame trailer CRCs + * @crc_eof_page: The memory page for calculating frame trailer CRCs * @crc_eof_offset: The offset into the CRC page pointing to available * memory for a new trailer + * @lock: local lock for members of this struct */ struct fcoe_percpu_s { struct task_struct *kthread; @@ -343,7 +349,8 @@ struct fcoe_percpu_s { * @timer: The queue timer * @destroy_work: Handle for work context * (to prevent RTNL deadlocks) - * @data_srt_addr: Source address for data + * @data_src_addr: Source address for data + * @get_netdev: function that returns a &net_device from @lport * * An instance of this structure is to be allocated along with the * Scsi_Host and libfc fc_lport structures. 
@@ -364,6 +371,8 @@ struct fcoe_port { /** * fcoe_get_netdev() - Return the net device associated with a local port * @lport: The local port to get the net device from + * + * Returns: the &net_device associated with this @lport */ static inline struct net_device *fcoe_get_netdev(const struct fc_lport *lport) { @@ -383,8 +392,10 @@ void fcoe_fcf_get_selected(struct fcoe_fcf_device *); void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *); /** - * struct netdev_list - * A mapping from netdevice to fcoe_transport + * struct fcoe_netdev_mapping - A mapping from &net_device to &fcoe_transport + * @list: list linkage of the mappings + * @netdev: the &net_device + * @ft: the fcoe_transport associated with @netdev */ struct fcoe_netdev_mapping { struct list_head list; diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index f5257103fd..1324068dd9 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -683,7 +683,8 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset); int sas_phy_enable(struct sas_phy *phy, int enable); extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); extern int sas_target_alloc(struct scsi_target *); -extern int sas_slave_configure(struct scsi_device *); +int sas_device_configure(struct scsi_device *dev, + struct queue_limits *lim); extern int sas_change_queue_depth(struct scsi_device *, int new_depth); extern int sas_bios_param(struct scsi_device *, struct block_device *, sector_t capacity, int *hsc); @@ -726,4 +727,33 @@ void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event, void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event, gfp_t gfp_flags); +#define __LIBSAS_SHT_BASE \ + .module = THIS_MODULE, \ + .name = DRV_NAME, \ + .proc_name = DRV_NAME, \ + .queuecommand = sas_queuecommand, \ + .dma_need_drain = ata_scsi_dma_need_drain, \ + .target_alloc = sas_target_alloc, \ + .change_queue_depth = sas_change_queue_depth, \ + .bios_param = sas_bios_param, \ + .this_id = -1, \ + .eh_device_reset_handler = sas_eh_device_reset_handler, \ + .eh_target_reset_handler = sas_eh_target_reset_handler, \ + .target_destroy = sas_target_destroy, \ + .ioctl = sas_ioctl, \ + +#ifdef CONFIG_COMPAT +#define _LIBSAS_SHT_BASE __LIBSAS_SHT_BASE \ + .compat_ioctl = sas_ioctl, +#else +#define _LIBSAS_SHT_BASE __LIBSAS_SHT_BASE +#endif + +#define LIBSAS_SHT_BASE _LIBSAS_SHT_BASE \ + .device_configure = sas_device_configure, \ + .slave_alloc = sas_slave_alloc, \ + +#define LIBSAS_SHT_BASE_NO_SLAVE_INIT _LIBSAS_SHT_BASE + + #endif /* _SASLIB_H_ */ diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h index 2f8c719840..92e27e7bf0 100644 --- a/include/scsi/sas_ata.h +++ b/include/scsi/sas_ata.h @@ -39,6 +39,9 @@ int smp_ata_check_ready_type(struct ata_link *link); int sas_discover_sata(struct domain_device *dev); int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy, struct domain_device *child, int phy_id); + +extern const struct attribute_group sas_ata_sdev_attr_group; + #else static inline void sas_ata_disabled_notice(void) @@ -123,6 +126,9 @@ static inline int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *p sas_ata_disabled_notice(); return -ENODEV; } + +#define sas_ata_sdev_attr_group ((struct attribute_group) {}) + #endif #endif /* _SAS_ATA_H_ */ diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 4498f845b1..96b3503666 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -7,8 +7,9 @@ #define _SCSI_SCSI_H #include <linux/types.h> -#include <linux/scatterlist.h> 
-#include <linux/kernel.h> + +#include <asm/param.h> + #include <scsi/scsi_common.h> #include <scsi/scsi_proto.h> #include <scsi/scsi_status.h> @@ -69,7 +70,7 @@ static inline int scsi_is_wlun(u64 lun) * @status: the status passed up from the driver (including host and * driver components) * - * This returns true if the status code is SAM_STAT_CHECK_CONDITION. + * Returns: %true if the status code is SAM_STAT_CHECK_CONDITION. */ static inline int scsi_status_is_check_condition(int status) { @@ -189,12 +190,13 @@ enum scsi_disposition { /* Used to obtain the PCI location of a device */ #define SCSI_IOCTL_GET_PCI 0x5387 -/** scsi_status_is_good - check the status return. +/** + * scsi_status_is_good - check the status return. * * @status: the status passed up from the driver (including host and * driver components) * - * This returns true for known good conditions that may be treated as + * Returns: %true for known good conditions that may be treated as * command completed normally */ static inline bool scsi_status_is_good(int status) diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 526def14e7..45c40d2001 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -353,6 +353,8 @@ static inline u8 get_host_byte(struct scsi_cmnd *cmd) /** * scsi_msg_to_host_byte() - translate message byte + * @cmd: the SCSI command + * @msg: the SCSI parallel message byte to translate * * Translate the SCSI parallel message byte to a matching * host byte setting. A message of COMMAND_COMPLETE indicates diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h index f40915d2ec..c0e89996bd 100644 --- a/include/scsi/scsi_driver.h +++ b/include/scsi/scsi_driver.h @@ -23,7 +23,9 @@ struct scsi_driver { #define to_scsi_driver(drv) \ container_of((drv), struct scsi_driver, gendrv) -extern int scsi_register_driver(struct device_driver *); +#define scsi_register_driver(drv) \ + __scsi_register_driver(drv, THIS_MODULE) +int __scsi_register_driver(struct device_driver *, struct module *); #define scsi_unregister_driver(drv) \ driver_unregister(drv); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 129001f600..19a1c5c489 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -211,7 +211,11 @@ struct scsi_host_template { * up after yourself before returning non-0 * * Status: OPTIONAL + * + * Note: slave_configure is the legacy version, use device_configure for + * all new code. A driver must never define both. */ + int (* device_configure)(struct scsi_device *, struct queue_limits *lim); int (* slave_configure)(struct scsi_device *); /* @@ -405,6 +409,8 @@ struct scsi_host_template { */ unsigned int max_segment_size; + unsigned int dma_alignment; + /* * DMA scatter gather segment boundary limit. A segment crossing this * boundary will be split in two. 
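To make the device_configure note above concrete, here is a hedged sketch of the new hook (the driver and the limit values are invented, and it assumes struct queue_limits exposes a dma_alignment field like the one added to the host template): instead of calling queue-setup functions from slave_configure, a driver fills in the queue_limits it is handed and returns.

static int example_device_configure(struct scsi_device *sdev,
				    struct queue_limits *lim)
{
	lim->max_hw_sectors = 1024;		/* illustrative per-device cap */
	if (sdev->type == TYPE_ROM)
		lim->dma_alignment = 511;	/* hypothetical alignment need */
	return 0;
}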
@@ -614,6 +620,7 @@ struct Scsi_Host { unsigned int max_sectors; unsigned int opt_sectors; unsigned int max_segment_size; + unsigned int dma_alignment; unsigned long dma_boundary; unsigned long virt_boundary_mask; /* @@ -665,6 +672,8 @@ struct Scsi_Host { /* The transport requires the LUN bits NOT to be stored in CDB[1] */ unsigned no_scsi2_lun_in_cdb:1; + unsigned no_highmem:1; + /* * Optional work queue to be utilized by the transport */ diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h index a0458bda31..1394cf313b 100644 --- a/include/scsi/scsi_transport.h +++ b/include/scsi/scsi_transport.h @@ -83,6 +83,6 @@ scsi_transport_device_data(struct scsi_device *sdev) + shost->transportt->device_private_offset; } -void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q); +void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim); #endif /* SCSI_TRANSPORT_H */ diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 483513c575..4b884b8013 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -709,6 +709,7 @@ struct fc_function_template { int (*vport_delete)(struct fc_vport *); /* bsg support */ + u32 max_bsg_segments; int (*bsg_request)(struct bsg_job *); int (*bsg_timeout)(struct bsg_job *); @@ -770,10 +771,9 @@ struct fc_function_template { /** * fc_remote_port_chkready - called to validate the remote port state * prior to initiating io to the port. - * - * Returns a scsi result code that can be returned by the LLDD. - * * @rport: remote port to be checked + * + * Returns: a scsi result code that can be returned by the LLDD. **/ static inline int fc_remote_port_chkready(struct fc_rport *rport) diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h index dfc78aa112..5b70b53844 100644 --- a/include/scsi/scsi_transport_srp.h +++ b/include/scsi/scsi_transport_srp.h @@ -74,7 +74,7 @@ struct srp_rport { }; /** - * struct srp_function_template + * struct srp_function_template - template for SRP initiator drivers * * Fields that are only relevant for SRP initiator drivers: * @has_rport_state: Whether or not to create the state, fast_io_fail_tmo and @@ -124,7 +124,7 @@ enum scsi_timeout_action srp_timed_out(struct scsi_cmnd *scmd); * srp_chkready() - evaluate the transport layer state before I/O * @rport: SRP target port pointer. * - * Returns a SCSI result code that can be returned by the LLD queuecommand() + * Returns: a SCSI result code that can be returned by the LLD queuecommand() * implementation. The role of this function is similar to that of * fc_remote_port_chkready(). */ diff --git a/include/soc/fsl/dcp.h b/include/soc/fsl/dcp.h new file mode 100644 index 0000000000..3ec335d8ca --- /dev/null +++ b/include/soc/fsl/dcp.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 sigma star gmbh + * + * Specifies paes key slot handles for NXP's DCP (Data Co-Processor) to be used + * with the crypto_skcipher_setkey(). 
+ */ + +#ifndef MXS_DCP_H +#define MXS_DCP_H + +#define DCP_PAES_KEYSIZE 1 +#define DCP_PAES_KEY_SLOT0 0x00 +#define DCP_PAES_KEY_SLOT1 0x01 +#define DCP_PAES_KEY_SLOT2 0x02 +#define DCP_PAES_KEY_SLOT3 0x03 +#define DCP_PAES_KEY_UNIQUE 0xfe +#define DCP_PAES_KEY_OTP 0xff + +#endif /* MXS_DCP_H */ diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h index 09722f83b0..0bd67e10b7 100644 --- a/include/soc/microchip/mpfs.h +++ b/include/soc/microchip/mpfs.h @@ -43,11 +43,11 @@ struct mtd_info *mpfs_sys_controller_get_flash(struct mpfs_sys_controller *mpfs_ #endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */ #if IS_ENABLED(CONFIG_MCHP_CLK_MPFS) - -u32 mpfs_reset_read(struct device *dev); - -void mpfs_reset_write(struct device *dev, u32 val); - +#if IS_ENABLED(CONFIG_RESET_POLARFIRE_SOC) +int mpfs_reset_controller_register(struct device *clk_dev, void __iomem *base); +#else +static inline int mpfs_reset_controller_register(struct device *clk_dev, void __iomem *base) { return 0; } +#endif /* if IS_ENABLED(CONFIG_RESET_POLARFIRE_SOC) */ #endif /* if IS_ENABLED(CONFIG_MCHP_CLK_MPFS) */ #endif /* __SOC_MPFS_H__ */ diff --git a/include/sound/control.h b/include/sound/control.h index 9a4f4f7138..c1659036c4 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -167,6 +167,29 @@ snd_ctl_find_id_mixer(struct snd_card *card, const char *name) return snd_ctl_find_id(card, &id); } +/** + * snd_ctl_find_id_mixer_locked - find the control instance with the given name string + * @card: the card instance + * @name: the name string + * + * Finds the control instance with the given name and + * @SNDRV_CTL_ELEM_IFACE_MIXER. Other fields are set to zero. + * + * This is merely a wrapper to snd_ctl_find_id_locked(). + * The caller must down card->controls_rwsem before calling this function. + * + * Return: The pointer of the instance if found, or %NULL if not. 
+ */ +static inline struct snd_kcontrol * +snd_ctl_find_id_mixer_locked(struct snd_card *card, const char *name) +{ + struct snd_ctl_elem_id id = {}; + + id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; + strscpy(id.name, name, sizeof(id.name)); + return snd_ctl_find_id_locked(card, &id); +} + int snd_ctl_create(struct snd_card *card); int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn); diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h index 68e053fe73..bb70782d15 100644 --- a/include/sound/cs35l41.h +++ b/include/sound/cs35l41.h @@ -554,6 +554,11 @@ #define CS35L41_LRCLK_FRC_SHIFT 1 #define CS35L41_AMP_GAIN_PCM_MASK 0x3E0 +#define CS35L41_AMP_GAIN_PCM_SHIFT 5 +#define CS35L41_AMP_GAIN_PDM_MASK 0x1F +#define CS35L41_AMP_GAIN_PDM_SHIFT 0 +#define CS35L41_AMP_GAIN_PCM_MAX 20 +#define CS35L41_AMP_GAIN_PDM_MAX 20 #define CS35L41_AMP_GAIN_ZC_MASK 0x0400 #define CS35L41_AMP_GAIN_ZC_SHIFT 10 diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h index 1a3c6f66f6..dc627ebf01 100644 --- a/include/sound/cs35l56.h +++ b/include/sound/cs35l56.h @@ -209,7 +209,7 @@ /* CS35L56_MAIN_RENDER_USER_VOLUME */ #define CS35L56_MAIN_RENDER_USER_VOLUME_MIN -400 -#define CS35L56_MAIN_RENDER_USER_VOLUME_MAX 400 +#define CS35L56_MAIN_RENDER_USER_VOLUME_MAX 48 #define CS35L56_MAIN_RENDER_USER_VOLUME_MASK 0x0000FFC0 #define CS35L56_MAIN_RENDER_USER_VOLUME_SHIFT 6 #define CS35L56_MAIN_RENDER_USER_VOLUME_SIGNBIT 9 diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h index 94dbb23580..f6baa9a018 100644 --- a/include/sound/dmaengine_pcm.h +++ b/include/sound/dmaengine_pcm.h @@ -119,6 +119,7 @@ int snd_dmaengine_pcm_refine_runtime_hwparams( * which do not use devicetree. * @process: Callback used to apply processing on samples transferred from/to * user space. + * @name: Component name. If null, dev_name will be used. * @compat_filter_fn: Will be used as the filter function when requesting a * channel for platforms which do not use devicetree. The filter parameter * will be the DAI's DMA data. @@ -144,6 +145,7 @@ struct snd_dmaengine_pcm_config { int (*process)(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, unsigned long bytes); + const char *name; dma_filter_fn compat_filter_fn; struct device *dma_dev; const char *chan_names[SNDRV_PCM_STREAM_LAST + 1]; diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index 234b5baea6..38db50b280 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h @@ -598,17 +598,25 @@ SUB_REG(PEFE, FILTERAMOUNT, 0x000000ff) /* Filter envlope amount */ // In stereo mode, the two channels' caches are concatenated into one, // and hold the interleaved frames. // The cache holds 64 frames, so the upper half is not used in 8-bit mode. -// All registers mentioned below count in frames. -// The cache is a ring buffer; CCR_READADDRESS operates modulo 64. -// The cache is filled from (CCCA_CURRADDR - CCR_CACHEINVALIDSIZE) -// into (CCR_READADDRESS - CCR_CACHEINVALIDSIZE). +// All registers mentioned below count in frames. Shortcuts: +// CA = CCCA_CURRADDR, CRA = CCR_READADDRESS, +// CLA = CCR_CACHELOOPADDRHI:CLP_CACHELOOPADDR, +// CIS = CCR_CACHEINVALIDSIZE, LIS = CCR_LOOPINVALSIZE, +// CLF = CCR_CACHELOOPFLAG, LF = CCR_LOOPFLAG +// The cache is a ring buffer; CRA operates modulo 64. +// The cache is filled from (CA - CIS) into (CRA - CIS). // The engine has a fetch threshold of 32 bytes, so it tries to keep -// CCR_CACHEINVALIDSIZE below 8 (16-bit stereo), 16 (16-bit mono, -// 8-bit stereo), or 32 (8-bit mono). 
The actual transfers are pretty -// unpredictable, especially if several voices are running. -// Frames are consumed at CCR_READADDRESS, which is incremented afterwards, -// along with CCCA_CURRADDR and CCR_CACHEINVALIDSIZE. This implies that the -// actual playback position always lags CCCA_CURRADDR by exactly 64 frames. +// CIS below 8 (16-bit stereo), 16 (16-bit mono, 8-bit stereo), or +// 32 (8-bit mono). The actual transfers are pretty unpredictable, +// especially if several voices are running. +// Frames are consumed at CRA, which is incremented afterwards, +// along with CA and CIS. This implies that the actual playback +// position always lags CA by exactly 64 frames. +// When CA reaches DSL_LOOPENDADDR, LF is set for one frame's time. +// LF's rising edge causes the current values of CA and CIS to be +// copied into CLA and LIS, resp., and CLF to be set. +// If CLF is set, the first LIS of the CIS frames are instead +// filled from (CLA - LIS), and CLF is subsequently reset. #define CD0 0x20 /* Cache data registers 0 .. 0x1f */ #define PTB 0x40 /* Page table base register */ @@ -1843,6 +1851,7 @@ void snd_emu1010_fpga_link_dst_src_write(struct snd_emu10k1 *emu, u32 dst, u32 s u32 snd_emu1010_fpga_link_dst_src_read(struct snd_emu10k1 *emu, u32 dst); int snd_emu1010_get_raw_rate(struct snd_emu10k1 *emu, u8 src); void snd_emu1010_update_clock(struct snd_emu10k1 *emu); +void snd_emu1010_load_firmware_entry(struct snd_emu10k1 *emu, int dock, const struct firmware *fw_entry); unsigned int snd_emu10k1_efx_read(struct snd_emu10k1 *emu, unsigned int pc); void snd_emu10k1_intr_enable(struct snd_emu10k1 *emu, unsigned int intrenb); void snd_emu10k1_intr_disable(struct snd_emu10k1 *emu, unsigned int intrenb); @@ -1885,8 +1894,8 @@ int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size, struct snd_dma_buffer *dmab); struct snd_util_memblk *snd_emu10k1_synth_alloc(struct snd_emu10k1 *emu, unsigned int size); int snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *blk); -int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, int offset, int size); -int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, int offset, const char __user *data, int size); +int snd_emu10k1_synth_memset(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, int offset, int size, u8 value); +int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk, int offset, const char __user *data, int size, u32 xor); int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk); /* voice allocation */ diff --git a/include/sound/hda-mlink.h b/include/sound/hda-mlink.h index d849d9b24f..9ced94686c 100644 --- a/include/sound/hda-mlink.h +++ b/include/sound/hda-mlink.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2022-2023 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2022-2023 Intel Corporation */ struct hdac_bus; diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 9c94ba7c18..575e55aa08 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -109,11 +109,9 @@ struct hda_codec_ops { void (*unsol_event)(struct hda_codec *codec, unsigned int res); void (*set_power_state)(struct hda_codec *codec, hda_nid_t fg, unsigned int power_state); -#ifdef CONFIG_PM int (*suspend)(struct hda_codec *codec); int (*resume)(struct hda_codec *codec); int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid); -#endif void (*stream_pm)(struct hda_codec *codec, hda_nid_t nid, bool on); }; @@ -259,11 +257,9 @@ struct hda_codec { unsigned int no_stream_clean_at_suspend:1; /* do not clean streams at suspend */ unsigned int ctl_dev_id:1; /* old control element id build behaviour */ -#ifdef CONFIG_PM unsigned long power_on_acct; unsigned long power_off_acct; unsigned long power_jiffies; -#endif /* filter the requested power state per nid */ unsigned int (*power_filter)(struct hda_codec *codec, hda_nid_t nid, @@ -481,10 +477,8 @@ extern const struct dev_pm_ops hda_codec_driver_pm; static inline int hda_call_check_power_status(struct hda_codec *codec, hda_nid_t nid) { -#ifdef CONFIG_PM if (codec->patch_ops.check_power_status) return codec->patch_ops.check_power_status(codec, nid); -#endif return 0; } @@ -495,14 +489,9 @@ int hda_call_check_power_status(struct hda_codec *codec, hda_nid_t nid) #define snd_hda_power_up_pm(codec) snd_hdac_power_up_pm(&(codec)->core) #define snd_hda_power_down(codec) snd_hdac_power_down(&(codec)->core) #define snd_hda_power_down_pm(codec) snd_hdac_power_down_pm(&(codec)->core) -#ifdef CONFIG_PM void snd_hda_codec_set_power_save(struct hda_codec *codec, int delay); void snd_hda_set_power_save(struct hda_bus *bus, int delay); void snd_hda_update_power_acct(struct hda_codec *codec); -#else -static inline void snd_hda_codec_set_power_save(struct hda_codec *codec, int delay) {} -static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {} -#endif static inline bool hda_codec_need_resume(struct hda_codec *codec) { diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index a73d7f34f4..1d10939e40 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -351,6 +351,7 @@ struct hdac_bus { bool needs_damn_long_delay:1; bool not_use_interrupts:1; /* prohibiting the RIRB IRQ */ bool access_sdnctl_in_dword:1; /* accessing the sdnctl register by dword */ + bool use_pio_for_commands:1; /* Use PIO instead of CORB for commands */ int poll_count; @@ -731,6 +732,7 @@ static inline unsigned int snd_array_index(struct snd_array *array, void *ptr) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_0) }, \ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_1) }, \ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_2) }, \ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_BMG) }, \ { } \ }, pci) || HDA_CONTROLLER_IS_HSW(pci)) diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 210096f124..3edd7a7346 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -120,9 +120,11 @@ struct snd_pcm_ops { #define SNDRV_PCM_RATE_192000 (1U<<12) /* 192000Hz */ #define SNDRV_PCM_RATE_352800 (1U<<13) /* 352800Hz */ #define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */ +#define SNDRV_PCM_RATE_705600 (1U<<15) /* 705600Hz */ +#define SNDRV_PCM_RATE_768000 (1U<<16) /* 768000Hz */ #define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous 
range */ -#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuos rates */ +#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuous rates */ #define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\ SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\ @@ -135,6 +137,9 @@ struct snd_pcm_ops { #define SNDRV_PCM_RATE_8000_384000 (SNDRV_PCM_RATE_8000_192000|\ SNDRV_PCM_RATE_352800|\ SNDRV_PCM_RATE_384000) +#define SNDRV_PCM_RATE_8000_768000 (SNDRV_PCM_RATE_8000_384000|\ + SNDRV_PCM_RATE_705600|\ + SNDRV_PCM_RATE_768000) #define _SNDRV_PCM_FMTBIT(fmt) (1ULL << (__force int)SNDRV_PCM_FORMAT_##fmt) #define SNDRV_PCM_FMTBIT_S8 _SNDRV_PCM_FMTBIT(S8) #define SNDRV_PCM_FMTBIT_U8 _SNDRV_PCM_FMTBIT(U8) diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h index 845e7608ac..4843b57798 100644 --- a/include/sound/soc-acpi-intel-match.h +++ b/include/sound/soc-acpi-intel-match.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright (C) 2017, Intel Corporation. All rights reserved. + * Copyright (C) 2017, Intel Corporation */ #ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H diff --git a/include/sound/soc-acpi-intel-ssp-common.h b/include/sound/soc-acpi-intel-ssp-common.h new file mode 100644 index 0000000000..b4597c8dac --- /dev/null +++ b/include/sound/soc-acpi-intel-ssp-common.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2023 Intel Corporation. + */ + +#ifndef __LINUX_SND_SOC_ACPI_INTEL_SSP_COMMON_H +#define __LINUX_SND_SOC_ACPI_INTEL_SSP_COMMON_H + +/* Cirrus Logic */ +#define CS35L41_ACPI_HID "CSC3541" +#define CS42L42_ACPI_HID "10134242" + +/* Dialog */ +#define DA7219_ACPI_HID "DLGS7219" + +/* Everest */ +#define ES8316_ACPI_HID "ESSX8316" +#define ES8326_ACPI_HID "ESSX8326" +#define ES8336_ACPI_HID "ESSX8336" + +#define MAX_98357A_ACPI_HID "MX98357A" +#define MAX_98360A_ACPI_HID "MX98360A" +#define MAX_98373_ACPI_HID "MX98373" +#define MAX_98390_ACPI_HID "MX98390" + +/* Nuvoton */ +#define NAU8318_ACPI_HID "NVTN2012" +#define NAU8825_ACPI_HID "10508825" + +/* Realtek */ +#define RT1011_ACPI_HID "10EC1011" +#define RT1015_ACPI_HID "10EC1015" +#define RT1015P_ACPI_HID "RTL1015" +#define RT1019P_ACPI_HID "RTL1019" +#define RT1308_ACPI_HID "10EC1308" +#define RT5650_ACPI_HID "10EC5650" +#define RT5682_ACPI_HID "10EC5682" +#define RT5682S_ACPI_HID "RTL5682" + +enum snd_soc_acpi_intel_codec { + CODEC_NONE, + + /* headphone codec */ + CODEC_CS42L42, + CODEC_DA7219, + CODEC_ES8316, + CODEC_ES8326, + CODEC_ES8336, + CODEC_NAU8825, + CODEC_RT5650, + CODEC_RT5682, + CODEC_RT5682S, + + /* speaker amplifier */ + CODEC_CS35L41, + CODEC_MAX98357A, + CODEC_MAX98360A, + CODEC_MAX98373, + CODEC_MAX98390, + CODEC_NAU8318, + CODEC_RT1011, + CODEC_RT1015, + CODEC_RT1015P, + CODEC_RT1019P, + CODEC_RT1308, +}; + +enum snd_soc_acpi_intel_codec +snd_soc_acpi_intel_detect_codec_type(struct device *dev); +enum snd_soc_acpi_intel_codec +snd_soc_acpi_intel_detect_amp_type(struct device *dev); + +const char * +snd_soc_acpi_intel_get_codec_name(enum snd_soc_acpi_intel_codec codec_type); + +const char * +snd_soc_acpi_intel_get_codec_tplg_suffix(enum snd_soc_acpi_intel_codec codec_type); +const char * +snd_soc_acpi_intel_get_amp_tplg_suffix(enum snd_soc_acpi_intel_codec codec_type); + +#endif /* __LINUX_SND_SOC_ACPI_INTEL_SSP_COMMON_H */ diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 23d6d6bfb0..38ccec4e3f 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -1,6 +1,6 @@ /* 
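The helpers declared in the new soc-acpi-intel-ssp-common.h above let a machine driver probe ACPI for a known amplifier or headphone codec and derive a topology-file suffix, which the SND_SOC_ACPI_TPLG_INTEL_AMP_NAME/_CODEC_NAME flags just below tie into topology-name construction. A hedged sketch of the intended use; the base name, buffer handling, and the assumption that the returned suffix carries its own leading dash are all illustrative:

static int example_build_tplg_name(struct device *dev, char *buf, size_t len)
{
	enum snd_soc_acpi_intel_codec amp =
		snd_soc_acpi_intel_detect_amp_type(dev);
	const char *suffix = snd_soc_acpi_intel_get_amp_tplg_suffix(amp);

	if (amp == CODEC_NONE || !suffix)
		return snprintf(buf, len, "sof-example.tplg");

	dev_dbg(dev, "amp: %s\n", snd_soc_acpi_intel_get_codec_name(amp));
	return snprintf(buf, len, "sof-example%s.tplg", suffix);
}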
SPDX-License-Identifier: GPL-2.0-only * - * Copyright (C) 2013-15, Intel Corporation. All rights reserved. + * Copyright (C) 2013-15, Intel Corporation */ #ifndef __LINUX_SND_SOC_ACPI_H @@ -151,6 +151,18 @@ struct snd_soc_acpi_link_adr { */ #define SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER BIT(2) +/* + * when set the speaker amplifier name suffix (i.e. "-max98360a") will be + * appended to topology file name + */ +#define SND_SOC_ACPI_TPLG_INTEL_AMP_NAME BIT(3) + +/* + * when set the headphone codec name suffix (i.e. "-rt5682") will be appended to + * topology file name + */ +#define SND_SOC_ACPI_TPLG_INTEL_CODEC_NAME BIT(4) + /** * snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are * related to the hardware, except for the firmware and topology file names. diff --git a/include/sound/soc-jack.h b/include/sound/soc-jack.h index a0abb1ee51..3a81d4b8ca 100644 --- a/include/sound/soc-jack.h +++ b/include/sound/soc-jack.h @@ -44,7 +44,6 @@ struct snd_soc_jack_zone { /** * struct snd_soc_jack_gpio - Describes a gpio pin for jack detection * - * @gpio: legacy gpio number * @idx: gpio descriptor index within the function of the GPIO * consumer device * @gpiod_dev: GPIO consumer device @@ -59,7 +58,6 @@ struct snd_soc_jack_zone { * ADC). */ struct snd_soc_jack_gpio { - unsigned int gpio; unsigned int idx; struct device *gpiod_dev; const char *name; diff --git a/include/sound/soc.h b/include/sound/soc.h index 39613b406b..33671437ee 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -11,20 +11,30 @@ #define __LINUX_SND_SOC_H #include <linux/args.h> +#include <linux/array_size.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/lockdep.h> +#include <linux/log2.h> +#include <linux/mutex.h> +#include <linux/notifier.h> #include <linux/of.h> -#include <linux/platform_device.h> #include <linux/types.h> -#include <linux/notifier.h> #include <linux/workqueue.h> -#include <linux/interrupt.h> -#include <linux/kernel.h> -#include <linux/regmap.h> -#include <linux/log2.h> -#include <sound/core.h> -#include <sound/pcm.h> + +#include <sound/ac97_codec.h> #include <sound/compress_driver.h> #include <sound/control.h> -#include <sound/ac97_codec.h> +#include <sound/core.h> +#include <sound/pcm.h> + +struct module; +struct platform_device; + +/* For the current users of sound/soc.h to avoid build issues */ +#include <linux/platform_device.h> +#include <linux/regmap.h> /* * Convenience kcontrol builders @@ -149,6 +159,18 @@ {.reg = xreg, .rreg = xreg, \ .shift = shift_left, .rshift = shift_right, \ .max = xmax, .min = xmin} } +#define SOC_DOUBLE_RANGE_TLV(xname, xreg, xshift_left, xshift_right, xmin, xmax, \ + xinvert, tlv_array) \ +{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ + .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ + SNDRV_CTL_ELEM_ACCESS_READWRITE,\ + .tlv.p = (tlv_array), \ + .info = snd_soc_info_volsw, \ + .get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \ + .private_value = (unsigned long)&(struct soc_mixer_control) \ + {.reg = xreg, .rreg = xreg, \ + .shift = xshift_left, .rshift = xshift_right, \ + .min = xmin, .max = xmax, .invert = xinvert} } #define SOC_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert, tlv_array) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ @@ -400,7 +422,6 @@ #define SOC_ENUM_SINGLE_VIRT_DECL(name, xtexts) \ const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts) -struct device_node; struct snd_jack; 
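The new SOC_DOUBLE_RANGE_TLV() builder above follows the usage of the existing *_TLV kcontrol macros; a sketch with invented register, shifts, range, and dB scale:

static const DECLARE_TLV_DB_SCALE(example_vol_tlv, -5000, 100, 0);

static const struct snd_kcontrol_new example_snd_controls[] = {
	/* stereo volume in one register: left field at bit 0, right field at bit 8 */
	SOC_DOUBLE_RANGE_TLV("Example Playback Volume", 0x20, 0, 8,
			     0, 100, 0, example_vol_tlv),
};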
struct snd_soc_card; struct snd_soc_pcm_stream; @@ -415,6 +436,7 @@ struct soc_enum; struct snd_soc_jack; struct snd_soc_jack_zone; struct snd_soc_jack_pin; + #include <sound/soc-dapm.h> #include <sound/soc-dpcm.h> #include <sound/soc-topology.h> @@ -1193,8 +1215,12 @@ struct snd_soc_pcm_runtime { /* see soc_new_pcm_runtime() */ #define snd_soc_rtd_to_cpu(rtd, n) (rtd)->dais[n] #define snd_soc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus] -#define snd_soc_substream_to_rtd(substream) \ - (struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream) + +static inline struct snd_soc_pcm_runtime * +snd_soc_substream_to_rtd(const struct snd_pcm_substream *substream) +{ + return snd_pcm_substream_chip(substream); +} #define for_each_rtd_components(rtd, i, component) \ for ((i) = 0, component = NULL; \ @@ -1213,6 +1239,10 @@ struct snd_soc_pcm_runtime { ((i) < (rtd)->dai_link->num_cpus + (rtd)->dai_link->num_codecs) && \ ((dai) = (rtd)->dais[i]); \ (i)++) +#define for_each_rtd_dais_reverse(rtd, i, dai) \ + for ((i) = (rtd)->dai_link->num_cpus + (rtd)->dai_link->num_codecs - 1; \ + (i) >= 0 && ((dai) = (rtd)->dais[i]); \ + (i)--) #define for_each_rtd_ch_maps(rtd, i, ch_maps) for_each_link_ch_maps(rtd->dai_link, i, ch_maps) void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd); diff --git a/include/sound/sof.h b/include/sound/sof.h index 05213bb515..ec6c30d545 100644 --- a/include/sound/sof.h +++ b/include/sound/sof.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com> */ @@ -166,7 +166,7 @@ struct sof_dev_desc { /* default firmware name */ const char *default_fw_filename[SOF_IPC_TYPE_COUNT]; - struct snd_sof_dsp_ops *ops; + const struct snd_sof_dsp_ops *ops; int (*ops_init)(struct snd_sof_dev *sdev); void (*ops_free)(struct snd_sof_dev *sdev); }; diff --git a/include/sound/sof/channel_map.h b/include/sound/sof/channel_map.h index d363f0ca69..2a177a1938 100644 --- a/include/sound/sof/channel_map.h +++ b/include/sound/sof/channel_map.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2019 Intel Corporation. All rights reserved. + * Copyright(c) 2019 Intel Corporation */ #ifndef __IPC_CHANNEL_MAP_H__ diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h index 983d374fe5..8d3300dd9d 100644 --- a/include/sound/sof/control.h +++ b/include/sound/sof/control.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_CONTROL_H__ diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h index 5b93b7292f..e4e710b43d 100644 --- a/include/sound/sof/dai-intel.h +++ b/include/sound/sof/dai-intel.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. 
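Two other soc.h changes earlier in this hunk are worth illustrating together: snd_soc_substream_to_rtd() is now a typed inline instead of a cast-through macro, and for_each_rtd_dais_reverse() walks a runtime's DAIs back to front. A hypothetical PCM callback using both:

static int example_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *dai;
	int i;

	/* tear the DAIs down in the opposite order of setup */
	for_each_rtd_dais_reverse(rtd, i, dai)
		dev_dbg(dai->dev, "freeing %s\n", dai->name);

	return 0;
}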
+ * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_DAI_INTEL_H__ diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h index 0764a80c17..36809f7127 100644 --- a/include/sound/sof/dai.h +++ b/include/sound/sof/dai.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_DAI_H__ diff --git a/include/sound/sof/debug.h b/include/sound/sof/debug.h index 38693e3fb5..8b308d7e5e 100644 --- a/include/sound/sof/debug.h +++ b/include/sound/sof/debug.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2020 Intel Corporation. All rights reserved. + * Copyright(c) 2020 Intel Corporation * * Author: Karol Trzcinski <karolx.trzcinski@linux.intel.com> */ diff --git a/include/sound/sof/ext_manifest.h b/include/sound/sof/ext_manifest.h index 2a7e055584..fc0231d04a 100644 --- a/include/sound/sof/ext_manifest.h +++ b/include/sound/sof/ext_manifest.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2020 Intel Corporation. All rights reserved. + * Copyright(c) 2020 Intel Corporation */ /* diff --git a/include/sound/sof/ext_manifest4.h b/include/sound/sof/ext_manifest4.h index ec97edcbbf..4e1d379d96 100644 --- a/include/sound/sof/ext_manifest4.h +++ b/include/sound/sof/ext_manifest4.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2022 Intel Corporation. All rights reserved. + * Copyright(c) 2022 Intel Corporation */ /* diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h index b22e925c70..4e406dc22f 100644 --- a/include/sound/sof/header.h +++ b/include/sound/sof/header.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_HEADER_H__ diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h index 75193850ea..08400fbe54 100644 --- a/include/sound/sof/info.h +++ b/include/sound/sof/info.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_INFO_H__ diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h index 1eb538e182..0c0cf47946 100644 --- a/include/sound/sof/ipc4/header.h +++ b/include/sound/sof/ipc4/header.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2022 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2022 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_IPC4_HEADER_H__ diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h index 366aa6ec44..df55bfe583 100644 --- a/include/sound/sof/pm.h +++ b/include/sound/sof/pm.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_PM_H__ diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h index 9377113f13..1bb25487d1 100644 --- a/include/sound/sof/stream.h +++ b/include/sound/sof/stream.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_STREAM_H__ diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h index b3ca886fa2..3ba086f619 100644 --- a/include/sound/sof/topology.h +++ b/include/sound/sof/topology.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_TOPOLOGY_H__ diff --git a/include/sound/sof/trace.h b/include/sound/sof/trace.h index 25ea99f62d..ac2ebb6fb3 100644 --- a/include/sound/sof/trace.h +++ b/include/sound/sof/trace.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_TRACE_H__ diff --git a/include/sound/sof/xtensa.h b/include/sound/sof/xtensa.h index 87a07e5204..ef70f8e266 100644 --- a/include/sound/sof/xtensa.h +++ b/include/sound/sof/xtensa.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_SOUND_SOF_XTENSA_H__ diff --git a/include/sound/soundfont.h b/include/sound/soundfont.h index e445688a4f..98ed98d89d 100644 --- a/include/sound/soundfont.h +++ b/include/sound/soundfont.h @@ -89,7 +89,7 @@ struct snd_sf_list { int snd_soundfont_load(struct snd_sf_list *sflist, const void __user *data, long count, int client); int snd_soundfont_load_guspatch(struct snd_sf_list *sflist, const char __user *data, - long count, int client); + long count); int snd_soundfont_close_check(struct snd_sf_list *sflist, int client); struct snd_sf_list *snd_sf_new(struct snd_sf_callback *callback, diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h index 7fba7ea26a..3cda9da14f 100644 --- a/include/sound/tas2781-dsp.h +++ b/include/sound/tas2781-dsp.h @@ -117,10 +117,17 @@ struct tasdevice_fw { struct device *dev; }; -enum tasdevice_dsp_fw_state { - TASDEVICE_DSP_FW_NONE = 0, +enum tasdevice_fw_state { + /* Driver in startup mode; no firmware loaded yet. */ TASDEVICE_DSP_FW_PENDING, + /* DSP firmware is present but failed to parse. */ TASDEVICE_DSP_FW_FAIL, + /* + * Only RCA (Reconfigurable Architecture) firmware loaded + * successfully. 
+ */ + TASDEVICE_RCA_FW_OK, + /* Both RCA and DSP firmware load successfully. */ TASDEVICE_DSP_FW_ALL_OK, }; diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index e609cd7da4..a2ea11cc91 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -46,8 +46,7 @@ static notrace void \ __bpf_trace_##call(void *__data, proto) \ { \ - struct bpf_prog *prog = __data; \ - CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \ + CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args)); \ } #undef DECLARE_EVENT_CLASS diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h index 517015ef36..202fc3680c 100644 --- a/include/trace/events/asoc.h +++ b/include/trace/events/asoc.h @@ -32,8 +32,8 @@ DECLARE_EVENT_CLASS(snd_soc_dapm, ), TP_fast_assign( - __assign_str(card_name, dapm->card->name); - __assign_str(comp_name, dapm->component ? dapm->component->name : "(none)"); + __assign_str(card_name); + __assign_str(comp_name); __entry->val = val; ), @@ -69,7 +69,7 @@ DECLARE_EVENT_CLASS(snd_soc_dapm_basic, ), TP_fast_assign( - __assign_str(name, card->name); + __assign_str(name); __entry->event = event; ), @@ -104,7 +104,7 @@ DECLARE_EVENT_CLASS(snd_soc_dapm_widget, ), TP_fast_assign( - __assign_str(name, w->name); + __assign_str(name); __entry->val = val; ), @@ -150,7 +150,7 @@ TRACE_EVENT(snd_soc_dapm_walk_done, ), TP_fast_assign( - __assign_str(name, card->name); + __assign_str(name); __entry->power_checks = card->dapm_stats.power_checks; __entry->path_checks = card->dapm_stats.path_checks; __entry->neighbour_checks = card->dapm_stats.neighbour_checks; @@ -179,9 +179,9 @@ TRACE_EVENT(snd_soc_dapm_path, ), TP_fast_assign( - __assign_str(wname, widget->name); - __assign_str(pname, path->name ? 
path->name : DAPM_DIRECT); - __assign_str(pnname, path->node[dir]->name); + __assign_str(wname); + __assign_str(pname); + __assign_str(pnname); __entry->path_connect = path->connect; __entry->path_node = (long)path->node[dir]; __entry->path_dir = dir; @@ -226,7 +226,7 @@ TRACE_EVENT(snd_soc_jack_irq, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); ), TP_printk("%s", __get_str(name)) @@ -245,7 +245,7 @@ TRACE_EVENT(snd_soc_jack_report, ), TP_fast_assign( - __assign_str(name, jack->jack->id); + __assign_str(name); __entry->mask = mask; __entry->val = val; ), @@ -266,7 +266,7 @@ TRACE_EVENT(snd_soc_jack_notify, ), TP_fast_assign( - __assign_str(name, jack->jack->id); + __assign_str(name); __entry->val = val; ), diff --git a/include/trace/events/avc.h b/include/trace/events/avc.h index b55fda2e07..fed0f141d5 100644 --- a/include/trace/events/avc.h +++ b/include/trace/events/avc.h @@ -36,9 +36,9 @@ TRACE_EVENT(selinux_audited, __entry->denied = sad->denied; __entry->audited = sad->audited; __entry->result = sad->result; - __assign_str(tcontext, tcontext); - __assign_str(scontext, scontext); - __assign_str(tclass, tclass); + __assign_str(tcontext); + __assign_str(scontext); + __assign_str(tclass); ), TP_printk("requested=0x%x denied=0x%x audited=0x%x result=%d scontext=%s tcontext=%s tclass=%s", diff --git a/include/trace/events/bpf_test_run.h b/include/trace/events/bpf_test_run.h index 265447e3f7..0c924d39b7 100644 --- a/include/trace/events/bpf_test_run.h +++ b/include/trace/events/bpf_test_run.h @@ -7,6 +7,23 @@ #include <linux/tracepoint.h> +TRACE_EVENT(bpf_trigger_tp, + + TP_PROTO(int nonce), + + TP_ARGS(nonce), + + TP_STRUCT__entry( + __field(int, nonce) + ), + + TP_fast_assign( + __entry->nonce = nonce; + ), + + TP_printk("nonce %d", __entry->nonce) +); + DECLARE_EVENT_CLASS(bpf_test_finish, TP_PROTO(int *err), diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h index a6b3a4e409..3fe4725c83 100644 --- a/include/trace/events/bridge.h +++ b/include/trace/events/bridge.h @@ -25,7 +25,7 @@ TRACE_EVENT(br_fdb_add, ), TP_fast_assign( - __assign_str(dev, dev->name); + __assign_str(dev); memcpy(__entry->addr, addr, ETH_ALEN); __entry->vid = vid; __entry->nlh_flags = nlh_flags; @@ -54,8 +54,8 @@ TRACE_EVENT(br_fdb_external_learn_add, ), TP_fast_assign( - __assign_str(br_dev, br->dev->name); - __assign_str(dev, p ? p->dev->name : "null"); + __assign_str(br_dev); + __assign_str(dev); memcpy(__entry->addr, addr, ETH_ALEN); __entry->vid = vid; ), @@ -80,8 +80,8 @@ TRACE_EVENT(fdb_delete, ), TP_fast_assign( - __assign_str(br_dev, br->dev->name); - __assign_str(dev, f->dst ? 
f->dst->dev->name : "null"); + __assign_str(br_dev); + __assign_str(dev); memcpy(__entry->addr, f->key.addr.addr, ETH_ALEN); __entry->vid = f->key.vlan_id; ), @@ -108,8 +108,8 @@ TRACE_EVENT(br_fdb_update, ), TP_fast_assign( - __assign_str(br_dev, br->dev->name); - __assign_str(dev, source->dev->name); + __assign_str(br_dev); + __assign_str(dev); memcpy(__entry->addr, addr, ETH_ALEN); __entry->vid = vid; __entry->flags = flags; @@ -141,7 +141,7 @@ TRACE_EVENT(br_mdb_full, TP_fast_assign( struct in6_addr *in6; - __assign_str(dev, dev->name); + __assign_str(dev); __entry->vid = group->vid; if (!group->proto) { diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 90b0222390..c978fa2893 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -16,8 +16,6 @@ struct extent_map; struct btrfs_file_extent_item; struct btrfs_ordered_extent; struct btrfs_delayed_ref_node; -struct btrfs_delayed_tree_ref; -struct btrfs_delayed_data_ref; struct btrfs_delayed_ref_head; struct btrfs_block_group; struct btrfs_free_cluster; @@ -277,8 +275,7 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, { EXTENT_FLAG_COMPRESS_LZO, "COMPRESS_LZO" },\ { EXTENT_FLAG_COMPRESS_ZSTD, "COMPRESS_ZSTD" },\ { EXTENT_FLAG_PREALLOC, "PREALLOC" },\ - { EXTENT_FLAG_LOGGING, "LOGGING" },\ - { EXTENT_FLAG_FILLING, "FILLING" }) + { EXTENT_FLAG_LOGGING, "LOGGING" }) TRACE_EVENT_CONDITION(btrfs_get_extent, @@ -869,11 +866,9 @@ TRACE_EVENT(btrfs_add_block_group, DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_delayed_ref_node *ref, - const struct btrfs_delayed_tree_ref *full_ref, - int action), + const struct btrfs_delayed_ref_node *ref), - TP_ARGS(fs_info, ref, full_ref, action), + TP_ARGS(fs_info, ref), TP_STRUCT__entry_btrfs( __field( u64, bytenr ) @@ -889,10 +884,10 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref, TP_fast_assign_btrfs(fs_info, __entry->bytenr = ref->bytenr; __entry->num_bytes = ref->num_bytes; - __entry->action = action; - __entry->parent = full_ref->parent; - __entry->ref_root = full_ref->root; - __entry->level = full_ref->level; + __entry->action = ref->action; + __entry->parent = ref->parent; + __entry->ref_root = ref->ref_root; + __entry->level = ref->tree_ref.level; __entry->type = ref->type; __entry->seq = ref->seq; ), @@ -912,31 +907,25 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref, DEFINE_EVENT(btrfs_delayed_tree_ref, add_delayed_tree_ref, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_delayed_ref_node *ref, - const struct btrfs_delayed_tree_ref *full_ref, - int action), + const struct btrfs_delayed_ref_node *ref), - TP_ARGS(fs_info, ref, full_ref, action) + TP_ARGS(fs_info, ref) ); DEFINE_EVENT(btrfs_delayed_tree_ref, run_delayed_tree_ref, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_delayed_ref_node *ref, - const struct btrfs_delayed_tree_ref *full_ref, - int action), + const struct btrfs_delayed_ref_node *ref), - TP_ARGS(fs_info, ref, full_ref, action) + TP_ARGS(fs_info, ref) ); DECLARE_EVENT_CLASS(btrfs_delayed_data_ref, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_delayed_ref_node *ref, - const struct btrfs_delayed_data_ref *full_ref, - int action), + const struct btrfs_delayed_ref_node *ref), - TP_ARGS(fs_info, ref, full_ref, action), + TP_ARGS(fs_info, ref), TP_STRUCT__entry_btrfs( __field( u64, bytenr ) @@ -953,11 +942,11 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref, TP_fast_assign_btrfs(fs_info, __entry->bytenr = ref->bytenr; 
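A pattern recurring through all of these trace-header hunks: __assign_str() now takes a single argument and picks up its source expression from the matching __string() declaration in TP_STRUCT__entry, so the expression is spelled once instead of twice. A minimal hypothetical event in the new form:

TRACE_EVENT(example_dev_event,
	TP_PROTO(struct device *dev, int val),
	TP_ARGS(dev, val),
	TP_STRUCT__entry(
		__string(name, dev_name(dev))	/* source named once, here */
		__field(int, val)
	),
	TP_fast_assign(
		__assign_str(name);		/* implicitly copies dev_name(dev) */
		__entry->val = val;
	),
	TP_printk("dev=%s val=%d", __get_str(name), __entry->val)
);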
__entry->num_bytes = ref->num_bytes; - __entry->action = action; - __entry->parent = full_ref->parent; - __entry->ref_root = full_ref->root; - __entry->owner = full_ref->objectid; - __entry->offset = full_ref->offset; + __entry->action = ref->action; + __entry->parent = ref->parent; + __entry->ref_root = ref->ref_root; + __entry->owner = ref->data_ref.objectid; + __entry->offset = ref->data_ref.offset; __entry->type = ref->type; __entry->seq = ref->seq; ), @@ -979,21 +968,17 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref, DEFINE_EVENT(btrfs_delayed_data_ref, add_delayed_data_ref, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_delayed_ref_node *ref, - const struct btrfs_delayed_data_ref *full_ref, - int action), + const struct btrfs_delayed_ref_node *ref), - TP_ARGS(fs_info, ref, full_ref, action) + TP_ARGS(fs_info, ref) ); DEFINE_EVENT(btrfs_delayed_data_ref, run_delayed_data_ref, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_delayed_ref_node *ref, - const struct btrfs_delayed_data_ref *full_ref, - int action), + const struct btrfs_delayed_ref_node *ref), - TP_ARGS(fs_info, ref, full_ref, action) + TP_ARGS(fs_info, ref) ); DECLARE_EVENT_CLASS(btrfs_delayed_ref_head, @@ -1155,7 +1140,7 @@ TRACE_EVENT(btrfs_space_reservation, ), TP_fast_assign_btrfs(fs_info, - __assign_str(type, type); + __assign_str(type); __entry->val = val; __entry->bytes = bytes; __entry->reserve = reserve; @@ -1184,7 +1169,7 @@ TRACE_EVENT(btrfs_trigger_flush, __entry->flags = flags; __entry->bytes = bytes; __entry->flush = flush; - __assign_str(reason, reason); + __assign_str(reason); ), TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu", @@ -1637,7 +1622,7 @@ DECLARE_EVENT_CLASS(btrfs_workqueue, TP_fast_assign_btrfs(btrfs_workqueue_owner(wq), __entry->wq = wq; - __assign_str(name, name); + __assign_str(name); ), TP_printk_btrfs("name=%s wq=%p", __get_str(name), @@ -2552,6 +2537,107 @@ TRACE_EVENT(btrfs_get_raid_extent_offset, __entry->devid) ); +TRACE_EVENT(btrfs_extent_map_shrinker_count, + + TP_PROTO(const struct btrfs_fs_info *fs_info, long nr), + + TP_ARGS(fs_info, nr), + + TP_STRUCT__entry_btrfs( + __field( long, nr ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->nr = nr; + ), + + TP_printk_btrfs("nr=%ld", __entry->nr) +); + +TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter, + + TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr, + u64 last_root_id, u64 last_ino), + + TP_ARGS(fs_info, nr_to_scan, nr, last_root_id, last_ino), + + TP_STRUCT__entry_btrfs( + __field( long, nr_to_scan ) + __field( long, nr ) + __field( u64, last_root_id ) + __field( u64, last_ino ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->nr_to_scan = nr_to_scan; + __entry->nr = nr; + __entry->last_root_id = last_root_id; + __entry->last_ino = last_ino; + ), + + TP_printk_btrfs("nr_to_scan=%ld nr=%ld last_root=%llu(%s) last_ino=%llu", + __entry->nr_to_scan, __entry->nr, + show_root_type(__entry->last_root_id), __entry->last_ino) +); + +TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit, + + TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr, + u64 last_root_id, u64 last_ino), + + TP_ARGS(fs_info, nr_dropped, nr, last_root_id, last_ino), + + TP_STRUCT__entry_btrfs( + __field( long, nr_dropped ) + __field( long, nr ) + __field( u64, last_root_id ) + __field( u64, last_ino ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->nr_dropped = nr_dropped; + __entry->nr = nr; + __entry->last_root_id = last_root_id; + __entry->last_ino = last_ino; + ), + + 
TP_printk_btrfs("nr_dropped=%ld nr=%ld last_root=%llu(%s) last_ino=%llu", + __entry->nr_dropped, __entry->nr, + show_root_type(__entry->last_root_id), __entry->last_ino) +); + +TRACE_EVENT(btrfs_extent_map_shrinker_remove_em, + + TP_PROTO(const struct btrfs_inode *inode, const struct extent_map *em), + + TP_ARGS(inode, em), + + TP_STRUCT__entry_btrfs( + __field( u64, ino ) + __field( u64, root_id ) + __field( u64, start ) + __field( u64, len ) + __field( u64, block_start ) + __field( u32, flags ) + ), + + TP_fast_assign_btrfs(inode->root->fs_info, + __entry->ino = btrfs_ino(inode); + __entry->root_id = inode->root->root_key.objectid; + __entry->start = em->start; + __entry->len = em->len; + __entry->block_start = em->block_start; + __entry->flags = em->flags; + ), + + TP_printk_btrfs( +"ino=%llu root=%llu(%s) start=%llu len=%llu block_start=%llu(%s) flags=%s", + __entry->ino, show_root_type(__entry->root_id), + __entry->start, __entry->len, + show_map_type(__entry->block_start), + show_map_flags(__entry->flags)) +); + #endif /* _TRACE_BTRFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h index dd7d7c9efe..af2755bda6 100644 --- a/include/trace/events/cgroup.h +++ b/include/trace/events/cgroup.h @@ -23,7 +23,7 @@ DECLARE_EVENT_CLASS(cgroup_root, TP_fast_assign( __entry->root = root->hierarchy_id; __entry->ss_mask = root->subsys_mask; - __assign_str(name, root->name); + __assign_str(name); ), TP_printk("root=%d ss_mask=%#x name=%s", @@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(cgroup, __entry->root = cgrp->root->hierarchy_id; __entry->id = cgroup_id(cgrp); __entry->level = cgrp->level; - __assign_str(path, path); + __assign_str(path); ), TP_printk("root=%d id=%llu level=%d path=%s", @@ -137,9 +137,9 @@ DECLARE_EVENT_CLASS(cgroup_migrate, __entry->dst_root = dst_cgrp->root->hierarchy_id; __entry->dst_id = cgroup_id(dst_cgrp); __entry->dst_level = dst_cgrp->level; - __assign_str(dst_path, path); + __assign_str(dst_path); __entry->pid = task->pid; - __assign_str(comm, task->comm); + __assign_str(comm); ), TP_printk("dst_root=%d dst_id=%llu dst_level=%d dst_path=%s pid=%d comm=%s", @@ -181,7 +181,7 @@ DECLARE_EVENT_CLASS(cgroup_event, __entry->root = cgrp->root->hierarchy_id; __entry->id = cgroup_id(cgrp); __entry->level = cgrp->level; - __assign_str(path, path); + __assign_str(path); __entry->val = val; ), @@ -204,6 +204,98 @@ DEFINE_EVENT(cgroup_event, cgroup_notify_frozen, TP_ARGS(cgrp, path, val) ); +DECLARE_EVENT_CLASS(cgroup_rstat, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended), + + TP_STRUCT__entry( + __field( int, root ) + __field( int, level ) + __field( u64, id ) + __field( int, cpu ) + __field( bool, contended ) + ), + + TP_fast_assign( + __entry->root = cgrp->root->hierarchy_id; + __entry->id = cgroup_id(cgrp); + __entry->level = cgrp->level; + __entry->cpu = cpu; + __entry->contended = contended; + ), + + TP_printk("root=%d id=%llu level=%d cpu=%d lock contended:%d", + __entry->root, __entry->id, __entry->level, + __entry->cpu, __entry->contended) +); + +/* Related to global: cgroup_rstat_lock */ +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_lock_contended, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_locked, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock, + + TP_PROTO(struct cgroup 
*cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + +/* Related to per CPU: cgroup_rstat_cpu_lock */ +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended_fastpath, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked_fastpath, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock_fastpath, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + #endif /* _TRACE_CGROUP_H */ /* This part must be outside protection */ diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h index daed3c7a48..759f7371a6 100644 --- a/include/trace/events/clk.h +++ b/include/trace/events/clk.h @@ -23,7 +23,7 @@ DECLARE_EVENT_CLASS(clk, ), TP_fast_assign( - __assign_str(name, core->name); + __assign_str(name); ), TP_printk("%s", __get_str(name)) @@ -97,7 +97,7 @@ DECLARE_EVENT_CLASS(clk_rate, ), TP_fast_assign( - __assign_str(name, core->name); + __assign_str(name); __entry->rate = rate; ), @@ -145,7 +145,7 @@ DECLARE_EVENT_CLASS(clk_rate_range, ), TP_fast_assign( - __assign_str(name, core->name); + __assign_str(name); __entry->min = min; __entry->max = max; ), @@ -174,8 +174,8 @@ DECLARE_EVENT_CLASS(clk_parent, ), TP_fast_assign( - __assign_str(name, core->name); - __assign_str(pname, parent ? parent->name : "none"); + __assign_str(name); + __assign_str(pname); ), TP_printk("%s %s", __get_str(name), __get_str(pname)) @@ -207,7 +207,7 @@ DECLARE_EVENT_CLASS(clk_phase, ), TP_fast_assign( - __assign_str(name, core->name); + __assign_str(name); __entry->phase = phase; ), @@ -241,7 +241,7 @@ DECLARE_EVENT_CLASS(clk_duty_cycle, ), TP_fast_assign( - __assign_str(name, core->name); + __assign_str(name); __entry->num = duty->num; __entry->den = duty->den; ), @@ -279,8 +279,8 @@ DECLARE_EVENT_CLASS(clk_rate_request, ), TP_fast_assign( - __assign_str(name, req->core ? req->core->name : "none"); - __assign_str(pname, req->best_parent_hw ? 
clk_hw_get_name(req->best_parent_hw) : "none"); + __assign_str(name); + __assign_str(pname); __entry->min = req->min_rate; __entry->max = req->max_rate; __entry->prate = req->best_parent_rate; diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h index 25103e6773..383c09f583 100644 --- a/include/trace/events/cma.h +++ b/include/trace/events/cma.h @@ -23,7 +23,7 @@ TRACE_EVENT(cma_release, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); __entry->pfn = pfn; __entry->page = page; __entry->count = count; @@ -49,7 +49,7 @@ TRACE_EVENT(cma_alloc_start, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); __entry->count = count; __entry->align = align; ), @@ -77,7 +77,7 @@ TRACE_EVENT(cma_alloc_finish, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); __entry->pfn = pfn; __entry->page = page; __entry->count = count; @@ -110,7 +110,7 @@ TRACE_EVENT(cma_alloc_busy_retry, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); __entry->pfn = pfn; __entry->page = page; __entry->count = count; diff --git a/include/trace/events/devfreq.h b/include/trace/events/devfreq.h index 7627c620bb..6cbc4d59fd 100644 --- a/include/trace/events/devfreq.h +++ b/include/trace/events/devfreq.h @@ -23,7 +23,7 @@ TRACE_EVENT(devfreq_frequency, ), TP_fast_assign( - __assign_str(dev_name, dev_name(&devfreq->dev)); + __assign_str(dev_name); __entry->freq = freq; __entry->prev_freq = prev_freq; __entry->busy_time = devfreq->last_status.busy_time; @@ -54,7 +54,7 @@ TRACE_EVENT(devfreq_monitor, __entry->busy_time = devfreq->last_status.busy_time; __entry->total_time = devfreq->last_status.total_time; __entry->polling_ms = devfreq->profile->polling_ms; - __assign_str(dev_name, dev_name(&devfreq->dev)); + __assign_str(dev_name); ), TP_printk("dev_name=%-30s freq=%-12lu polling_ms=%-3u load=%-2lu", diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h index 77ff7cfc60..f241e204fe 100644 --- a/include/trace/events/devlink.h +++ b/include/trace/events/devlink.h @@ -31,9 +31,9 @@ TRACE_EVENT(devlink_hwmsg, ), TP_fast_assign( - __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); - __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); - __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); + __assign_str(bus_name); + __assign_str(dev_name); + __assign_str(driver_name); __entry->incoming = incoming; __entry->type = type; memcpy(__get_dynamic_array(buf), buf, len); @@ -63,11 +63,11 @@ TRACE_EVENT(devlink_hwerr, ), TP_fast_assign( - __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); - __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); - __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); + __assign_str(bus_name); + __assign_str(dev_name); + __assign_str(driver_name); __entry->err = err; - __assign_str(msg, msg); + __assign_str(msg); ), TP_printk("bus_name=%s dev_name=%s driver_name=%s err=%d %s", @@ -93,11 +93,11 @@ TRACE_EVENT(devlink_health_report, ), TP_fast_assign( - __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); - __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); - __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); - __assign_str(reporter_name, reporter_name); - __assign_str(msg, msg); + __assign_str(bus_name); + __assign_str(dev_name); + __assign_str(driver_name); + __assign_str(reporter_name); + __assign_str(msg); ), TP_printk("bus_name=%s dev_name=%s driver_name=%s reporter_name=%s: %s", @@ -125,10 +125,10 @@ 
TRACE_EVENT(devlink_health_recover_aborted, ), TP_fast_assign( - __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); - __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); - __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); - __assign_str(reporter_name, reporter_name); + __assign_str(bus_name); + __assign_str(dev_name); + __assign_str(driver_name); + __assign_str(reporter_name); __entry->health_state = health_state; __entry->time_since_last_recover = time_since_last_recover; ), @@ -158,10 +158,10 @@ TRACE_EVENT(devlink_health_reporter_state_update, ), TP_fast_assign( - __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); - __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); - __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); - __assign_str(reporter_name, reporter_name); + __assign_str(bus_name); + __assign_str(dev_name); + __assign_str(driver_name); + __assign_str(reporter_name); __entry->new_state = new_state; ), @@ -192,11 +192,11 @@ TRACE_EVENT(devlink_trap_report, TP_fast_assign( struct net_device *input_dev = metadata->input_dev; - __assign_str(bus_name, devlink_to_dev(devlink)->bus->name); - __assign_str(dev_name, dev_name(devlink_to_dev(devlink))); - __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); - __assign_str(trap_name, metadata->trap_name); - __assign_str(trap_group_name, metadata->trap_group_name); + __assign_str(bus_name); + __assign_str(dev_name); + __assign_str(driver_name); + __assign_str(trap_name); + __assign_str(trap_group_name); strscpy(__entry->input_dev_name, input_dev ? input_dev->name : "NULL", IFNAMSIZ); ), diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h index c1a146f9fc..af160082c9 100644 --- a/include/trace/events/dlm.h +++ b/include/trace/events/dlm.h @@ -189,29 +189,25 @@ TRACE_EVENT(dlm_lock_end, TRACE_EVENT(dlm_bast, - TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode), + TP_PROTO(__u32 ls_id, __u32 lkb_id, int mode, + const char *res_name, size_t res_length), - TP_ARGS(ls, lkb, mode), + TP_ARGS(ls_id, lkb_id, mode, res_name, res_length), TP_STRUCT__entry( __field(__u32, ls_id) __field(__u32, lkb_id) __field(int, mode) - __dynamic_array(unsigned char, res_name, - lkb->lkb_resource ? lkb->lkb_resource->res_length : 0) + __dynamic_array(unsigned char, res_name, res_length) ), TP_fast_assign( - struct dlm_rsb *r; - - __entry->ls_id = ls->ls_global_id; - __entry->lkb_id = lkb->lkb_id; + __entry->ls_id = ls_id; + __entry->lkb_id = lkb_id; __entry->mode = mode; - r = lkb->lkb_resource; - if (r) - memcpy(__get_dynamic_array(res_name), r->res_name, - __get_dynamic_array_len(res_name)); + memcpy(__get_dynamic_array(res_name), res_name, + __get_dynamic_array_len(res_name)); ), TP_printk("ls_id=%u lkb_id=%x mode=%s res_name=%s", @@ -224,31 +220,27 @@ TRACE_EVENT(dlm_bast, TRACE_EVENT(dlm_ast, - TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb), + TP_PROTO(__u32 ls_id, __u32 lkb_id, __u8 sb_flags, int sb_status, + const char *res_name, size_t res_length), - TP_ARGS(ls, lkb), + TP_ARGS(ls_id, lkb_id, sb_flags, sb_status, res_name, res_length), TP_STRUCT__entry( __field(__u32, ls_id) __field(__u32, lkb_id) - __field(u8, sb_flags) + __field(__u8, sb_flags) __field(int, sb_status) - __dynamic_array(unsigned char, res_name, - lkb->lkb_resource ? 
lkb->lkb_resource->res_length : 0) + __dynamic_array(unsigned char, res_name, res_length) ), TP_fast_assign( - struct dlm_rsb *r; - - __entry->ls_id = ls->ls_global_id; - __entry->lkb_id = lkb->lkb_id; - __entry->sb_flags = lkb->lkb_lksb->sb_flags; - __entry->sb_status = lkb->lkb_lksb->sb_status; + __entry->ls_id = ls_id; + __entry->lkb_id = lkb_id; + __entry->sb_flags = sb_flags; + __entry->sb_status = sb_status; - r = lkb->lkb_resource; - if (r) - memcpy(__get_dynamic_array(res_name), r->res_name, - __get_dynamic_array_len(res_name)); + memcpy(__get_dynamic_array(res_name), res_name, + __get_dynamic_array_len(res_name)); ), TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d res_name=%s", diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h index 3963e79ca7..a4de3df850 100644 --- a/include/trace/events/dma_fence.h +++ b/include/trace/events/dma_fence.h @@ -23,8 +23,8 @@ DECLARE_EVENT_CLASS(dma_fence, ), TP_fast_assign( - __assign_str(driver, fence->ops->get_driver_name(fence)); - __assign_str(timeline, fence->ops->get_timeline_name(fence)); + __assign_str(driver); + __assign_str(timeline); __entry->context = fence->context; __entry->seqno = fence->seqno; ), diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h index e18684b02c..b9bbfd855f 100644 --- a/include/trace/events/erofs.h +++ b/include/trace/events/erofs.h @@ -47,7 +47,7 @@ TRACE_EVENT(erofs_lookup, TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->nid = EROFS_I(dir)->nid; - __assign_str(name, dentry->d_name.name); + __assign_str(name); __entry->flags = flags; ), diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 7ed0fc430d..ed794b5fef 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -354,7 +354,7 @@ TRACE_EVENT(f2fs_unlink_enter, __entry->ino = dir->i_ino; __entry->size = dir->i_size; __entry->blocks = dir->i_blocks; - __assign_str(name, dentry->d_name.name); + __assign_str(name); ), TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, " @@ -843,7 +843,7 @@ TRACE_EVENT(f2fs_lookup_start, TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->ino = dir->i_ino; - __assign_str(name, dentry->d_name.name); + __assign_str(name); __entry->flags = flags; ), @@ -871,7 +871,7 @@ TRACE_EVENT(f2fs_lookup_end, TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->ino = dir->i_ino; - __assign_str(name, dentry->d_name.name); + __assign_str(name); __entry->cino = ino; __entry->err = err; ), @@ -903,9 +903,9 @@ TRACE_EVENT(f2fs_rename_start, TP_fast_assign( __entry->dev = old_dir->i_sb->s_dev; __entry->ino = old_dir->i_ino; - __assign_str(old_name, old_dentry->d_name.name); + __assign_str(old_name); __entry->new_pino = new_dir->i_ino; - __assign_str(new_name, new_dentry->d_name.name); + __assign_str(new_name); __entry->flags = flags; ), @@ -937,8 +937,8 @@ TRACE_EVENT(f2fs_rename_end, TP_fast_assign( __entry->dev = old_dentry->d_sb->s_dev; __entry->ino = old_dentry->d_inode->i_ino; - __assign_str(old_name, old_dentry->d_name.name); - __assign_str(new_name, new_dentry->d_name.name); + __assign_str(old_name); + __assign_str(new_name); __entry->flags = flags; __entry->ret = ret; ), @@ -1304,11 +1304,11 @@ TRACE_EVENT(f2fs_write_end, __entry->copied) ); -DECLARE_EVENT_CLASS(f2fs__page, +DECLARE_EVENT_CLASS(f2fs__folio, - TP_PROTO(struct page *page, int type), + TP_PROTO(struct folio *folio, int type), - TP_ARGS(page, type), + TP_ARGS(folio, type), TP_STRUCT__entry( __field(dev_t, dev) @@ -1321,14 +1321,14 @@ 
DECLARE_EVENT_CLASS(f2fs__page, ), TP_fast_assign( - __entry->dev = page_file_mapping(page)->host->i_sb->s_dev; - __entry->ino = page_file_mapping(page)->host->i_ino; + __entry->dev = folio_file_mapping(folio)->host->i_sb->s_dev; + __entry->ino = folio_file_mapping(folio)->host->i_ino; __entry->type = type; __entry->dir = - S_ISDIR(page_file_mapping(page)->host->i_mode); - __entry->index = page->index; - __entry->dirty = PageDirty(page); - __entry->uptodate = PageUptodate(page); + S_ISDIR(folio_file_mapping(folio)->host->i_mode); + __entry->index = folio_index(folio); + __entry->dirty = folio_test_dirty(folio); + __entry->uptodate = folio_test_uptodate(folio); ), TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, " @@ -1341,32 +1341,32 @@ DECLARE_EVENT_CLASS(f2fs__page, __entry->uptodate) ); -DEFINE_EVENT(f2fs__page, f2fs_writepage, +DEFINE_EVENT(f2fs__folio, f2fs_writepage, - TP_PROTO(struct page *page, int type), + TP_PROTO(struct folio *folio, int type), - TP_ARGS(page, type) + TP_ARGS(folio, type) ); -DEFINE_EVENT(f2fs__page, f2fs_do_write_data_page, +DEFINE_EVENT(f2fs__folio, f2fs_do_write_data_page, - TP_PROTO(struct page *page, int type), + TP_PROTO(struct folio *folio, int type), - TP_ARGS(page, type) + TP_ARGS(folio, type) ); -DEFINE_EVENT(f2fs__page, f2fs_readpage, +DEFINE_EVENT(f2fs__folio, f2fs_readpage, - TP_PROTO(struct page *page, int type), + TP_PROTO(struct folio *folio, int type), - TP_ARGS(page, type) + TP_ARGS(folio, type) ); -DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty, +DEFINE_EVENT(f2fs__folio, f2fs_set_page_dirty, - TP_PROTO(struct page *page, int type), + TP_PROTO(struct folio *folio, int type), - TP_ARGS(page, type) + TP_ARGS(folio, type) ); TRACE_EVENT(f2fs_replace_atomic_write_block, @@ -1557,7 +1557,7 @@ TRACE_EVENT(f2fs_write_checkpoint, TP_fast_assign( __entry->dev = sb->s_dev; __entry->reason = reason; - __assign_str(dest_msg, msg); + __assign_str(dest_msg); ), TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", @@ -2333,12 +2333,12 @@ DECLARE_EVENT_CLASS(f2fs__rw_start, * because this screws up the tooling that parses * the traces. */ - __assign_str(pathbuf, pathname); + __assign_str(pathbuf); (void)strreplace(__get_str(pathbuf), ' ', '_'); __entry->offset = offset; __entry->bytes = bytes; __entry->i_size = i_size_read(inode); - __assign_str(cmdline, command); + __assign_str(cmdline); (void)strreplace(__get_str(cmdline), ' ', '_'); __entry->pid = pid; __entry->ino = inode->i_ino; diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h new file mode 100644 index 0000000000..5ccc0d91b2 --- /dev/null +++ b/include/trace/events/firewire.h @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright (c) 2024 Takashi Sakamoto + +#define TRACE_SYSTEM firewire + +#if !defined(_FIREWIRE_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ) +#define _FIREWIRE_TRACE_EVENT_H + +#include <linux/tracepoint.h> +#include <linux/firewire.h> + +#include <linux/firewire-constants.h> + +#include "../../../drivers/firewire/packet-header-definitions.h" + +// The content of TP_printk field is preprocessed, then put to the module binary. 
+#define ASYNC_HEADER_GET_DESTINATION(header) \ + (((header)[0] & ASYNC_HEADER_Q0_DESTINATION_MASK) >> ASYNC_HEADER_Q0_DESTINATION_SHIFT) + +#define ASYNC_HEADER_GET_TLABEL(header) \ + (((header)[0] & ASYNC_HEADER_Q0_TLABEL_MASK) >> ASYNC_HEADER_Q0_TLABEL_SHIFT) + +#define ASYNC_HEADER_GET_TCODE(header) \ + (((header)[0] & ASYNC_HEADER_Q0_TCODE_MASK) >> ASYNC_HEADER_Q0_TCODE_SHIFT) + +#define ASYNC_HEADER_GET_SOURCE(header) \ + (((header)[1] & ASYNC_HEADER_Q1_SOURCE_MASK) >> ASYNC_HEADER_Q1_SOURCE_SHIFT) + +#define ASYNC_HEADER_GET_OFFSET(header) \ + ((((unsigned long long)((header)[1] & ASYNC_HEADER_Q1_OFFSET_HIGH_MASK)) >> ASYNC_HEADER_Q1_OFFSET_HIGH_SHIFT) << 32)| \ + (header)[2] + +#define ASYNC_HEADER_GET_RCODE(header) \ + (((header)[1] & ASYNC_HEADER_Q1_RCODE_MASK) >> ASYNC_HEADER_Q1_RCODE_SHIFT) + +#define QUADLET_SIZE 4 + +DECLARE_EVENT_CLASS(async_outbound_initiate_template, + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, header, data, data_count), + TP_STRUCT__entry( + __field(u64, transaction) + __field(u8, card_index) + __field(u8, generation) + __field(u8, scode) + __array(u32, header, ASYNC_HEADER_QUADLET_COUNT) + __dynamic_array(u32, data, data_count) + ), + TP_fast_assign( + __entry->transaction = transaction; + __entry->card_index = card_index; + __entry->generation = generation; + __entry->scode = scode; + memcpy(__entry->header, header, QUADLET_SIZE * ASYNC_HEADER_QUADLET_COUNT); + memcpy(__get_dynamic_array(data), data, __get_dynamic_array_len(data)); + ), + // This format is for the request subaction. + TP_printk( + "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s", + __entry->transaction, + __entry->card_index, + __entry->generation, + __entry->scode, + ASYNC_HEADER_GET_DESTINATION(__entry->header), + ASYNC_HEADER_GET_TLABEL(__entry->header), + ASYNC_HEADER_GET_TCODE(__entry->header), + ASYNC_HEADER_GET_SOURCE(__entry->header), + ASYNC_HEADER_GET_OFFSET(__entry->header), + __print_array(__entry->header, ASYNC_HEADER_QUADLET_COUNT, QUADLET_SIZE), + __print_array(__get_dynamic_array(data), + __get_dynamic_array_len(data) / QUADLET_SIZE, QUADLET_SIZE) + ) +); + +// The value of status is one of ack codes and rcodes specific to Linux FireWire subsystem. +DECLARE_EVENT_CLASS(async_outbound_complete_template, + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp), + TP_STRUCT__entry( + __field(u64, transaction) + __field(u8, card_index) + __field(u8, generation) + __field(u8, scode) + __field(u8, status) + __field(u16, timestamp) + ), + TP_fast_assign( + __entry->transaction = transaction; + __entry->card_index = card_index; + __entry->generation = generation; + __entry->scode = scode; + __entry->status = status; + __entry->timestamp = timestamp; + ), + TP_printk( + "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x", + __entry->transaction, + __entry->card_index, + __entry->generation, + __entry->scode, + __entry->status, + __entry->timestamp + ) +); + +// The value of status is one of ack codes and rcodes specific to Linux FireWire subsystem. 
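The ASYNC_HEADER_GET_*() accessors above unpack the IEEE 1394 asynchronous packet header quadlets using the mask/shift constants from packet-header-definitions.h; they are written for TP_printk but work anywhere a header is at hand. A worked sketch (header contents invented) decoding a request header:

static void example_decode_request_header(const u32 header[ASYNC_HEADER_QUADLET_COUNT])
{
	unsigned int dst_id = ASYNC_HEADER_GET_DESTINATION(header);
	unsigned int tlabel = ASYNC_HEADER_GET_TLABEL(header);
	unsigned int tcode = ASYNC_HEADER_GET_TCODE(header);
	unsigned int src_id = ASYNC_HEADER_GET_SOURCE(header);
	unsigned long long offset = ASYNC_HEADER_GET_OFFSET(header);

	pr_info("dst=0x%04x tlabel=%u tcode=%u src=0x%04x offset=0x%012llx\n",
		dst_id, tlabel, tcode, src_id, offset);
}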
+DECLARE_EVENT_CLASS(async_inbound_template, + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count), + TP_STRUCT__entry( + __field(u64, transaction) + __field(u8, card_index) + __field(u8, generation) + __field(u8, scode) + __field(u8, status) + __field(u16, timestamp) + __array(u32, header, ASYNC_HEADER_QUADLET_COUNT) + __dynamic_array(u32, data, data_count) + ), + TP_fast_assign( + __entry->transaction = transaction; + __entry->card_index = card_index; + __entry->generation = generation; + __entry->scode = scode; + __entry->status = status; + __entry->timestamp = timestamp; + memcpy(__entry->header, header, QUADLET_SIZE * ASYNC_HEADER_QUADLET_COUNT); + memcpy(__get_dynamic_array(data), data, __get_dynamic_array_len(data)); + ), + // This format is for the response subaction. + TP_printk( + "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s", + __entry->transaction, + __entry->card_index, + __entry->generation, + __entry->scode, + __entry->status, + __entry->timestamp, + ASYNC_HEADER_GET_DESTINATION(__entry->header), + ASYNC_HEADER_GET_TLABEL(__entry->header), + ASYNC_HEADER_GET_TCODE(__entry->header), + ASYNC_HEADER_GET_SOURCE(__entry->header), + ASYNC_HEADER_GET_RCODE(__entry->header), + __print_array(__entry->header, ASYNC_HEADER_QUADLET_COUNT, QUADLET_SIZE), + __print_array(__get_dynamic_array(data), + __get_dynamic_array_len(data) / QUADLET_SIZE, QUADLET_SIZE) + ) +); + +DEFINE_EVENT(async_outbound_initiate_template, async_request_outbound_initiate, + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, header, data, data_count) +); + +DEFINE_EVENT(async_outbound_complete_template, async_request_outbound_complete, + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp) +); + +DEFINE_EVENT(async_inbound_template, async_response_inbound, + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count) +); + +DEFINE_EVENT_PRINT(async_inbound_template, async_request_inbound, + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count), + TP_printk( + "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s", + __entry->transaction, + __entry->card_index, + __entry->generation, + __entry->scode, + __entry->status, + __entry->timestamp, + ASYNC_HEADER_GET_DESTINATION(__entry->header), + ASYNC_HEADER_GET_TLABEL(__entry->header), + 
+DEFINE_EVENT_PRINT(async_outbound_initiate_template, async_response_outbound_initiate,
+	TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+	TP_ARGS(transaction, card_index, generation, scode, header, data, data_count),
+	TP_printk(
+		"transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
+		__entry->transaction,
+		__entry->card_index,
+		__entry->generation,
+		__entry->scode,
+		ASYNC_HEADER_GET_DESTINATION(__entry->header),
+		ASYNC_HEADER_GET_TLABEL(__entry->header),
+		ASYNC_HEADER_GET_TCODE(__entry->header),
+		ASYNC_HEADER_GET_SOURCE(__entry->header),
+		ASYNC_HEADER_GET_RCODE(__entry->header),
+		__print_array(__entry->header, ASYNC_HEADER_QUADLET_COUNT, QUADLET_SIZE),
+		__print_array(__get_dynamic_array(data),
+			      __get_dynamic_array_len(data) / QUADLET_SIZE, QUADLET_SIZE)
+	)
+);
+
+DEFINE_EVENT(async_outbound_complete_template, async_response_outbound_complete,
+	TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+	TP_ARGS(transaction, card_index, generation, scode, status, timestamp)
+);
+
+#undef ASYNC_HEADER_GET_DESTINATION
+#undef ASYNC_HEADER_GET_TLABEL
+#undef ASYNC_HEADER_GET_TCODE
+#undef ASYNC_HEADER_GET_SOURCE
+#undef ASYNC_HEADER_GET_OFFSET
+#undef ASYNC_HEADER_GET_RCODE
+
+TRACE_EVENT(async_phy_outbound_initiate,
+	TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, u32 first_quadlet, u32 second_quadlet),
+	TP_ARGS(packet, card_index, generation, first_quadlet, second_quadlet),
+	TP_STRUCT__entry(
+		__field(u64, packet)
+		__field(u8, card_index)
+		__field(u8, generation)
+		__field(u32, first_quadlet)
+		__field(u32, second_quadlet)
+	),
+	TP_fast_assign(
+		__entry->packet = packet;
+		__entry->card_index = card_index;
+		__entry->generation = generation;
+		__entry->first_quadlet = first_quadlet;
+		__entry->second_quadlet = second_quadlet;
+	),
+	TP_printk(
+		"packet=0x%llx card_index=%u generation=%u first_quadlet=0x%08x second_quadlet=0x%08x",
+		__entry->packet,
+		__entry->card_index,
+		__entry->generation,
+		__entry->first_quadlet,
+		__entry->second_quadlet
+	)
+);
+
+TRACE_EVENT(async_phy_outbound_complete,
+	TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp),
+	TP_ARGS(packet, card_index, generation, status, timestamp),
+	TP_STRUCT__entry(
+		__field(u64, packet)
+		__field(u8, card_index)
+		__field(u8, generation)
+		__field(u8, status)
+		__field(u16, timestamp)
+	),
+	TP_fast_assign(
+		__entry->packet = packet;
+		__entry->card_index = card_index;
+		__entry->generation = generation;
+		__entry->status = status;
+		__entry->timestamp = timestamp;
+	),
+	TP_printk(
+		"packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x",
+		__entry->packet,
+		__entry->card_index,
+		__entry->generation,
+		__entry->status,
+		__entry->timestamp
+	)
+);
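These two events are intended to be correlated through the packet cookie: the initiate event fires when a PHY packet is queued and records its two raw quadlets, while the complete event fires from the transmit-completion path with the ack status and the 16-bit hardware timestamp. A hedged sketch of how a host-controller driver could emit such a pair; the function names, the fw_card/fw_packet member usage, and the header indices are illustrative assumptions, not code from this patch:

	/* Sketch only: pair the two PHY-packet events on one packet cookie.
	 * card->index, card->generation and p->header[] are assumed here. */
	static void example_send_phy_packet(struct fw_card *card, struct fw_packet *p)
	{
		trace_async_phy_outbound_initiate((uintptr_t)p, card->index,
						  card->generation,
						  p->header[0], p->header[1]);
		/* ... hand the packet to the AT context here ... */
	}

	static void example_phy_packet_done(struct fw_card *card, struct fw_packet *p,
					    int status, unsigned int timestamp)
	{
		trace_async_phy_outbound_complete((uintptr_t)p, card->index,
						  card->generation, status, timestamp);
	}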
+TRACE_EVENT(async_phy_inbound,
+	TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp, u32 first_quadlet, u32 second_quadlet),
+	TP_ARGS(packet, card_index, generation, status, timestamp, first_quadlet, second_quadlet),
+	TP_STRUCT__entry(
+		__field(u64, packet)
+		__field(u8, card_index)
+		__field(u8, generation)
+		__field(u8, status)
+		__field(u16, timestamp)
+		__field(u32, first_quadlet)
+		__field(u32, second_quadlet)
+	),
+	TP_fast_assign(
+		__entry->packet = packet;
+		__entry->card_index = card_index;
+		__entry->generation = generation;
+		__entry->status = status;
+		__entry->timestamp = timestamp;
+		__entry->first_quadlet = first_quadlet;
+		__entry->second_quadlet = second_quadlet;
+	),
+	TP_printk(
+		"packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x first_quadlet=0x%08x second_quadlet=0x%08x",
+		__entry->packet,
+		__entry->card_index,
+		__entry->generation,
+		__entry->status,
+		__entry->timestamp,
+		__entry->first_quadlet,
+		__entry->second_quadlet
+	)
+);
+
+DECLARE_EVENT_CLASS(bus_reset_arrange_template,
+	TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+	TP_ARGS(card_index, generation, short_reset),
+	TP_STRUCT__entry(
+		__field(u8, card_index)
+		__field(u8, generation)
+		__field(bool, short_reset)
+	),
+	TP_fast_assign(
+		__entry->card_index = card_index;
+		__entry->generation = generation;
+		__entry->short_reset = short_reset;
+	),
+	TP_printk(
+		"card_index=%u generation=%u short_reset=%s",
+		__entry->card_index,
+		__entry->generation,
+		__entry->short_reset ? "true" : "false"
+	)
+);
+
+DEFINE_EVENT(bus_reset_arrange_template, bus_reset_initiate,
+	TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+	TP_ARGS(card_index, generation, short_reset)
+);
+
+DEFINE_EVENT(bus_reset_arrange_template, bus_reset_schedule,
+	TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+	TP_ARGS(card_index, generation, short_reset)
+);
+
+DEFINE_EVENT(bus_reset_arrange_template, bus_reset_postpone,
+	TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+	TP_ARGS(card_index, generation, short_reset)
+);
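Because the three arrangement events share one class, the field layout, the assignments, and the format string live once in bus_reset_arrange_template, and each concrete event costs a single DEFINE_EVENT. A minimal sketch of what one more event of the same shape would look like; bus_reset_cancel is a made-up name used purely for illustration:

	DEFINE_EVENT(bus_reset_arrange_template, bus_reset_cancel,
		TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
		TP_ARGS(card_index, generation, short_reset)
	);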
"true" : "false", + __print_array(__get_dynamic_array(self_ids), + __get_dynamic_array_len(self_ids) / QUADLET_SIZE, QUADLET_SIZE) + ) +); + +#undef QUADLET_SIZE + +#endif // _FIREWIRE_TRACE_EVENT_H + +#include <trace/define_trace.h> diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h index 97b09fcf7e..86fe6aecff 100644 --- a/include/trace/events/fs_dax.h +++ b/include/trace/events/fs_dax.h @@ -62,14 +62,14 @@ DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done); DECLARE_EVENT_CLASS(dax_pmd_load_hole_class, TP_PROTO(struct inode *inode, struct vm_fault *vmf, - struct page *zero_page, + struct folio *zero_folio, void *radix_entry), - TP_ARGS(inode, vmf, zero_page, radix_entry), + TP_ARGS(inode, vmf, zero_folio, radix_entry), TP_STRUCT__entry( __field(unsigned long, ino) __field(unsigned long, vm_flags) __field(unsigned long, address) - __field(struct page *, zero_page) + __field(struct folio *, zero_folio) __field(void *, radix_entry) __field(dev_t, dev) ), @@ -78,17 +78,17 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class, __entry->ino = inode->i_ino; __entry->vm_flags = vmf->vma->vm_flags; __entry->address = vmf->address; - __entry->zero_page = zero_page; + __entry->zero_folio = zero_folio; __entry->radix_entry = radix_entry; ), - TP_printk("dev %d:%d ino %#lx %s address %#lx zero_page %p " + TP_printk("dev %d:%d ino %#lx %s address %#lx zero_folio %p " "radix_entry %#lx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->vm_flags & VM_SHARED ? "shared" : "private", __entry->address, - __entry->zero_page, + __entry->zero_folio, (unsigned long)__entry->radix_entry ) ) @@ -96,8 +96,8 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class, #define DEFINE_PMD_LOAD_HOLE_EVENT(name) \ DEFINE_EVENT(dax_pmd_load_hole_class, name, \ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \ - struct page *zero_page, void *radix_entry), \ - TP_ARGS(inode, vmf, zero_page, radix_entry)) + struct folio *zero_folio, void *radix_entry), \ + TP_ARGS(inode, vmf, zero_folio, radix_entry)) DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole); DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback); diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h index a78d21fa9f..4a2bb2c896 100644 --- a/include/trace/events/habanalabs.h +++ b/include/trace/events/habanalabs.h @@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(habanalabs_mmu_template, ), TP_fast_assign( - __assign_str(dname, dev_name(dev)); + __assign_str(dname); __entry->virt_addr = virt_addr; __entry->phys_addr = phys_addr; __entry->page_size = page_size; @@ -64,7 +64,7 @@ DECLARE_EVENT_CLASS(habanalabs_dma_alloc_template, ), TP_fast_assign( - __assign_str(dname, dev_name(dev)); + __assign_str(dname); __entry->cpu_addr = cpu_addr; __entry->dma_addr = dma_addr; __entry->size = size; @@ -103,7 +103,7 @@ DECLARE_EVENT_CLASS(habanalabs_dma_map_template, ), TP_fast_assign( - __assign_str(dname, dev_name(dev)); + __assign_str(dname); __entry->phys_addr = phys_addr; __entry->dma_addr = dma_addr; __entry->len = len; @@ -141,7 +141,7 @@ DECLARE_EVENT_CLASS(habanalabs_comms_template, ), TP_fast_assign( - __assign_str(dname, dev_name(dev)); + __assign_str(dname); __entry->op_str = op_str; ), @@ -178,7 +178,7 @@ DECLARE_EVENT_CLASS(habanalabs_reg_access_template, ), TP_fast_assign( - __assign_str(dname, dev_name(dev)); + __assign_str(dname); __entry->addr = addr; __entry->val = val; ), diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 6e2ef1d4b0..b5f5369b63 100644 --- 
a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -174,10 +174,10 @@ TRACE_EVENT(mm_collapse_huge_page_swapin, TRACE_EVENT(mm_khugepaged_scan_file, - TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file, + TP_PROTO(struct mm_struct *mm, struct folio *folio, struct file *file, int present, int swap, int result), - TP_ARGS(mm, page, file, present, swap, result), + TP_ARGS(mm, folio, file, present, swap, result), TP_STRUCT__entry( __field(struct mm_struct *, mm) @@ -190,8 +190,8 @@ TRACE_EVENT(mm_khugepaged_scan_file, TP_fast_assign( __entry->mm = mm; - __entry->pfn = page ? page_to_pfn(page) : -1; - __assign_str(filename, file->f_path.dentry->d_iname); + __entry->pfn = folio ? folio_pfn(folio) : -1; + __assign_str(filename); __entry->present = present; __entry->swap = swap; __entry->result = result; @@ -207,10 +207,10 @@ TRACE_EVENT(mm_khugepaged_scan_file, ); TRACE_EVENT(mm_khugepaged_collapse_file, - TP_PROTO(struct mm_struct *mm, struct page *hpage, pgoff_t index, + TP_PROTO(struct mm_struct *mm, struct folio *new_folio, pgoff_t index, bool is_shmem, unsigned long addr, struct file *file, int nr, int result), - TP_ARGS(mm, hpage, index, addr, is_shmem, file, nr, result), + TP_ARGS(mm, new_folio, index, addr, is_shmem, file, nr, result), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(unsigned long, hpfn) @@ -224,11 +224,11 @@ TRACE_EVENT(mm_khugepaged_collapse_file, TP_fast_assign( __entry->mm = mm; - __entry->hpfn = hpage ? page_to_pfn(hpage) : -1; + __entry->hpfn = new_folio ? folio_pfn(new_folio) : -1; __entry->index = index; __entry->addr = addr; __entry->is_shmem = is_shmem; - __assign_str(filename, file->f_path.dentry->d_iname); + __assign_str(filename); __entry->nr = nr; __entry->result = result; ),
diff --git a/include/trace/events/thermal_pressure.h b/include/trace/events/hw_pressure.h index b686802013..b9cd688541 100644 --- a/include/trace/events/thermal_pressure.h +++ b/include/trace/events/hw_pressure.h @@ -1,27 +1,27 @@ /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM -#define TRACE_SYSTEM thermal_pressure +#define TRACE_SYSTEM hw_pressure #if !defined(_TRACE_THERMAL_PRESSURE_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_THERMAL_PRESSURE_H #include <linux/tracepoint.h> -TRACE_EVENT(thermal_pressure_update, - TP_PROTO(int cpu, unsigned long thermal_pressure), - TP_ARGS(cpu, thermal_pressure), +TRACE_EVENT(hw_pressure_update, + TP_PROTO(int cpu, unsigned long hw_pressure), + TP_ARGS(cpu, hw_pressure), TP_STRUCT__entry( - __field(unsigned long, thermal_pressure) + __field(unsigned long, hw_pressure) __field(int, cpu) ), TP_fast_assign( - __entry->thermal_pressure = thermal_pressure; + __entry->hw_pressure = hw_pressure; __entry->cpu = cpu; ), - TP_printk("cpu=%d thermal_pressure=%lu", __entry->cpu, __entry->thermal_pressure) + TP_printk("cpu=%d hw_pressure=%lu", __entry->cpu, __entry->hw_pressure) ); #endif /* _TRACE_THERMAL_PRESSURE_H */
diff --git a/include/trace/events/hwmon.h b/include/trace/events/hwmon.h index d7a1d0ffb6..d1ff560cd9 100644 --- a/include/trace/events/hwmon.h +++ b/include/trace/events/hwmon.h @@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(hwmon_attr_class, TP_fast_assign( __entry->index = index; - __assign_str(attr_name, attr_name); + __assign_str(attr_name); __entry->val = val; ), @@ -57,8 +57,8 @@ TRACE_EVENT(hwmon_attr_show_string, TP_fast_assign( __entry->index = index; - __assign_str(attr_name, attr_name); - __assign_str(label, s); + __assign_str(attr_name); + __assign_str(label); ), TP_printk("index=%d, attr_name=%s, val=%s",
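Most of the one-line hunks in this merge are the same mechanical conversion: __assign_str() drops its second argument because, as of this kernel release, the source expression given to __string() at declaration time is recorded and reused, so repeating it at assignment time was redundant and a source of mismatches. A minimal sketch of the resulting pattern, using an invented event whose names are illustrative only:

	TP_STRUCT__entry(
		__string(name, dev->name)	/* source expression captured here... */
	),
	TP_fast_assign(
		__assign_str(name);		/* ...so the copy need not repeat it */
	),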
TP_printk("index=%d, attr_name=%s, val=%s", diff --git a/include/trace/events/icmp.h b/include/trace/events/icmp.h new file mode 100644 index 0000000000..3155979694 --- /dev/null +++ b/include/trace/events/icmp.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM icmp + +#if !defined(_TRACE_ICMP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_ICMP_H + +#include <linux/icmp.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(icmp_send, + + TP_PROTO(const struct sk_buff *skb, int type, int code), + + TP_ARGS(skb, type, code), + + TP_STRUCT__entry( + __field(const void *, skbaddr) + __field(int, type) + __field(int, code) + __array(__u8, saddr, 4) + __array(__u8, daddr, 4) + __field(__u16, sport) + __field(__u16, dport) + __field(unsigned short, ulen) + ), + + TP_fast_assign( + struct iphdr *iph = ip_hdr(skb); + struct udphdr *uh = udp_hdr(skb); + int proto_4 = iph->protocol; + __be32 *p32; + + __entry->skbaddr = skb; + __entry->type = type; + __entry->code = code; + + if (proto_4 != IPPROTO_UDP || (u8 *)uh < skb->head || + (u8 *)uh + sizeof(struct udphdr) + > skb_tail_pointer(skb)) { + __entry->sport = 0; + __entry->dport = 0; + __entry->ulen = 0; + } else { + __entry->sport = ntohs(uh->source); + __entry->dport = ntohs(uh->dest); + __entry->ulen = ntohs(uh->len); + } + + p32 = (__be32 *) __entry->saddr; + *p32 = iph->saddr; + + p32 = (__be32 *) __entry->daddr; + *p32 = iph->daddr; + ), + + TP_printk("icmp_send: type=%d, code=%d. From %pI4:%u to %pI4:%u ulen=%d skbaddr=%p", + __entry->type, __entry->code, + __entry->saddr, __entry->sport, __entry->daddr, + __entry->dport, __entry->ulen, __entry->skbaddr) +); + +#endif /* _TRACE_ICMP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> + diff --git a/include/trace/events/initcall.h b/include/trace/events/initcall.h index eb903c3f19..5282afdf3d 100644 --- a/include/trace/events/initcall.h +++ b/include/trace/events/initcall.h @@ -18,7 +18,7 @@ TRACE_EVENT(initcall_level, ), TP_fast_assign( - __assign_str(level, level); + __assign_str(level); ), TP_printk("level=%s", __get_str(level)) diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h index 8ce2de120f..0d88ebf2c9 100644 --- a/include/trace/events/intel_ifs.h +++ b/include/trace/events/intel_ifs.h @@ -28,7 +28,7 @@ TRACE_EVENT(ifs_status, __entry->status = status; ), - TP_printk("batch: %.2d, start: %.4x, stop: %.4x, status: %.16llx", + TP_printk("batch: 0x%.2x, start: 0x%.4x, stop: 0x%.4x, status: 0x%.16llx", __entry->batch, __entry->start, __entry->stop, diff --git a/include/trace/events/intel_ish.h b/include/trace/events/intel_ish.h index e6d7ff55ee..64b6612c41 100644 --- a/include/trace/events/intel_ish.h +++ b/include/trace/events/intel_ish.h @@ -18,7 +18,7 @@ TRACE_EVENT(ishtp_dump, ), TP_fast_assign( - __assign_str(message, message); + __assign_str(message); ), TP_printk("%s", __get_str(message)) diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index e948df7ce6..412c9c210a 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -164,7 +164,7 @@ TRACE_EVENT(io_uring_queue_async_work, __entry->work = &req->work; __entry->rw = rw; - __assign_str(op_str, io_uring_get_opcode(req->opcode)); + __assign_str(op_str); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%llx, %s queue, work %p", @@ -202,7 +202,7 @@ TRACE_EVENT(io_uring_defer, __entry->data = req->cqe.user_data; __entry->opcode = req->opcode; 
- __assign_str(op_str, io_uring_get_opcode(req->opcode)); + __assign_str(op_str); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s", @@ -303,7 +303,7 @@ TRACE_EVENT(io_uring_fail_link, __entry->opcode = req->opcode; __entry->link = link; - __assign_str(op_str, io_uring_get_opcode(req->opcode)); + __assign_str(op_str); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p", @@ -392,7 +392,7 @@ TRACE_EVENT(io_uring_submit_req, __entry->flags = (__force unsigned long long) req->flags; __entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL; - __assign_str(op_str, io_uring_get_opcode(req->opcode)); + __assign_str(op_str); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%llx, " @@ -436,7 +436,7 @@ TRACE_EVENT(io_uring_poll_arm, __entry->mask = mask; __entry->events = events; - __assign_str(op_str, io_uring_get_opcode(req->opcode)); + __assign_str(op_str); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x", @@ -475,7 +475,7 @@ TRACE_EVENT(io_uring_task_add, __entry->opcode = req->opcode; __entry->mask = mask; - __assign_str(op_str, io_uring_get_opcode(req->opcode)); + __assign_str(op_str); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x", @@ -538,7 +538,7 @@ TRACE_EVENT(io_uring_req_failed, __entry->addr3 = sqe->addr3; __entry->error = error; - __assign_str(op_str, io_uring_get_opcode(sqe->opcode)); + __assign_str(op_str); ), TP_printk("ring %p, req %p, user_data 0x%llx, " diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h index af8bfed528..e772b1bc60 100644 --- a/include/trace/events/iocost.h +++ b/include/trace/events/iocost.h @@ -34,8 +34,8 @@ DECLARE_EVENT_CLASS(iocost_iocg_state, ), TP_fast_assign( - __assign_str(devname, ioc_name(iocg->ioc)); - __assign_str(cgroup, path); + __assign_str(devname); + __assign_str(cgroup); __entry->now = now->now; __entry->vnow = now->vnow; __entry->vrate = iocg->ioc->vtime_base_rate; @@ -93,8 +93,8 @@ DECLARE_EVENT_CLASS(iocg_inuse_update, ), TP_fast_assign( - __assign_str(devname, ioc_name(iocg->ioc)); - __assign_str(cgroup, path); + __assign_str(devname); + __assign_str(cgroup); __entry->now = now->now; __entry->old_inuse = old_inuse; __entry->new_inuse = new_inuse; @@ -159,7 +159,7 @@ TRACE_EVENT(iocost_ioc_vrate_adj, ), TP_fast_assign( - __assign_str(devname, ioc_name(ioc)); + __assign_str(devname); __entry->old_vrate = ioc->vtime_base_rate; __entry->new_vrate = new_vrate; __entry->busy_level = ioc->busy_level; @@ -200,8 +200,8 @@ TRACE_EVENT(iocost_iocg_forgive_debt, ), TP_fast_assign( - __assign_str(devname, ioc_name(iocg->ioc)); - __assign_str(cgroup, path); + __assign_str(devname); + __assign_str(cgroup); __entry->now = now->now; __entry->vnow = now->vnow; __entry->usage_pct = usage_pct; diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h index 70743db1fb..373007e567 100644 --- a/include/trace/events/iommu.h +++ b/include/trace/events/iommu.h @@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(iommu_group_event, TP_fast_assign( __entry->gid = group_id; - __assign_str(device, dev_name(dev)); + __assign_str(device); ), TP_printk("IOMMU: groupID=%d device=%s", @@ -62,7 +62,7 @@ DECLARE_EVENT_CLASS(iommu_device_event, ), TP_fast_assign( - __assign_str(device, dev_name(dev)); + __assign_str(device); ), TP_printk("IOMMU: device=%s", __get_str(device) @@ -138,8 +138,8 @@ DECLARE_EVENT_CLASS(iommu_error, ), TP_fast_assign( - __assign_str(device, dev_name(dev)); - __assign_str(driver, dev_driver_string(dev)); + 
__assign_str(device); + __assign_str(driver); __entry->iova = iova; __entry->flags = flags; ), diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index a07b4607b6..837c1740d0 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h @@ -63,7 +63,7 @@ TRACE_EVENT(irq_handler_entry, TP_fast_assign( __entry->irq = irq; - __assign_str(name, action->name); + __assign_str(name); ), TP_printk("irq=%d name=%s", __entry->irq, __get_str(name)) diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h index 8ff2a3ca5d..990fd154f5 100644 --- a/include/trace/events/iscsi.h +++ b/include/trace/events/iscsi.h @@ -30,7 +30,7 @@ DECLARE_EVENT_CLASS(iscsi_log_msg, ), TP_fast_assign( - __assign_str(dname, dev_name(dev)); + __assign_str(dname); __assign_vstr(msg, vaf->fmt, vaf->va); ), diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 6e62cc64cd..8a829e0f6e 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -126,7 +126,7 @@ TRACE_EVENT(kmem_cache_free, TP_fast_assign( __entry->call_site = call_site; __entry->ptr = ptr; - __assign_str(name, s->name); + __assign_str(name); ), TP_printk("call_site=%pS ptr=%p name=%s", diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 011fba6b55..74e40d5d4a 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -456,21 +456,6 @@ TRACE_EVENT(kvm_unmap_hva_range, __entry->start, __entry->end) ); -TRACE_EVENT(kvm_set_spte_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva) -); - TRACE_EVENT(kvm_age_hva, TP_PROTO(unsigned long start, unsigned long end), TP_ARGS(start, end), diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h index 9ebd081e05..8e89baa377 100644 --- a/include/trace/events/lock.h +++ b/include/trace/events/lock.h @@ -37,7 +37,7 @@ TRACE_EVENT(lock_acquire, TP_fast_assign( __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0); - __assign_str(name, lock->name); + __assign_str(name); __entry->lockdep_addr = lock; ), @@ -59,7 +59,7 @@ DECLARE_EVENT_CLASS(lock, ), TP_fast_assign( - __assign_str(name, lock->name); + __assign_str(name); __entry->lockdep_addr = lock; ), diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h index 1391ada0da..f0f7b3cb20 100644 --- a/include/trace/events/mce.h +++ b/include/trace/events/mce.h @@ -9,6 +9,14 @@ #include <linux/tracepoint.h> #include <asm/mce.h> +/* + * MCE Event Record. + * + * Only very relevant and transient information which cannot be + * gathered from a system by any other means or which can only be + * acquired arduously should be added to this record. 
+ */ + TRACE_EVENT(mce_record, TP_PROTO(struct mce *m), @@ -25,6 +33,7 @@ TRACE_EVENT(mce_record, __field( u64, ipid ) __field( u64, ip ) __field( u64, tsc ) + __field( u64, ppin ) __field( u64, walltime ) __field( u32, cpu ) __field( u32, cpuid ) @@ -33,6 +42,7 @@ TRACE_EVENT(mce_record, __field( u8, cs ) __field( u8, bank ) __field( u8, cpuvendor ) + __field( u32, microcode ) ), TP_fast_assign( @@ -45,6 +55,7 @@ TRACE_EVENT(mce_record, __entry->ipid = m->ipid; __entry->ip = m->ip; __entry->tsc = m->tsc; + __entry->ppin = m->ppin; __entry->walltime = m->time; __entry->cpu = m->extcpu; __entry->cpuid = m->cpuid; @@ -53,20 +64,26 @@ TRACE_EVENT(mce_record, __entry->cs = m->cs; __entry->bank = m->bank; __entry->cpuvendor = m->cpuvendor; + __entry->microcode = m->microcode; ), - TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR/MISC/SYND: %016Lx/%016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x", + TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR: %016Lx, MISC: %016Lx, SYND: %016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PPIN: %llx, vendor: %u, CPUID: %x, time: %llu, socket: %u, APIC: %x, microcode: %x", __entry->cpu, __entry->mcgcap, __entry->mcgstatus, __entry->bank, __entry->status, __entry->ipid, - __entry->addr, __entry->misc, __entry->synd, + __entry->addr, + __entry->misc, + __entry->synd, __entry->cs, __entry->ip, __entry->tsc, - __entry->cpuvendor, __entry->cpuid, + __entry->ppin, + __entry->cpuvendor, + __entry->cpuid, __entry->walltime, __entry->socketid, - __entry->apicid) + __entry->apicid, + __entry->microcode) ); #endif /* _TRACE_MCE_H */ diff --git a/include/trace/events/mdio.h b/include/trace/events/mdio.h index 0f241cbe00..285b3e4f83 100644 --- a/include/trace/events/mdio.h +++ b/include/trace/events/mdio.h @@ -25,7 +25,7 @@ TRACE_EVENT_CONDITION(mdio_access, ), TP_fast_assign( - strncpy(__entry->busid, bus->id, MII_BUS_ID_SIZE); + strscpy(__entry->busid, bus->id, MII_BUS_ID_SIZE); __entry->read = read; __entry->addr = addr; __entry->regnum = regnum; diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h index 14db8044c1..f2827f98a4 100644 --- a/include/trace/events/mmap_lock.h +++ b/include/trace/events/mmap_lock.h @@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(mmap_lock, TP_fast_assign( __entry->mm = mm; - __assign_str(memcg_path, memcg_path); + __assign_str(memcg_path); __entry->write = write; ), @@ -65,7 +65,7 @@ TRACE_EVENT_FN(mmap_lock_acquire_returned, TP_fast_assign( __entry->mm = mm; - __assign_str(memcg_path, memcg_path); + __assign_str(memcg_path); __entry->write = write; __entry->success = success; ), diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h index 7b706ff213..f1c2e94f7f 100644 --- a/include/trace/events/mmc.h +++ b/include/trace/events/mmc.h @@ -68,7 +68,7 @@ TRACE_EVENT(mmc_request_start, __entry->need_retune = host->need_retune; __entry->hold_retune = host->hold_retune; __entry->retune_period = host->retune_period; - __assign_str(name, mmc_hostname(host)); + __assign_str(name); __entry->mrq = mrq; ), @@ -156,7 +156,7 @@ TRACE_EVENT(mmc_request_done, __entry->need_retune = host->need_retune; __entry->hold_retune = host->hold_retune; __entry->retune_period = host->retune_period; - __assign_str(name, mmc_hostname(host)); + __assign_str(name); __entry->mrq = mrq; ), diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index d55e53ac91..e46d6e8276 100644 --- a/include/trace/events/mmflags.h +++ 
b/include/trace/events/mmflags.h @@ -107,7 +107,6 @@ DEF_PAGEFLAG_NAME(lru), \ DEF_PAGEFLAG_NAME(active), \ DEF_PAGEFLAG_NAME(workingset), \ - DEF_PAGEFLAG_NAME(slab), \ DEF_PAGEFLAG_NAME(owner_priv_1), \ DEF_PAGEFLAG_NAME(arch_1), \ DEF_PAGEFLAG_NAME(reserved), \ @@ -135,6 +134,7 @@ IF_HAVE_PG_ARCH_X(arch_3) #define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) } #define __def_pagetype_names \ + DEF_PAGETYPE_NAME(slab), \ DEF_PAGETYPE_NAME(hugetlb), \ DEF_PAGETYPE_NAME(offline), \ DEF_PAGETYPE_NAME(guard), \ diff --git a/include/trace/events/module.h b/include/trace/events/module.h index 097485c73c..e5a006be9d 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h @@ -41,7 +41,7 @@ TRACE_EVENT(module_load, TP_fast_assign( __entry->taints = mod->taints; - __assign_str(name, mod->name); + __assign_str(name); ), TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints)) @@ -58,7 +58,7 @@ TRACE_EVENT(module_free, ), TP_fast_assign( - __assign_str(name, mod->name); + __assign_str(name); ), TP_printk("%s", __get_str(name)) @@ -82,7 +82,7 @@ DECLARE_EVENT_CLASS(module_refcnt, TP_fast_assign( __entry->ip = ip; __entry->refcnt = atomic_read(&mod->refcnt); - __assign_str(name, mod->name); + __assign_str(name); ), TP_printk("%s call_site=%ps refcnt=%d", @@ -119,7 +119,7 @@ TRACE_EVENT(module_request, TP_fast_assign( __entry->ip = ip; __entry->wait = wait; - __assign_str(name, name); + __assign_str(name); ), TP_printk("%s wait=%d call_site=%ps", diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h index dc03cf8e03..b567b9ffed 100644 --- a/include/trace/events/napi.h +++ b/include/trace/events/napi.h @@ -26,7 +26,7 @@ TRACE_EVENT(napi_poll, TP_fast_assign( __entry->napi = napi; - __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV); + __assign_str(dev_name); __entry->work = work; __entry->budget = budget; ), diff --git a/include/trace/events/nbd.h b/include/trace/events/nbd.h index 9849956f34..390d98a05c 100644 --- a/include/trace/events/nbd.h +++ b/include/trace/events/nbd.h @@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(nbd_send_request, ), TP_fast_assign( - __entry->nbd_request = 0; + __entry->nbd_request = NULL; __entry->dev_index = index; __entry->request = rq; ), diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h index 833143d099..12362c35db 100644 --- a/include/trace/events/neigh.h +++ b/include/trace/events/neigh.h @@ -42,7 +42,7 @@ TRACE_EVENT(neigh_create, __be32 *p32; __entry->family = tbl->family; - __assign_str(dev, (dev ? dev->name : "NULL")); + __assign_str(dev); __entry->entries = atomic_read(&tbl->gc_entries); __entry->created = n != NULL; __entry->gc_exempt = exempt_from_gc; @@ -103,7 +103,7 @@ TRACE_EVENT(neigh_update, __be32 *p32; __entry->family = n->tbl->family; - __assign_str(dev, (n->dev ? n->dev->name : "NULL")); + __assign_str(dev); __entry->lladdr_len = lladdr_len; memcpy(__entry->lladdr, n->ha, lladdr_len); __entry->flags = n->flags; @@ -180,7 +180,7 @@ DECLARE_EVENT_CLASS(neigh__update, __be32 *p32; __entry->family = n->tbl->family; - __assign_str(dev, (n->dev ? 
n->dev->name : "NULL")); + __assign_str(dev); __entry->lladdr_len = lladdr_len; memcpy(__entry->lladdr, n->ha, lladdr_len); __entry->flags = n->flags; diff --git a/include/trace/events/net.h b/include/trace/events/net.h index f667c76a3b..d55162c12f 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -38,7 +38,7 @@ TRACE_EVENT(net_dev_start_xmit, ), TP_fast_assign( - __assign_str(name, dev->name); + __assign_str(name); __entry->queue_mapping = skb->queue_mapping; __entry->skbaddr = skb; __entry->vlan_tagged = skb_vlan_tag_present(skb); @@ -89,7 +89,7 @@ TRACE_EVENT(net_dev_xmit, __entry->skbaddr = skb; __entry->len = skb_len; __entry->rc = rc; - __assign_str(name, dev->name); + __assign_str(name); ), TP_printk("dev=%s skbaddr=%p len=%u rc=%d", @@ -110,8 +110,8 @@ TRACE_EVENT(net_dev_xmit_timeout, ), TP_fast_assign( - __assign_str(name, dev->name); - __assign_str(driver, netdev_drivername(dev)); + __assign_str(name); + __assign_str(driver); __entry->queue_index = queue_index; ), @@ -134,7 +134,7 @@ DECLARE_EVENT_CLASS(net_dev_template, TP_fast_assign( __entry->skbaddr = skb; __entry->len = skb->len; - __assign_str(name, skb->dev->name); + __assign_str(name); ), TP_printk("dev=%s skbaddr=%p len=%u", @@ -191,7 +191,7 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template, ), TP_fast_assign( - __assign_str(name, skb->dev->name); + __assign_str(name); #ifdef CONFIG_NET_RX_BUSY_POLL __entry->napi_id = skb->napi_id; #else diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h index 3930119cab..976a58364b 100644 --- a/include/trace/events/net_probe_common.h +++ b/include/trace/events/net_probe_common.h @@ -41,4 +41,75 @@ #endif +#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \ + do { \ + struct in6_addr *pin6; \ + \ + pin6 = (struct in6_addr *)__entry->saddr_v6; \ + ipv6_addr_set_v4mapped(saddr, pin6); \ + pin6 = (struct in6_addr *)__entry->daddr_v6; \ + ipv6_addr_set_v4mapped(daddr, pin6); \ + } while (0) + +#if IS_ENABLED(CONFIG_IPV6) +#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \ + do { \ + if (sk->sk_family == AF_INET6) { \ + struct in6_addr *pin6; \ + \ + pin6 = (struct in6_addr *)__entry->saddr_v6; \ + *pin6 = saddr6; \ + pin6 = (struct in6_addr *)__entry->daddr_v6; \ + *pin6 = daddr6; \ + } else { \ + TP_STORE_V4MAPPED(__entry, saddr, daddr); \ + } \ + } while (0) +#else +#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \ + TP_STORE_V4MAPPED(__entry, saddr, daddr) +#endif + +#define TP_STORE_ADDR_PORTS_SKB_V4(skb, protoh, entry_saddr, entry_daddr) \ + do { \ + struct sockaddr_in *v4 = (void *)entry_saddr; \ + \ + v4->sin_family = AF_INET; \ + v4->sin_port = protoh->source; \ + v4->sin_addr.s_addr = ip_hdr(skb)->saddr; \ + v4 = (void *)entry_daddr; \ + v4->sin_family = AF_INET; \ + v4->sin_port = protoh->dest; \ + v4->sin_addr.s_addr = ip_hdr(skb)->daddr; \ + } while (0) + +#if IS_ENABLED(CONFIG_IPV6) + +#define TP_STORE_ADDR_PORTS_SKB(skb, protoh, entry_saddr, entry_daddr) \ + do { \ + const struct iphdr *iph = ip_hdr(skb); \ + \ + if (iph->version == 6) { \ + struct sockaddr_in6 *v6 = (void *)entry_saddr; \ + \ + v6->sin6_family = AF_INET6; \ + v6->sin6_port = protoh->source; \ + v6->sin6_addr = ipv6_hdr(skb)->saddr; \ + v6 = (void *)entry_daddr; \ + v6->sin6_family = AF_INET6; \ + v6->sin6_port = protoh->dest; \ + v6->sin6_addr = ipv6_hdr(skb)->daddr; \ + } else \ + TP_STORE_ADDR_PORTS_SKB_V4(skb, protoh, \ + entry_saddr, \ + entry_daddr); \ + } while (0) + +#else + +#define 
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 447a8c21cf..da23484268 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -24,8 +24,8 @@ E_(netfs_read_trace_write_begin, "WRITEBEGN") #define netfs_write_traces \ + EM(netfs_write_trace_copy_to_cache, "COPY2CACH") \ EM(netfs_write_trace_dio_write, "DIO-WRITE") \ - EM(netfs_write_trace_launder, "LAUNDER ") \ EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \ EM(netfs_write_trace_writeback, "WRITEBACK") \ E_(netfs_write_trace_writethrough, "WRITETHRU") @@ -34,9 +34,9 @@ EM(NETFS_READAHEAD, "RA") \ EM(NETFS_READPAGE, "RP") \ EM(NETFS_READ_FOR_WRITE, "RW") \ + EM(NETFS_COPY_TO_CACHE, "CC") \ EM(NETFS_WRITEBACK, "WB") \ EM(NETFS_WRITETHROUGH, "WT") \ - EM(NETFS_LAUNDER_WRITE, "LW") \ EM(NETFS_UNBUFFERED_WRITE, "UW") \ EM(NETFS_DIO_READ, "DR") \ E_(NETFS_DIO_WRITE, "DW") @@ -44,14 +44,18 @@ #define netfs_rreq_traces \ EM(netfs_rreq_trace_assess, "ASSESS ") \ EM(netfs_rreq_trace_copy, "COPY ") \ + EM(netfs_rreq_trace_collect, "COLLECT") \ EM(netfs_rreq_trace_done, "DONE ") \ EM(netfs_rreq_trace_free, "FREE ") \ EM(netfs_rreq_trace_redirty, "REDIRTY") \ EM(netfs_rreq_trace_resubmit, "RESUBMT") \ + EM(netfs_rreq_trace_set_pause, "PAUSE ") \ EM(netfs_rreq_trace_unlock, "UNLOCK ") \ EM(netfs_rreq_trace_unmark, "UNMARK ") \ EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \ + EM(netfs_rreq_trace_wait_pause, "WT-PAUS") \ EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \ + EM(netfs_rreq_trace_unpause, "UNPAUSE") \ E_(netfs_rreq_trace_write_done, "WR-DONE") #define netfs_sreq_sources \ @@ -64,11 +68,15 @@ E_(NETFS_INVALID_WRITE, "INVL") #define netfs_sreq_traces \ + EM(netfs_sreq_trace_discard, "DSCRD") \ EM(netfs_sreq_trace_download_instead, "RDOWN") \ + EM(netfs_sreq_trace_fail, "FAIL ") \ EM(netfs_sreq_trace_free, "FREE ") \ EM(netfs_sreq_trace_limited, "LIMIT") \ EM(netfs_sreq_trace_prepare, "PREP ") \ + EM(netfs_sreq_trace_prep_failed, "PRPFL") \ EM(netfs_sreq_trace_resubmit_short, "SHORT") \ + EM(netfs_sreq_trace_retry, "RETRY") \ EM(netfs_sreq_trace_submit, "SUBMT") \ EM(netfs_sreq_trace_terminated, "TERM ") \ EM(netfs_sreq_trace_write, "WRITE") \ @@ -88,6 +96,7 @@ #define netfs_rreq_ref_traces \ EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \ EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \ + EM(netfs_rreq_trace_get_work, "GET WORK ") \ EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \ EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \ EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \ @@ -95,19 +104,25 @@ EM(netfs_rreq_trace_put_return, "PUT RETURN ") \ EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \ EM(netfs_rreq_trace_put_work, "PUT WORK ") \ + EM(netfs_rreq_trace_put_work_complete, "PUT WORK CP") \ + EM(netfs_rreq_trace_put_work_nq, "PUT WORK NQ") \ EM(netfs_rreq_trace_see_work, "SEE WORK ") \ E_(netfs_rreq_trace_new, "NEW ") #define netfs_sreq_ref_traces \ EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \ EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \ + EM(netfs_sreq_trace_get_submit, "GET SUBMIT") \ EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \ EM(netfs_sreq_trace_new, "NEW ") \ + EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \ EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \ EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \ + EM(netfs_sreq_trace_put_done, "PUT DONE ") \ EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \ EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \ + EM(netfs_sreq_trace_put_oom, "PUT OOM ") \ EM(netfs_sreq_trace_put_wip, "PUT WIP ") \ EM(netfs_sreq_trace_put_work, "PUT WORK ") \ E_(netfs_sreq_trace_put_terminated, "PUT TERM ") @@ -124,24 +139,33 @@ EM(netfs_streaming_filled_page, "mod-streamw-f") \ EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \ /* The rest are for writeback */ \ + EM(netfs_folio_trace_cancel_copy, "cancel-copy") \ EM(netfs_folio_trace_clear, "clear") \ - EM(netfs_folio_trace_clear_s, "clear-s") \ + EM(netfs_folio_trace_clear_cc, "clear-cc") \ EM(netfs_folio_trace_clear_g, "clear-g") \ - EM(netfs_folio_trace_copy_to_cache, "copy") \ - EM(netfs_folio_trace_end_copy, "end-copy") \ + EM(netfs_folio_trace_clear_s, "clear-s") \ + EM(netfs_folio_trace_copy_to_cache, "mark-copy") \ EM(netfs_folio_trace_filled_gaps, "filled-gaps") \ EM(netfs_folio_trace_kill, "kill") \ - EM(netfs_folio_trace_launder, "launder") \ + EM(netfs_folio_trace_kill_cc, "kill-cc") \ + EM(netfs_folio_trace_kill_g, "kill-g") \ + EM(netfs_folio_trace_kill_s, "kill-s") \ EM(netfs_folio_trace_mkwrite, "mkwrite") \ EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \ + EM(netfs_folio_trace_not_under_wback, "!wback") \ EM(netfs_folio_trace_read_gaps, "read-gaps") \ - EM(netfs_folio_trace_redirty, "redirty") \ EM(netfs_folio_trace_redirtied, "redirtied") \ EM(netfs_folio_trace_store, "store") \ + EM(netfs_folio_trace_store_copy, "store-copy") \ EM(netfs_folio_trace_store_plus, "store+") \ EM(netfs_folio_trace_wthru, "wthru") \ E_(netfs_folio_trace_wthru_plus, "wthru+") +#define netfs_collect_contig_traces \ + EM(netfs_contig_trace_collect, "Collect") \ + EM(netfs_contig_trace_jump, "-->JUMP-->") \ + E_(netfs_contig_trace_unlock, "Unlock") + #ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY #define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY @@ -158,6 +182,7 @@ enum netfs_failure { netfs_failures } __mode(byte); enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte); enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte); enum netfs_folio_trace { netfs_folio_traces } __mode(byte); +enum netfs_collect_contig_trace { netfs_collect_contig_traces } __mode(byte); #endif @@ -179,6 +204,7 @@ netfs_failures; netfs_rreq_ref_traces; netfs_sreq_ref_traces; netfs_folio_traces; +netfs_collect_contig_traces; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -279,7 +305,7 @@ TRACE_EVENT(netfs_sreq, __entry->start = sreq->start; ), - TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d", + TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx e=%d", __entry->rreq, __entry->index, __print_symbolic(__entry->source, netfs_sreq_sources), __print_symbolic(__entry->what, netfs_sreq_traces), @@ -319,7 +345,7 @@ TRACE_EVENT(netfs_failure, __entry->start = sreq ? 
sreq->start : 0; ), - TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d", + TP_printk("R=%08x[%x] %s f=%02x s=%llx %zx/%zx %s e=%d", __entry->rreq, __entry->index, __print_symbolic(__entry->source, netfs_sreq_sources), __entry->flags, @@ -412,16 +438,18 @@ TRACE_EVENT(netfs_write_iter, __field(unsigned long long, start ) __field(size_t, len ) __field(unsigned int, flags ) + __field(unsigned int, ino ) ), TP_fast_assign( __entry->start = iocb->ki_pos; __entry->len = iov_iter_count(from); + __entry->ino = iocb->ki_filp->f_inode->i_ino; __entry->flags = iocb->ki_flags; ), - TP_printk("WRITE-ITER s=%llx l=%zx f=%x", - __entry->start, __entry->len, __entry->flags) + TP_printk("WRITE-ITER i=%x s=%llx l=%zx f=%x", + __entry->ino, __entry->start, __entry->len, __entry->flags) ); TRACE_EVENT(netfs_write, @@ -433,9 +461,10 @@ TRACE_EVENT(netfs_write, TP_STRUCT__entry( __field(unsigned int, wreq ) __field(unsigned int, cookie ) + __field(unsigned int, ino ) __field(enum netfs_write_trace, what ) __field(unsigned long long, start ) - __field(size_t, len ) + __field(unsigned long long, len ) ), TP_fast_assign( @@ -443,18 +472,213 @@ TRACE_EVENT(netfs_write, struct fscache_cookie *__cookie = netfs_i_cookie(__ctx); __entry->wreq = wreq->debug_id; __entry->cookie = __cookie ? __cookie->debug_id : 0; + __entry->ino = wreq->inode->i_ino; __entry->what = what; __entry->start = wreq->start; __entry->len = wreq->len; ), - TP_printk("R=%08x %s c=%08x by=%llx-%llx", + TP_printk("R=%08x %s c=%08x i=%x by=%llx-%llx", __entry->wreq, __print_symbolic(__entry->what, netfs_write_traces), __entry->cookie, + __entry->ino, __entry->start, __entry->start + __entry->len - 1) ); +TRACE_EVENT(netfs_collect, + TP_PROTO(const struct netfs_io_request *wreq), + + TP_ARGS(wreq), + + TP_STRUCT__entry( + __field(unsigned int, wreq ) + __field(unsigned int, len ) + __field(unsigned long long, transferred ) + __field(unsigned long long, start ) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->start = wreq->start; + __entry->len = wreq->len; + __entry->transferred = wreq->transferred; + ), + + TP_printk("R=%08x s=%llx-%llx", + __entry->wreq, + __entry->start + __entry->transferred, + __entry->start + __entry->len) + ); + +TRACE_EVENT(netfs_collect_contig, + TP_PROTO(const struct netfs_io_request *wreq, unsigned long long to, + enum netfs_collect_contig_trace type), + + TP_ARGS(wreq, to, type), + + TP_STRUCT__entry( + __field(unsigned int, wreq) + __field(enum netfs_collect_contig_trace, type) + __field(unsigned long long, contiguity) + __field(unsigned long long, to) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->type = type; + __entry->contiguity = wreq->contiguity; + __entry->to = to; + ), + + TP_printk("R=%08x %llx -> %llx %s", + __entry->wreq, + __entry->contiguity, + __entry->to, + __print_symbolic(__entry->type, netfs_collect_contig_traces)) + ); + +TRACE_EVENT(netfs_collect_sreq, + TP_PROTO(const struct netfs_io_request *wreq, + const struct netfs_io_subrequest *subreq), + + TP_ARGS(wreq, subreq), + + TP_STRUCT__entry( + __field(unsigned int, wreq ) + __field(unsigned int, subreq ) + __field(unsigned int, stream ) + __field(unsigned int, len ) + __field(unsigned int, transferred ) + __field(unsigned long long, start ) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->subreq = subreq->debug_index; + __entry->stream = subreq->stream_nr; + __entry->start = subreq->start; + __entry->len = subreq->len; + __entry->transferred = subreq->transferred; + ), + + 
TP_printk("R=%08x[%u:%02x] s=%llx t=%x/%x", + __entry->wreq, __entry->stream, __entry->subreq, + __entry->start, __entry->transferred, __entry->len) + ); + +TRACE_EVENT(netfs_collect_folio, + TP_PROTO(const struct netfs_io_request *wreq, + const struct folio *folio, + unsigned long long fend, + unsigned long long collected_to), + + TP_ARGS(wreq, folio, fend, collected_to), + + TP_STRUCT__entry( + __field(unsigned int, wreq ) + __field(unsigned long, index ) + __field(unsigned long long, fend ) + __field(unsigned long long, cleaned_to ) + __field(unsigned long long, collected_to ) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->index = folio->index; + __entry->fend = fend; + __entry->cleaned_to = wreq->cleaned_to; + __entry->collected_to = collected_to; + ), + + TP_printk("R=%08x ix=%05lx r=%llx-%llx t=%llx/%llx", + __entry->wreq, __entry->index, + (unsigned long long)__entry->index * PAGE_SIZE, __entry->fend, + __entry->cleaned_to, __entry->collected_to) + ); + +TRACE_EVENT(netfs_collect_state, + TP_PROTO(const struct netfs_io_request *wreq, + unsigned long long collected_to, + unsigned int notes), + + TP_ARGS(wreq, collected_to, notes), + + TP_STRUCT__entry( + __field(unsigned int, wreq ) + __field(unsigned int, notes ) + __field(unsigned long long, collected_to ) + __field(unsigned long long, cleaned_to ) + __field(unsigned long long, contiguity ) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->notes = notes; + __entry->collected_to = collected_to; + __entry->cleaned_to = wreq->cleaned_to; + __entry->contiguity = wreq->contiguity; + ), + + TP_printk("R=%08x cto=%llx fto=%llx ctg=%llx n=%x", + __entry->wreq, __entry->collected_to, + __entry->cleaned_to, __entry->contiguity, + __entry->notes) + ); + +TRACE_EVENT(netfs_collect_gap, + TP_PROTO(const struct netfs_io_request *wreq, + const struct netfs_io_stream *stream, + unsigned long long jump_to, char type), + + TP_ARGS(wreq, stream, jump_to, type), + + TP_STRUCT__entry( + __field(unsigned int, wreq) + __field(unsigned char, stream) + __field(unsigned char, type) + __field(unsigned long long, from) + __field(unsigned long long, to) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->stream = stream->stream_nr; + __entry->from = stream->collected_to; + __entry->to = jump_to; + __entry->type = type; + ), + + TP_printk("R=%08x[%x:] %llx->%llx %c", + __entry->wreq, __entry->stream, + __entry->from, __entry->to, __entry->type) + ); + +TRACE_EVENT(netfs_collect_stream, + TP_PROTO(const struct netfs_io_request *wreq, + const struct netfs_io_stream *stream), + + TP_ARGS(wreq, stream), + + TP_STRUCT__entry( + __field(unsigned int, wreq) + __field(unsigned char, stream) + __field(unsigned long long, collected_to) + __field(unsigned long long, front) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->stream = stream->stream_nr; + __entry->collected_to = stream->collected_to; + __entry->front = stream->front ? 
stream->front->start : UINT_MAX; + ), + + TP_printk("R=%08x[%x:] cto=%llx frn=%llx", + __entry->wreq, __entry->stream, + __entry->collected_to, __entry->front) + ); + #undef EM #undef E_ #endif /* _TRACE_NETFS_H */ diff --git a/include/trace/events/netlink.h b/include/trace/events/netlink.h index 3b7be3b386..f036b8a205 100644 --- a/include/trace/events/netlink.h +++ b/include/trace/events/netlink.h @@ -17,7 +17,7 @@ TRACE_EVENT(netlink_extack, ), TP_fast_assign( - __assign_str(msg, msg); + __assign_str(msg); ), TP_printk("msg=%s", __get_str(msg)) diff --git a/include/trace/events/nilfs2.h b/include/trace/events/nilfs2.h index 8efc6236f5..8880c11733 100644 --- a/include/trace/events/nilfs2.h +++ b/include/trace/events/nilfs2.h @@ -200,7 +200,11 @@ TRACE_EVENT(nilfs2_mdt_submit_block, __field(struct inode *, inode) __field(unsigned long, ino) __field(unsigned long, blkoff) - __field(enum req_op, mode) + /* + * Use field_struct() to avoid is_signed_type() on the + * bitwise type enum req_op. + */ + __field_struct(enum req_op, mode) ), TP_fast_assign( diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h index b799f3bcba..a42be4c856 100644 --- a/include/trace/events/oom.h +++ b/include/trace/events/oom.h @@ -92,7 +92,7 @@ TRACE_EVENT(mark_victim, TP_fast_assign( __entry->pid = task->pid; - __assign_str(comm, task->comm); + __assign_str(comm); __entry->total_vm = PG_COUNT_TO_KB(task->mm->total_vm); __entry->anon_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_ANONPAGES)); __entry->file_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_FILEPAGES)); diff --git a/include/trace/events/osnoise.h b/include/trace/events/osnoise.h index 82f741ec0f..a2379a4f06 100644 --- a/include/trace/events/osnoise.h +++ b/include/trace/events/osnoise.h @@ -75,7 +75,7 @@ TRACE_EVENT(irq_noise, ), TP_fast_assign( - __assign_str(desc, desc); + __assign_str(desc); __entry->vector = vector; __entry->start = start; __entry->duration = duration; diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h index 8a99c1cd41..fe33a255b7 100644 --- a/include/trace/events/page_ref.h +++ b/include/trace/events/page_ref.h @@ -30,7 +30,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template, __entry->pfn = page_to_pfn(page); __entry->flags = page->flags; __entry->count = page_ref_count(page); - __entry->mapcount = page_mapcount(page); + __entry->mapcount = atomic_read(&page->_mapcount); __entry->mapping = page->mapping; __entry->mt = get_pageblock_migratetype(page); __entry->val = v; @@ -79,7 +79,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template, __entry->pfn = page_to_pfn(page); __entry->flags = page->flags; __entry->count = page_ref_count(page); - __entry->mapcount = page_mapcount(page); + __entry->mapcount = atomic_read(&page->_mapcount); __entry->mapping = page->mapping; __entry->mt = get_pageblock_migratetype(page); __entry->val = v; diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 77f14f7a11..d2349b6b53 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -76,7 +76,7 @@ TRACE_EVENT(powernv_throttle, TP_fast_assign( __entry->chip_id = chip_id; - __assign_str(reason, reason); + __assign_str(reason); __entry->pmax = pmax; ), @@ -210,11 +210,10 @@ TRACE_EVENT(device_pm_callback_start, ), TP_fast_assign( - __assign_str(device, dev_name(dev)); - __assign_str(driver, dev_driver_string(dev)); - __assign_str(parent, - dev->parent ? dev_name(dev->parent) : "none"); - __assign_str(pm_ops, pm_ops ? 
pm_ops : "none "); + __assign_str(device); + __assign_str(driver); + __assign_str(parent); + __assign_str(pm_ops); __entry->event = event; ), @@ -236,8 +235,8 @@ TRACE_EVENT(device_pm_callback_end, ), TP_fast_assign( - __assign_str(device, dev_name(dev)); - __assign_str(driver, dev_driver_string(dev)); + __assign_str(device); + __assign_str(driver); __entry->error = error; ), @@ -279,7 +278,7 @@ DECLARE_EVENT_CLASS(wakeup_source, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); __entry->state = state; ), @@ -318,7 +317,7 @@ DECLARE_EVENT_CLASS(clock, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); __entry->state = state; __entry->cpu_id = cpu_id; ), @@ -364,7 +363,7 @@ DECLARE_EVENT_CLASS(power_domain, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); __entry->state = state; __entry->cpu_id = cpu_id; ), @@ -486,7 +485,7 @@ DECLARE_EVENT_CLASS(dev_pm_qos_request, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); __entry->type = type; __entry->new_value = new_value; ), diff --git a/include/trace/events/pwc.h b/include/trace/events/pwc.h index a2da764a3b..0543702542 100644 --- a/include/trace/events/pwc.h +++ b/include/trace/events/pwc.h @@ -26,7 +26,7 @@ TRACE_EVENT(pwc_handler_enter, __entry->urb__actual_length = urb->actual_length; __entry->fbuf__filled = (pdev->fill_buf ? pdev->fill_buf->filled : 0); - __assign_str(name, pdev->v4l2_dev.name); + __assign_str(name); ), TP_printk("dev=%s (fbuf=%p filled=%d) urb=%p (status=%d actual_length=%u)", __get_str(name), @@ -50,7 +50,7 @@ TRACE_EVENT(pwc_handler_exit, __entry->urb = urb; __entry->fbuf = pdev->fill_buf; __entry->fbuf__filled = pdev->fill_buf->filled; - __assign_str(name, pdev->v4l2_dev.name); + __assign_str(name); ), TP_printk(" dev=%s (fbuf=%p filled=%d) urb=%p", __get_str(name), diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h index 061fd49603..ff33f41a9d 100644 --- a/include/trace/events/qdisc.h +++ b/include/trace/events/qdisc.h @@ -88,8 +88,8 @@ TRACE_EVENT(qdisc_reset, ), TP_fast_assign( - __assign_str(dev, qdisc_dev(q) ? 
qdisc_dev(q)->name : "(null)"); - __assign_str(kind, q->ops->id); + __assign_str(dev); + __assign_str(kind); __entry->parent = q->parent; __entry->handle = q->handle; ), @@ -113,8 +113,8 @@ TRACE_EVENT(qdisc_destroy, ), TP_fast_assign( - __assign_str(dev, qdisc_dev(q)->name); - __assign_str(kind, q->ops->id); + __assign_str(dev); + __assign_str(kind); __entry->parent = q->parent; __entry->handle = q->handle; ), @@ -137,8 +137,8 @@ TRACE_EVENT(qdisc_create, ), TP_fast_assign( - __assign_str(dev, dev->name); - __assign_str(kind, ops->id); + __assign_str(dev); + __assign_str(kind); __entry->parent = parent; ), diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h index e7fd55e7dc..8800c35525 100644 --- a/include/trace/events/qla.h +++ b/include/trace/events/qla.h @@ -25,7 +25,7 @@ DECLARE_EVENT_CLASS(qla_log_event, __vstring(msg, vaf->fmt, vaf->va) ), TP_fast_assign( - __assign_str(buf, buf); + __assign_str(buf); __assign_vstr(msg, vaf->fmt, vaf->va); ), diff --git a/include/trace/events/qrtr.h b/include/trace/events/qrtr.h index 441132c671..14f8229837 100644 --- a/include/trace/events/qrtr.h +++ b/include/trace/events/qrtr.h @@ -102,7 +102,7 @@ TRACE_EVENT(qrtr_ns_message, ), TP_fast_assign( - __assign_str(ctrl_pkt_str, ctrl_pkt_str); + __assign_str(ctrl_pkt_str); __entry->sq_node = sq_node; __entry->sq_port = sq_port; ), diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 2ef9c71977..31b3e0d3e6 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -708,6 +708,33 @@ TRACE_EVENT_RCU(rcu_invoke_kfree_bulk_callback, ); /* + * Tracepoint for a normal synchronize_rcu() states. The first argument + * is the RCU flavor, the second argument is a pointer to rcu_head the + * last one is an event. + */ +TRACE_EVENT_RCU(rcu_sr_normal, + + TP_PROTO(const char *rcuname, struct rcu_head *rhp, const char *srevent), + + TP_ARGS(rcuname, rhp, srevent), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(void *, rhp) + __field(const char *, srevent) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->rhp = rhp; + __entry->srevent = srevent; + ), + + TP_printk("%s rhp=0x%p event=%s", + __entry->rcuname, __entry->rhp, __entry->srevent) +); + +/* * Tracepoint for exiting rcu_do_batch after RCU callbacks have been * invoked. 
The first argument is the name of the RCU flavor, * the second argument is number of callbacks actually invoked, diff --git a/include/trace/events/regulator.h b/include/trace/events/regulator.h index 72b3ba93b0..c58481a5d9 100644 --- a/include/trace/events/regulator.h +++ b/include/trace/events/regulator.h @@ -23,7 +23,7 @@ DECLARE_EVENT_CLASS(regulator_basic, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); ), TP_printk("name=%s", __get_str(name)) @@ -119,7 +119,7 @@ DECLARE_EVENT_CLASS(regulator_range, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); __entry->min = min; __entry->max = max; ), @@ -152,7 +152,7 @@ DECLARE_EVENT_CLASS(regulator_value, ), TP_fast_assign( - __assign_str(name, name); + __assign_str(name); __entry->val = val; ), diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h index f50fcafc69..b0b6300a0c 100644 --- a/include/trace/events/rpcgss.h +++ b/include/trace/events/rpcgss.h @@ -54,7 +54,7 @@ TRACE_DEFINE_ENUM(GSS_S_UNSEQ_TOKEN); TRACE_DEFINE_ENUM(GSS_S_GAP_TOKEN); #define show_gss_status(x) \ - __print_flags(x, "|", \ + __print_symbolic(x, \ { GSS_S_BAD_MECH, "GSS_S_BAD_MECH" }, \ { GSS_S_BAD_NAME, "GSS_S_BAD_NAME" }, \ { GSS_S_BAD_NAMETYPE, "GSS_S_BAD_NAMETYPE" }, \ @@ -154,7 +154,7 @@ DECLARE_EVENT_CLASS(rpcgss_ctx_class, TP_fast_assign( __entry->cred = gc; __entry->service = gc->gc_service; - __assign_str(principal, gc->gc_principal); + __assign_str(principal); ), TP_printk("cred=%p service=%s principal='%s'", @@ -189,7 +189,7 @@ DECLARE_EVENT_CLASS(rpcgss_svc_gssapi_class, TP_fast_assign( __entry->xid = __be32_to_cpu(rqstp->rq_xid); __entry->maj_stat = maj_stat; - __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s xid=0x%08x maj_stat=%s", @@ -225,7 +225,7 @@ TRACE_EVENT(rpcgss_svc_wrap_failed, TP_fast_assign( __entry->xid = be32_to_cpu(rqstp->rq_xid); - __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid) @@ -245,7 +245,7 @@ TRACE_EVENT(rpcgss_svc_unwrap_failed, TP_fast_assign( __entry->xid = be32_to_cpu(rqstp->rq_xid); - __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid) @@ -271,7 +271,7 @@ TRACE_EVENT(rpcgss_svc_seqno_bad, __entry->expected = expected; __entry->received = received; __entry->xid = __be32_to_cpu(rqstp->rq_xid); - __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s xid=0x%08x expected seqno %u, received seqno %u", @@ -299,7 +299,7 @@ TRACE_EVENT(rpcgss_svc_accept_upcall, __entry->minor_status = minor_status; __entry->major_status = major_status; __entry->xid = be32_to_cpu(rqstp->rq_xid); - __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s xid=0x%08x major_status=%s (0x%08lx) minor_status=%u", @@ -327,7 +327,7 @@ TRACE_EVENT(rpcgss_svc_authenticate, TP_fast_assign( __entry->xid = be32_to_cpu(rqstp->rq_xid); __entry->seqno = gc->gc_seq; - __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s xid=0x%08x seqno=%u", __get_str(addr), @@ -563,7 +563,7 @@ TRACE_EVENT(rpcgss_upcall_msg, ), TP_fast_assign( - __assign_str(msg, buf); + __assign_str(msg); ), TP_printk("msg='%s'", __get_str(msg)) @@ -618,7 +618,7 @@ TRACE_EVENT(rpcgss_context, __entry->timeout = timeout; __entry->window_size = window_size; __entry->len = len; - __assign_str(acceptor, data); 
+ __assign_str(acceptor); ), TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s", @@ -677,7 +677,7 @@ TRACE_EVENT(rpcgss_oid_to_mech, ), TP_fast_assign( - __assign_str(oid, oid); + __assign_str(oid); ), TP_printk("mech for oid %s was not found", __get_str(oid)) diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 027ac3ab45..1439265227 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -304,8 +304,8 @@ DECLARE_EVENT_CLASS(xprtrdma_reply_class, __entry->xid = be32_to_cpu(rep->rr_xid); __entry->version = be32_to_cpu(rep->rr_vers); __entry->proc = be32_to_cpu(rep->rr_proc); - __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt)); - __assign_str(port, rpcrdma_portstr(rep->rr_rxprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u", @@ -335,8 +335,8 @@ DECLARE_EVENT_CLASS(xprtrdma_rxprt, ), TP_fast_assign( - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s", @@ -369,8 +369,8 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class, TP_fast_assign( __entry->rc = rc; __entry->connect_status = r_xprt->rx_ep->re_connect_status; - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s rc=%d connection status=%d", @@ -608,8 +608,8 @@ DECLARE_EVENT_CLASS(xprtrdma_callback_class, TP_fast_assign( __entry->xid = be32_to_cpu(rqst->rq_xid); - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s xid=0x%08x", @@ -687,8 +687,8 @@ TRACE_EVENT(xprtrdma_op_connect, TP_fast_assign( __entry->delay = delay; - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s delay=%lu", @@ -716,8 +716,8 @@ TRACE_EVENT(xprtrdma_op_set_cto, TP_fast_assign( __entry->connect = connect; __entry->reconnect = reconnect; - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu", @@ -746,8 +746,8 @@ TRACE_EVENT(xprtrdma_createmrs, TP_fast_assign( __entry->count = count; - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s created %u MRs", @@ -775,8 +775,8 @@ TRACE_EVENT(xprtrdma_nomrs_err, __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s", @@ -1001,8 +1001,8 @@ TRACE_EVENT(xprtrdma_post_recvs, __entry->cq_id = ep->re_attr.recv_cq->res.id; __entry->count = count; __entry->posted = ep->re_receive_count; - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active", @@ -1031,8 +1031,8 @@ TRACE_EVENT(xprtrdma_post_recvs_err, __entry->cq_id = ep->re_attr.recv_cq->res.id; __entry->status = status; - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, 
rpcrdma_portstr(r_xprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s cq.id=%d rc=%d", @@ -1445,8 +1445,8 @@ TRACE_EVENT(xprtrdma_cb_setup, TP_fast_assign( __entry->reqs = reqs; - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s %u reqs", @@ -1476,7 +1476,7 @@ DECLARE_EVENT_CLASS(svcrdma_accept_class, TP_fast_assign( __entry->status = status; - __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s status=%ld", @@ -1962,7 +1962,7 @@ TRACE_EVENT(svcrdma_send_err, TP_fast_assign( __entry->status = status; __entry->xid = __be32_to_cpu(rqst->rq_xid); - __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr), @@ -2025,7 +2025,7 @@ TRACE_EVENT(svcrdma_rq_post_err, TP_fast_assign( __entry->status = status; - __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s status=%d", @@ -2138,7 +2138,7 @@ TRACE_EVENT(svcrdma_qp_error, TP_fast_assign( __entry->event = event->event; - __assign_str(device, event->device->name); + __assign_str(device); snprintf(__entry->addr, sizeof(__entry->addr) - 1, "%pISpc", sap); ), diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h index bd120e23ce..2b0b4b6ef8 100644 --- a/include/trace/events/rpm.h +++ b/include/trace/events/rpm.h @@ -33,7 +33,7 @@ DECLARE_EVENT_CLASS(rpm_internal, ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name); __entry->flags = flags; __entry->usage_count = atomic_read( &dev->power.usage_count); @@ -92,7 +92,7 @@ TRACE_EVENT(rpm_return_int, ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name); __entry->ip = ip; __entry->ret = ret; ), @@ -135,7 +135,7 @@ TRACE_EVENT(rpm_status, ), TP_fast_assign( - __assign_str(name, dev_name(dev)); + __assign_str(name); __entry->status = status; ), diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index dbb01b4b74..6df2b4685b 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -411,7 +411,7 @@ TRACE_EVENT(sched_process_exec, ), TP_fast_assign( - __assign_str(filename, bprm->filename); + __assign_str(filename); __entry->pid = p->pid; __entry->old_pid = old_pid; ), @@ -420,6 +420,41 @@ TRACE_EVENT(sched_process_exec, __entry->pid, __entry->old_pid) ); +/** + * sched_prepare_exec - called before setting up new exec + * @task: pointer to the current task + * @bprm: pointer to linux_binprm used for new exec + * + * Called before flushing the old exec, where @task is still unchanged, but at + * the point of no return during switching to the new exec. At the point it is + * called the exec will either succeed, or on failure terminate the task. Also + * see the "sched_process_exec" tracepoint, which is called right after @task + * has successfully switched to the new exec. 
+ */ +TRACE_EVENT(sched_prepare_exec, + + TP_PROTO(struct task_struct *task, struct linux_binprm *bprm), + + TP_ARGS(task, bprm), + + TP_STRUCT__entry( + __string( interp, bprm->interp ) + __string( filename, bprm->filename ) + __field( pid_t, pid ) + __string( comm, task->comm ) + ), + + TP_fast_assign( + __assign_str(interp); + __assign_str(filename); + __entry->pid = task->pid; + __assign_str(comm); + ), + + TP_printk("interp=%s filename=%s pid=%d comm=%s", + __get_str(interp), __get_str(filename), + __entry->pid, __get_str(comm)) +); #ifdef CONFIG_SCHEDSTATS #define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT @@ -752,7 +787,7 @@ DECLARE_TRACE(pelt_dl_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); -DECLARE_TRACE(pelt_thermal_tp, +DECLARE_TRACE(pelt_hw_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h index 422c1ad948..1273004811 100644 --- a/include/trace/events/scmi.h +++ b/include/trace/events/scmi.h @@ -7,6 +7,8 @@ #include <linux/tracepoint.h> +#define TRACE_SCMI_MAX_TAG_LEN 6 + TRACE_EVENT(scmi_fc_call, TP_PROTO(u8 protocol_id, u8 msg_id, u32 res_id, u32 val1, u32 val2), TP_ARGS(protocol_id, msg_id, res_id, val1, val2), @@ -150,7 +152,7 @@ TRACE_EVENT(scmi_msg_dump, __field(u8, channel_id) __field(u8, protocol_id) __field(u8, msg_id) - __array(char, tag, 5) + __array(char, tag, TRACE_SCMI_MAX_TAG_LEN) __field(u16, seq) __field(int, status) __field(size_t, len) @@ -162,7 +164,7 @@ TRACE_EVENT(scmi_msg_dump, __entry->channel_id = channel_id; __entry->protocol_id = protocol_id; __entry->msg_id = msg_id; - strscpy(__entry->tag, tag, 5); + strscpy(__entry->tag, tag, TRACE_SCMI_MAX_TAG_LEN); __entry->seq = seq; __entry->status = status; __entry->len = len; diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index fd206a6ab5..3836de435d 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -10,6 +10,7 @@ #include <linux/tracepoint.h> #include <linux/ipv6.h> #include <linux/tcp.h> +#include <trace/events/net_probe_common.h> #define family_names \ EM(AF_INET) \ @@ -109,7 +110,7 @@ TRACE_EVENT(sock_exceed_buf_limit, ), TP_fast_assign( - strncpy(__entry->name, prot->name, 32); + strscpy(__entry->name, prot->name, 32); __entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]); __entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]); __entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]); @@ -159,7 +160,6 @@ TRACE_EVENT(inet_sock_set_state, TP_fast_assign( const struct inet_sock *inet = inet_sk(sk); - struct in6_addr *pin6; __be32 *p32; __entry->skaddr = sk; @@ -177,20 +177,8 @@ TRACE_EVENT(inet_sock_set_state, p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) { - pin6 = (struct in6_addr *)__entry->saddr_v6; - *pin6 = sk->sk_v6_rcv_saddr; - pin6 = (struct in6_addr *)__entry->daddr_v6; - *pin6 = sk->sk_v6_daddr; - } else -#endif - { - pin6 = (struct in6_addr *)__entry->saddr_v6; - ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); - pin6 = (struct in6_addr *)__entry->daddr_v6; - ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); - } + TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, + sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s", @@ -223,7 +211,6 @@ TRACE_EVENT(inet_sk_error_report, TP_fast_assign( const struct inet_sock *inet = inet_sk(sk); - struct in6_addr *pin6; __be32 *p32; __entry->error = sk->sk_err; 
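
The new sched_prepare_exec event above complements sched_process_exec: it fires at the point of no return, before the old mm is torn down, so an exec that fails (and therefore kills the task) is still observable. A sketch of an in-kernel consumer, assuming the register_trace_* helper that TRACE_EVENT() generates and the usual probe convention of a leading void *data argument:

    #include <linux/binfmts.h>
    #include <linux/printk.h>
    #include <trace/events/sched.h>

    static void probe_prepare_exec(void *data, struct task_struct *task,
                                   struct linux_binprm *bprm)
    {
        pr_info("prepare_exec: comm=%s pid=%d filename=%s interp=%s\n",
                task->comm, task->pid, bprm->filename, bprm->interp);
    }

    /* e.g. from init code (sketch):
     *     register_trace_sched_prepare_exec(probe_prepare_exec, NULL);
     * with a matching unregister_trace_sched_prepare_exec() on teardown.
     */
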
@@ -238,20 +225,8 @@ TRACE_EVENT(inet_sk_error_report, p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) { - pin6 = (struct in6_addr *)__entry->saddr_v6; - *pin6 = sk->sk_v6_rcv_saddr; - pin6 = (struct in6_addr *)__entry->daddr_v6; - *pin6 = sk->sk_v6_daddr; - } else -#endif - { - pin6 = (struct in6_addr *)__entry->saddr_v6; - ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); - pin6 = (struct in6_addr *)__entry->daddr_v6; - ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); - } + TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, + sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c error=%d", diff --git a/include/trace/events/sof.h b/include/trace/events/sof.h index 21c2a1efb9..3061423c06 100644 --- a/include/trace/events/sof.h +++ b/include/trace/events/sof.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright(c) 2022 Intel Corporation. All rights reserved. + * Copyright(c) 2022 Intel Corporation * * Author: Noah Klayman <noah.klayman@intel.com> */ @@ -23,7 +23,7 @@ DECLARE_EVENT_CLASS(sof_widget_template, __field(int, use_count) ), TP_fast_assign( - __assign_str(name, swidget->widget->name); + __assign_str(name); __entry->use_count = swidget->use_count; ), TP_printk("name=%s use_count=%d", __get_str(name), __entry->use_count) @@ -49,7 +49,7 @@ TRACE_EVENT(sof_ipc3_period_elapsed_position, __field(u64, wallclock) ), TP_fast_assign( - __assign_str(device_name, dev_name(sdev->dev)); + __assign_str(device_name); __entry->host_posn = posn->host_posn; __entry->dai_posn = posn->dai_posn; __entry->wallclock = posn->wallclock; @@ -75,7 +75,7 @@ TRACE_EVENT(sof_pcm_pointer_position, __field(unsigned long, dai_posn) ), TP_fast_assign( - __assign_str(device_name, dev_name(sdev->dev)); + __assign_str(device_name); __entry->pcm_id = le32_to_cpu(spcm->pcm.pcm_id); __entry->stream = substream->stream; __entry->dma_posn = dma_posn; @@ -93,7 +93,7 @@ TRACE_EVENT(sof_stream_position_ipc_rx, __string(device_name, dev_name(dev)) ), TP_fast_assign( - __assign_str(device_name, dev_name(dev)); + __assign_str(device_name); ), TP_printk("device_name=%s", __get_str(device_name)) ); @@ -107,8 +107,8 @@ TRACE_EVENT(sof_ipc4_fw_config, __field(u32, value) ), TP_fast_assign( - __assign_str(device_name, dev_name(sdev->dev)); - __assign_str(key, key); + __assign_str(device_name); + __assign_str(key); __entry->value = value; ), TP_printk("device_name=%s key=%s value=%d", diff --git a/include/trace/events/sof_intel.h b/include/trace/events/sof_intel.h index 2a77f9d26c..9e579e57b1 100644 --- a/include/trace/events/sof_intel.h +++ b/include/trace/events/sof_intel.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright(c) 2022 Intel Corporation. All rights reserved. 
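
Both sock.h events above now share TP_STORE_ADDRS() instead of carrying two identical open-coded blocks; the helper itself moves out of tcp.h (see the removal further down) into <trace/events/net_probe_common.h>, which sock.h newly includes. Condensed from the code being deleted here, it behaves like the following, with `sk` picked up from the surrounding TP_fast_assign() scope:

    /* Condensed from the removed open-coded block: */
    if (sk->sk_family == AF_INET6) {
        *(struct in6_addr *)__entry->saddr_v6 = saddr6;
        *(struct in6_addr *)__entry->daddr_v6 = daddr6;
    } else {
        /* IPv4 peers are stored v4-mapped (::ffff:a.b.c.d) so the
         * saddrv6/daddrv6 fields always print cleanly with %pI6c. */
        ipv6_addr_set_v4mapped(saddr, (struct in6_addr *)__entry->saddr_v6);
        ipv6_addr_set_v4mapped(daddr, (struct in6_addr *)__entry->daddr_v6);
    }
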
+ * Copyright(c) 2022 Intel Corporation * * Author: Noah Klayman <noah.klayman@intel.com> */ @@ -22,8 +22,8 @@ TRACE_EVENT(sof_intel_hda_irq, __string(source, source) ), TP_fast_assign( - __assign_str(device_name, dev_name(sdev->dev)); - __assign_str(source, source); + __assign_str(device_name); + __assign_str(source); ), TP_printk("device_name=%s source=%s", __get_str(device_name), __get_str(source)) @@ -38,7 +38,7 @@ DECLARE_EVENT_CLASS(sof_intel_ipc_firmware_template, __field(u32, msg_ext) ), TP_fast_assign( - __assign_str(device_name, dev_name(sdev->dev)); + __assign_str(device_name); __entry->msg = msg; __entry->msg_ext = msg_ext; ), @@ -64,7 +64,7 @@ TRACE_EVENT(sof_intel_D0I3C_updated, __field(u8, reg) ), TP_fast_assign( - __assign_str(device_name, dev_name(sdev->dev)); + __assign_str(device_name); __entry->reg = reg; ), TP_printk("device_name=%s register=%#x", @@ -79,7 +79,7 @@ TRACE_EVENT(sof_intel_hda_irq_ipc_check, __field(u32, irq_status) ), TP_fast_assign( - __assign_str(device_name, dev_name(sdev->dev)); + __assign_str(device_name); __entry->irq_status = irq_status; ), TP_printk("device_name=%s irq_status=%#x", @@ -100,7 +100,7 @@ TRACE_EVENT(sof_intel_hda_dsp_pcm, __field(unsigned long, pos) ), TP_fast_assign( - __assign_str(device_name, dev_name(sdev->dev)); + __assign_str(device_name); __entry->hstream_index = hstream->index; __entry->substream = substream->stream; __entry->pos = pos; @@ -119,7 +119,7 @@ TRACE_EVENT(sof_intel_hda_dsp_stream_status, __field(u32, status) ), TP_fast_assign( - __assign_str(device_name, dev_name(dev)); + __assign_str(device_name); __entry->stream = s->index; __entry->status = status; ), @@ -135,7 +135,7 @@ TRACE_EVENT(sof_intel_hda_dsp_check_stream_irq, __field(u32, status) ), TP_fast_assign( - __assign_str(device_name, dev_name(sdev->dev)); + __assign_str(device_name); __entry->status = status; ), TP_printk("device_name=%s status=%#x", diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ac05ed06a0..5e84952166 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -188,10 +188,10 @@ TRACE_EVENT(rpc_clnt_new, __entry->client_id = clnt->cl_clid; __entry->xprtsec = args->xprtsec.policy; __entry->flags = args->flags; - __assign_str(program, clnt->cl_program->name); - __assign_str(server, xprt->servername); - __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); - __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + __assign_str(program); + __assign_str(server); + __assign_str(addr); + __assign_str(port); ), TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER " peer=[%s]:%s" @@ -220,8 +220,8 @@ TRACE_EVENT(rpc_clnt_new_err, TP_fast_assign( __entry->error = error; - __assign_str(program, program); - __assign_str(server, server); + __assign_str(program); + __assign_str(server); ), TP_printk("program=%s server=%s error=%d", @@ -325,8 +325,8 @@ TRACE_EVENT(rpc_request, __entry->client_id = task->tk_client->cl_clid; __entry->version = task->tk_client->cl_vers; __entry->async = RPC_IS_ASYNC(task); - __assign_str(progname, task->tk_client->cl_program->name); - __assign_str(procname, rpc_proc_name(task)); + __assign_str(progname); + __assign_str(procname); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " %sv%d %s (%ssync)", @@ -439,7 +439,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued, __entry->runstate = task->tk_runstate; __entry->status = task->tk_status; __entry->flags = task->tk_flags; - __assign_str(q_name, rpc_qname(q)); + __assign_str(q_name); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER @@ 
-515,10 +515,10 @@ DECLARE_EVENT_CLASS(rpc_reply_event, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); - __assign_str(progname, task->tk_client->cl_program->name); + __assign_str(progname); __entry->version = task->tk_client->cl_vers; - __assign_str(procname, rpc_proc_name(task)); - __assign_str(servername, task->tk_xprt->servername); + __assign_str(procname); + __assign_str(servername); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER @@ -647,8 +647,8 @@ TRACE_EVENT(rpc_stats_latency, __entry->task_id = task->tk_pid; __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); __entry->version = task->tk_client->cl_vers; - __assign_str(progname, task->tk_client->cl_program->name); - __assign_str(procname, rpc_proc_name(task)); + __assign_str(progname); + __assign_str(procname); __entry->backlog = ktime_to_us(backlog); __entry->rtt = ktime_to_us(rtt); __entry->execute = ktime_to_us(execute); @@ -697,16 +697,15 @@ TRACE_EVENT(rpc_xdr_overflow, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __assign_str(progname, - task->tk_client->cl_program->name); + __assign_str(progname); __entry->version = task->tk_client->cl_vers; - __assign_str(procedure, task->tk_msg.rpc_proc->p_name); + __assign_str(procedure); } else { __entry->task_id = -1; __entry->client_id = -1; - __assign_str(progname, "unknown"); + __assign_str(progname); __entry->version = 0; - __assign_str(procedure, "unknown"); + __assign_str(procedure); } __entry->requested = requested; __entry->end = xdr->end; @@ -763,10 +762,9 @@ TRACE_EVENT(rpc_xdr_alignment, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __assign_str(progname, - task->tk_client->cl_program->name); + __assign_str(progname); __entry->version = task->tk_client->cl_vers; - __assign_str(procedure, task->tk_msg.rpc_proc->p_name); + __assign_str(procedure); __entry->offset = offset; __entry->copied = copied; @@ -1018,8 +1016,8 @@ DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class, TP_fast_assign( __entry->state = xprt->state; - __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); - __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s state=%s", @@ -1061,8 +1059,8 @@ DECLARE_EVENT_CLASS(rpc_xprt_event, TP_fast_assign( __entry->xid = be32_to_cpu(xid); __entry->status = status; - __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); - __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s xid=0x%08x status=%d", __get_str(addr), @@ -1140,10 +1138,9 @@ TRACE_EVENT(xprt_retransmit, __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->ntrans = rqst->rq_ntrans; __entry->timeout = task->tk_timeout; - __assign_str(progname, - task->tk_client->cl_program->name); + __assign_str(progname); __entry->version = task->tk_client->cl_vers; - __assign_str(procname, rpc_proc_name(task)); + __assign_str(procname); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER @@ -1167,8 +1164,8 @@ TRACE_EVENT(xprt_ping, TP_fast_assign( __entry->status = status; - __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); - __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s status=%d", @@ -1315,8 +1312,8 @@ TRACE_EVENT(xs_data_ready, ), TP_fast_assign( - __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); - __assign_str(port, 
xprt->address_strings[RPC_DISPLAY_PORT]); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s", __get_str(addr), __get_str(port)) @@ -1339,10 +1336,8 @@ TRACE_EVENT(xs_stream_read_data, TP_fast_assign( __entry->err = err; __entry->total = total; - __assign_str(addr, xprt ? - xprt->address_strings[RPC_DISPLAY_ADDR] : EVENT_NULL_STR); - __assign_str(port, xprt ? - xprt->address_strings[RPC_DISPLAY_PORT] : EVENT_NULL_STR); + __assign_str(addr); + __assign_str(port); ), TP_printk("peer=[%s]:%s err=%zd total=%zu", __get_str(addr), @@ -1364,8 +1359,8 @@ TRACE_EVENT(xs_stream_read_request, ), TP_fast_assign( - __assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]); - __assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]); + __assign_str(addr); + __assign_str(port); __entry->xid = be32_to_cpu(xs->recv.xid); __entry->copied = xs->recv.copied; __entry->reclen = xs->recv.len; @@ -1403,7 +1398,7 @@ TRACE_EVENT(rpcb_getport, __entry->version = clnt->cl_vers; __entry->protocol = task->tk_xprt->prot; __entry->bind_version = bind_version; - __assign_str(servername, task->tk_xprt->servername); + __assign_str(servername); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER @@ -1493,8 +1488,8 @@ TRACE_EVENT(rpcb_register, TP_fast_assign( __entry->program = program; __entry->version = version; - __assign_str(addr, addr); - __assign_str(netid, netid); + __assign_str(addr); + __assign_str(netid); ), TP_printk("program=%u version=%u addr=%s netid=%s", @@ -1521,7 +1516,7 @@ TRACE_EVENT(rpcb_unregister, TP_fast_assign( __entry->program = program; __entry->version = version; - __assign_str(netid, netid); + __assign_str(netid); ), TP_printk("program=%u version=%u netid=%s", @@ -1551,8 +1546,8 @@ DECLARE_EVENT_CLASS(rpc_tls_class, TP_fast_assign( __entry->requested_policy = clnt->cl_xprtsec.policy; __entry->version = clnt->cl_vers; - __assign_str(servername, xprt->servername); - __assign_str(progname, clnt->cl_program->name) + __assign_str(servername); + __assign_str(progname); ), TP_printk("server=%s %sv%u requested_policy=%s", @@ -1794,10 +1789,9 @@ TRACE_EVENT(svc_process, __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->vers = rqst->rq_vers; __entry->proc = rqst->rq_proc; - __assign_str(service, name); - __assign_str(procedure, svc_proc_name(rqst)); - __assign_str(addr, rqst->rq_xprt ? 
- rqst->rq_xprt->xpt_remotebuf : EVENT_NULL_STR); + __assign_str(service); + __assign_str(procedure); + __assign_str(addr); ), TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%s", @@ -1915,7 +1909,7 @@ TRACE_EVENT(svc_stats_latency, __entry->execute = ktime_to_us(ktime_sub(ktime_get(), rqst->rq_stime)); - __assign_str(procedure, svc_proc_name(rqst)); + __assign_str(procedure); ), TP_printk(SVC_RQST_ENDPOINT_FORMAT " proc=%s execute-us=%lu", @@ -1980,8 +1974,8 @@ TRACE_EVENT(svc_xprt_create_err, TP_fast_assign( __entry->error = PTR_ERR(xprt); - __assign_str(program, program); - __assign_str(protocol, protocol); + __assign_str(program); + __assign_str(protocol); __assign_sockaddr(addr, sap, salen); ), @@ -2120,8 +2114,8 @@ TRACE_EVENT(svc_xprt_accept, TP_fast_assign( SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt); - __assign_str(protocol, xprt->xpt_class->xcl_name); - __assign_str(service, service); + __assign_str(protocol); + __assign_str(service); ), TP_printk(SVC_XPRT_ENDPOINT_FORMAT " protocol=%s service=%s", @@ -2260,7 +2254,7 @@ TRACE_EVENT(svcsock_marker, TP_fast_assign( __entry->length = be32_to_cpu(marker) & RPC_FRAGMENT_SIZE_MASK; __entry->last = be32_to_cpu(marker) & RPC_LAST_STREAM_FRAGMENT; - __assign_str(addr, xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s length=%u%s", __get_str(addr), @@ -2284,7 +2278,7 @@ DECLARE_EVENT_CLASS(svcsock_class, TP_fast_assign( __entry->result = result; __entry->flags = xprt->xpt_flags; - __assign_str(addr, xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s result=%zd flags=%s", __get_str(addr), @@ -2330,7 +2324,7 @@ TRACE_EVENT(svcsock_tcp_recv_short, __entry->expected = expected; __entry->received = received; __entry->flags = xprt->xpt_flags; - __assign_str(addr, xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s flags=%s expected=%u received=%u", @@ -2358,7 +2352,7 @@ TRACE_EVENT(svcsock_tcp_state, __entry->socket_state = socket->state; __entry->sock_state = socket->sk->sk_state; __entry->flags = xprt->xpt_flags; - __assign_str(addr, xprt->xpt_remotebuf); + __assign_str(addr); ), TP_printk("addr=%s state=%s sk_state=%s flags=%s", __get_str(addr), @@ -2385,7 +2379,7 @@ DECLARE_EVENT_CLASS(svcsock_accept_class, TP_fast_assign( __entry->status = status; - __assign_str(service, service); + __assign_str(service); __entry->netns_ino = xprt->xpt_net->ns.inum; ), @@ -2421,7 +2415,7 @@ DECLARE_EVENT_CLASS(cache_event, TP_fast_assign( __entry->h = h; - __assign_str(name, cd->name); + __assign_str(name); ), TP_printk("cache=%s entry=%p", __get_str(name), __entry->h) @@ -2466,7 +2460,7 @@ DECLARE_EVENT_CLASS(register_class, __entry->protocol = protocol; __entry->port = port; __entry->error = error; - __assign_str(program, program); + __assign_str(program); ), TP_printk("program=%sv%u proto=%s port=%u family=%s error=%d", @@ -2511,7 +2505,7 @@ TRACE_EVENT(svc_unregister, TP_fast_assign( __entry->version = version; __entry->error = error; - __assign_str(program, program); + __assign_str(program); ), TP_printk("program=%sv%u error=%d", diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h index da05c9ebd2..3b6ddb136e 100644 --- a/include/trace/events/swiotlb.h +++ b/include/trace/events/swiotlb.h @@ -20,7 +20,7 @@ TRACE_EVENT(swiotlb_bounced, ), TP_fast_assign( - __assign_str(dev_name, dev_name(dev)); + __assign_str(dev_name); __entry->dma_mask = (dev->dma_mask ? 
*dev->dma_mask : 0); __entry->dev_addr = dev_addr; __entry->size = size; diff --git a/include/trace/events/target.h b/include/trace/events/target.h index 67fad2677e..a13cbf2b34 100644 --- a/include/trace/events/target.h +++ b/include/trace/events/target.h @@ -154,7 +154,7 @@ TRACE_EVENT(target_sequencer_start, __entry->task_attribute = cmd->sam_task_attr; __entry->control = scsi_command_control(cmd->t_task_cdb); memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE); - __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); + __assign_str(initiator); ), TP_printk("%s -> LUN %03u tag %#llx %s data_length %6u CDB %s (TA:%s C:%02x)", @@ -198,7 +198,7 @@ TRACE_EVENT(target_cmd_complete, min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0; memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE); memcpy(__entry->sense_data, cmd->sense_buffer, __entry->sense_length); - __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); + __assign_str(initiator); ), TP_printk("%s <- LUN %03u tag %#llx status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)", diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 699dafd204..49b5ee091c 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -11,35 +11,7 @@ #include <net/ipv6.h> #include <net/tcp.h> #include <linux/sock_diag.h> - -#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \ - do { \ - struct in6_addr *pin6; \ - \ - pin6 = (struct in6_addr *)__entry->saddr_v6; \ - ipv6_addr_set_v4mapped(saddr, pin6); \ - pin6 = (struct in6_addr *)__entry->daddr_v6; \ - ipv6_addr_set_v4mapped(daddr, pin6); \ - } while (0) - -#if IS_ENABLED(CONFIG_IPV6) -#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \ - do { \ - if (sk->sk_family == AF_INET6) { \ - struct in6_addr *pin6; \ - \ - pin6 = (struct in6_addr *)__entry->saddr_v6; \ - *pin6 = saddr6; \ - pin6 = (struct in6_addr *)__entry->daddr_v6; \ - *pin6 = daddr6; \ - } else { \ - TP_STORE_V4MAPPED(__entry, saddr, daddr); \ - } \ - } while (0) -#else -#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \ - TP_STORE_V4MAPPED(__entry, saddr, daddr) -#endif +#include <net/rstreason.h> /* * tcp event with arguments sk and skb @@ -103,17 +75,70 @@ DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb, TP_ARGS(sk, skb) ); +#undef FN +#define FN(reason) TRACE_DEFINE_ENUM(SK_RST_REASON_##reason); +DEFINE_RST_REASON(FN, FN) + +#undef FN +#undef FNe +#define FN(reason) { SK_RST_REASON_##reason, #reason }, +#define FNe(reason) { SK_RST_REASON_##reason, #reason } + /* * skb of trace_tcp_send_reset is the skb that caused RST. In case of * active reset, skb should be NULL */ -DEFINE_EVENT(tcp_event_sk_skb, tcp_send_reset, +TRACE_EVENT(tcp_send_reset, - TP_PROTO(const struct sock *sk, const struct sk_buff *skb), + TP_PROTO(const struct sock *sk, + const struct sk_buff *skb, + const enum sk_rst_reason reason), - TP_ARGS(sk, skb) + TP_ARGS(sk, skb, reason), + + TP_STRUCT__entry( + __field(const void *, skbaddr) + __field(const void *, skaddr) + __field(int, state) + __field(enum sk_rst_reason, reason) + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + ), + + TP_fast_assign( + __entry->skbaddr = skb; + __entry->skaddr = sk; + /* Zero means unknown state. */ + __entry->state = sk ? 
sk->sk_state : 0; + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + + if (sk && sk_fullsock(sk)) { + const struct inet_sock *inet = inet_sk(sk); + + TP_STORE_ADDR_PORTS(__entry, inet, sk); + } else if (skb) { + const struct tcphdr *th = (const struct tcphdr *)skb->data; + /* + * We should reverse the 4-tuple of skb, so later + * it can print the right flow direction of rst. + */ + TP_STORE_ADDR_PORTS_SKB(skb, th, entry->daddr, entry->saddr); + } + __entry->reason = reason; + ), + + TP_printk("skbaddr=%p skaddr=%p src=%pISpc dest=%pISpc state=%s reason=%s", + __entry->skbaddr, __entry->skaddr, + __entry->saddr, __entry->daddr, + __entry->state ? show_tcp_state_name(__entry->state) : "UNKNOWN", + __print_symbolic(__entry->reason, DEFINE_RST_REASON(FN, FNe))) ); +#undef FN +#undef FNe + /* * tcp event with arguments sk * @@ -302,48 +327,6 @@ TRACE_EVENT(tcp_probe, __entry->skbaddr, __entry->skaddr) ); -#define TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) \ - do { \ - const struct tcphdr *th = (const struct tcphdr *)skb->data; \ - struct sockaddr_in *v4 = (void *)__entry->saddr; \ - \ - v4->sin_family = AF_INET; \ - v4->sin_port = th->source; \ - v4->sin_addr.s_addr = ip_hdr(skb)->saddr; \ - v4 = (void *)__entry->daddr; \ - v4->sin_family = AF_INET; \ - v4->sin_port = th->dest; \ - v4->sin_addr.s_addr = ip_hdr(skb)->daddr; \ - } while (0) - -#if IS_ENABLED(CONFIG_IPV6) - -#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \ - do { \ - const struct iphdr *iph = ip_hdr(skb); \ - \ - if (iph->version == 6) { \ - const struct tcphdr *th = (const struct tcphdr *)skb->data; \ - struct sockaddr_in6 *v6 = (void *)__entry->saddr; \ - \ - v6->sin6_family = AF_INET6; \ - v6->sin6_port = th->source; \ - v6->sin6_addr = ipv6_hdr(skb)->saddr; \ - v6 = (void *)__entry->daddr; \ - v6->sin6_family = AF_INET6; \ - v6->sin6_port = th->dest; \ - v6->sin6_addr = ipv6_hdr(skb)->daddr; \ - } else \ - TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb); \ - } while (0) - -#else - -#define TP_STORE_ADDR_PORTS_SKB(__entry, skb) \ - TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) - -#endif - /* * tcp event with only skb */ @@ -360,12 +343,13 @@ DECLARE_EVENT_CLASS(tcp_event_skb, ), TP_fast_assign( + const struct tcphdr *th = (const struct tcphdr *)skb->data; __entry->skbaddr = skb; memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); - TP_STORE_ADDR_PORTS_SKB(__entry, skb); + TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr); ), TP_printk("skbaddr=%p src=%pISpc dest=%pISpc", diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h index 971cd02d2d..6d9f5075ba 100644 --- a/include/trace/events/tegra_apb_dma.h +++ b/include/trace/events/tegra_apb_dma.h @@ -16,7 +16,7 @@ TRACE_EVENT(tegra_dma_tx_status, __field(__u32, residue) ), TP_fast_assign( - __assign_str(chan, dev_name(&dc->dev->device)); + __assign_str(chan); __entry->cookie = cookie; __entry->residue = state ? 
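
tcp_send_reset graduates from the generic sk/skb event class to a full TRACE_EVENT so it can carry an enum sk_rst_reason from the new <net/rstreason.h>. The FN/FNe pair above is the usual X-macro idiom: FN, which appends a comma, is applied to every reason except the last, and FNe to the final one, so DEFINE_RST_REASON(FN, FNe) expands into a valid __print_symbolic() table. Abridged, with only a few reason names shown for illustration:

    /* What the table expansion boils down to (abridged): */
    __print_symbolic(__entry->reason,
        { SK_RST_REASON_NOT_SPECIFIED, "NOT_SPECIFIED" },  /* FN: comma */
        { SK_RST_REASON_NO_SOCKET,     "NO_SOCKET" },      /* FN: comma */
        /* ... */
        { SK_RST_REASON_MAX,           "MAX" })            /* FNe: no comma */
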
state->residue : (u32)-1; ), @@ -33,7 +33,7 @@ TRACE_EVENT(tegra_dma_complete_cb, __field(void *, ptr) ), TP_fast_assign( - __assign_str(chan, dev_name(&dc->dev->device)); + __assign_str(chan); __entry->count = count; __entry->ptr = ptr; ), @@ -49,7 +49,7 @@ TRACE_EVENT(tegra_dma_isr, __field(int, irq) ), TP_fast_assign( - __assign_str(chan, dev_name(&dc->dev->device)); + __assign_str(chan); __entry->irq = irq; ), TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq) diff --git a/include/trace/events/udp.h b/include/trace/events/udp.h index 336fe27288..6142be4068 100644 --- a/include/trace/events/udp.h +++ b/include/trace/events/udp.h @@ -7,24 +7,43 @@ #include <linux/udp.h> #include <linux/tracepoint.h> +#include <trace/events/net_probe_common.h> TRACE_EVENT(udp_fail_queue_rcv_skb, - TP_PROTO(int rc, struct sock *sk), + TP_PROTO(int rc, struct sock *sk, struct sk_buff *skb), - TP_ARGS(rc, sk), + TP_ARGS(rc, sk, skb), TP_STRUCT__entry( __field(int, rc) - __field(__u16, lport) + + __field(__u16, sport) + __field(__u16, dport) + __field(__u16, family) + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) ), TP_fast_assign( + const struct udphdr *uh = (const struct udphdr *)udp_hdr(skb); + __entry->rc = rc; - __entry->lport = inet_sk(sk)->inet_num; + + /* for filtering use */ + __entry->sport = ntohs(uh->source); + __entry->dport = ntohs(uh->dest); + __entry->family = sk->sk_family; + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + + TP_STORE_ADDR_PORTS_SKB(skb, uh, __entry->saddr, __entry->daddr); ), - TP_printk("rc=%d port=%hu", __entry->rc, __entry->lport) + TP_printk("rc=%d family=%s src=%pISpc dest=%pISpc", __entry->rc, + show_family_name(__entry->family), + __entry->saddr, __entry->daddr) ); #endif /* _TRACE_UDP_H */ diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index b930669bd1..c4e209fbdf 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -92,7 +92,7 @@ TRACE_EVENT(ufshcd_clk_gating, ), TP_fast_assign( - __assign_str(dev_name, dev_name); + __assign_str(dev_name); __entry->state = state; ), @@ -117,9 +117,9 @@ TRACE_EVENT(ufshcd_clk_scaling, ), TP_fast_assign( - __assign_str(dev_name, dev_name); - __assign_str(state, state); - __assign_str(clk, clk); + __assign_str(dev_name); + __assign_str(state); + __assign_str(clk); __entry->prev_state = prev_state; __entry->curr_state = curr_state; ), @@ -141,8 +141,8 @@ TRACE_EVENT(ufshcd_auto_bkops_state, ), TP_fast_assign( - __assign_str(dev_name, dev_name); - __assign_str(state, state); + __assign_str(dev_name); + __assign_str(state); ), TP_printk("%s: auto bkops - %s", @@ -163,8 +163,8 @@ DECLARE_EVENT_CLASS(ufshcd_profiling_template, ), TP_fast_assign( - __assign_str(dev_name, dev_name); - __assign_str(profile_info, profile_info); + __assign_str(dev_name); + __assign_str(profile_info); __entry->time_us = time_us; __entry->err = err; ), @@ -206,7 +206,7 @@ DECLARE_EVENT_CLASS(ufshcd_template, TP_fast_assign( __entry->usecs = usecs; __entry->err = err; - __assign_str(dev_name, dev_name); + __assign_str(dev_name); __entry->dev_state = dev_state; __entry->link_state = link_state; ), @@ -326,7 +326,7 @@ TRACE_EVENT(ufshcd_uic_command, ), TP_fast_assign( - __assign_str(dev_name, dev_name); + __assign_str(dev_name); __entry->str_t = str_t; __entry->cmd = cmd; __entry->arg1 = arg1; @@ -356,7 +356,7 @@ TRACE_EVENT(ufshcd_upiu, ), TP_fast_assign( - __assign_str(dev_name, dev_name); + 
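
udp_fail_queue_rcv_skb above used to record only rc and the local port; taking the skb lets it log the whole 4-tuple through the same TP_STORE_ADDR_PORTS_SKB() helper the TCP events use. Judging by the call sites, the relocated helper is parameterized on the transport header instead of hard-coding struct tcphdr the way the removed tcp.h copy did, and the host-order sport/dport fields exist purely so trace filters (e.g. dport == 53) work without byte swapping. The call shape, assuming that signature:

    /* Same helper, different transport headers (assumed signature): */
    const struct udphdr *uh = udp_hdr(skb);
    TP_STORE_ADDR_PORTS_SKB(skb, uh, __entry->saddr, __entry->daddr);

    const struct tcphdr *th = (const struct tcphdr *)skb->data;
    TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr);
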
__assign_str(dev_name); __entry->str_t = str_t; memcpy(__entry->hdr, hdr, sizeof(__entry->hdr)); memcpy(__entry->tsf, tsf, sizeof(__entry->tsf)); @@ -384,7 +384,7 @@ TRACE_EVENT(ufshcd_exception_event, ), TP_fast_assign( - __assign_str(dev_name, dev_name); + __assign_str(dev_name); __entry->status = status; ), diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index 262d52021c..b0de2bc9ed 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -38,7 +38,7 @@ TRACE_EVENT(workqueue_queue_work, TP_fast_assign( __entry->work = work; __entry->function = work->func; - __assign_str(workqueue, pwq->wq->name); + __assign_str(workqueue); __entry->req_cpu = req_cpu; __entry->cpu = pwq->pool->cpu; ), @@ -64,13 +64,15 @@ TRACE_EVENT(workqueue_activate_work, TP_STRUCT__entry( __field( void *, work ) + __field( void *, function) ), TP_fast_assign( __entry->work = work; + __entry->function = work->func; ), - TP_printk("work struct %p", __entry->work) + TP_printk("work struct %p function=%ps ", __entry->work, __entry->function) ); /** diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index 9adc2bdf2f..a7e5452b5d 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -416,7 +416,7 @@ TRACE_EVENT(bpf_xdp_link_attach_failed, ), TP_fast_assign( - __assign_str(msg, msg); + __assign_str(msg); ), TP_printk("errmsg=%s", __get_str(msg)) diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h index e43e745915..7b221d5113 100644 --- a/include/trace/misc/nfs.h +++ b/include/trace/misc/nfs.h @@ -28,7 +28,6 @@ TRACE_DEFINE_ENUM(NFSERR_FBIG); TRACE_DEFINE_ENUM(NFSERR_NOSPC); TRACE_DEFINE_ENUM(NFSERR_ROFS); TRACE_DEFINE_ENUM(NFSERR_MLINK); -TRACE_DEFINE_ENUM(NFSERR_OPNOTSUPP); TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG); TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY); TRACE_DEFINE_ENUM(NFSERR_DQUOT); @@ -64,7 +63,6 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX); { NFSERR_NOSPC, "NOSPC" }, \ { NFSERR_ROFS, "ROFS" }, \ { NFSERR_MLINK, "MLINK" }, \ - { NFSERR_OPNOTSUPP, "OPNOTSUPP" }, \ { NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \ { NFSERR_NOTEMPTY, "NOTEMPTY" }, \ { NFSERR_DQUOT, "DQUOT" }, \ diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h index 3690e67726..1691676fd8 100644 --- a/include/trace/stages/stage6_event_callback.h +++ b/include/trace/stages/stage6_event_callback.h @@ -31,12 +31,10 @@ #define __vstring(item, fmt, ap) __dynamic_array(char, item, -1) #undef __assign_str -#define __assign_str(dst, src) \ +#define __assign_str(dst) \ do { \ char *__str__ = __get_str(dst); \ int __len__ = __get_dynamic_array_len(dst) - 1; \ - WARN_ON_ONCE(!(void *)(src) != !(void *)__data_offsets.dst##_ptr_); \ - WARN_ON_ONCE((src) && strcmp((src), __data_offsets.dst##_ptr_)); \ memcpy(__str__, __data_offsets.dst##_ptr_ ? 
: \ EVENT_NULL_STR, __len__); \ __str__[__len__] = '\0'; \ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 060f68e419..d4cc26932f 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -842,8 +842,11 @@ __SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr) #define __NR_lsm_list_modules 461 __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) +#define __NR_mseal 462 +__SYSCALL(__NR_mseal, sys_mseal) + #undef __NR_syscalls -#define __NR_syscalls 462 +#define __NR_syscalls 463 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 7040e7ea80..1ca5c7e418 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -866,6 +866,17 @@ struct drm_color_lut { }; /** + * struct drm_plane_size_hint - Plane size hints + * + * The plane SIZE_HINTS property blob contains an + * array of struct drm_plane_size_hint. + */ +struct drm_plane_size_hint { + __u16 width; + __u16 height; +}; + +/** * struct hdr_metadata_infoframe - HDR Metadata Infoframe Data. * * HDR Metadata Infoframe as per CTA 861.G spec. This is expected diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 2ee338860b..d4d86e566e 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -806,6 +806,12 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_PXP_STATUS 58 +/* + * Query if kernel allows marking a context to send a Freq hint to SLPC. This + * will enable use of the strategies allowed by the SLPC algorithm. + */ +#define I915_PARAM_HAS_CONTEXT_FREQ_HINT 59 + /* Must be kept compact -- no holes and well documented */ /** @@ -2148,6 +2154,15 @@ struct drm_i915_gem_context_param { * -EIO: The firmware did not succeed in creating the protected context. */ #define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd + +/* + * I915_CONTEXT_PARAM_LOW_LATENCY: + * + * Mark this context as a low latency workload which requires aggressive GT + * frequency scaling. Use I915_PARAM_HAS_CONTEXT_FREQ_HINT to check if the kernel + * supports this per context flag. + */ +#define I915_CONTEXT_PARAM_LOW_LATENCY 0xe /* Must be kept compact -- no holes and well documented */ /** @value: Context parameter value to be set or queried */ @@ -2623,19 +2638,29 @@ struct drm_i915_reg_read { * */ +/* + * struct drm_i915_reset_stats - Return global reset and other context stats + * + * Driver keeps few stats for each contexts and also global reset count. + * This struct can be used to query those stats. 
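
A few hunks up, the asm-generic unistd.h change wires up the new mseal() system call as number 462 and bumps __NR_syscalls to 463. mseal() seals the VMAs in a range against later layout changes (unmapping, moving, resizing, permission changes); the flags argument is currently reserved and must be zero. Until libc grows a wrapper it is reachable only through syscall(2); a minimal userspace sketch, assuming a page-aligned region:

    #define _GNU_SOURCE
    #include <stddef.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_mseal
    #define __NR_mseal 462          /* from the hunk above */
    #endif

    /* Seal [addr, addr + len): later munmap/mremap/mprotect will fail. */
    static int seal_region(void *addr, size_t len)
    {
        return syscall(__NR_mseal, addr, len, 0UL);  /* flags must be 0 */
    }
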
+ */ struct drm_i915_reset_stats { + /** @ctx_id: ID of the requested context */ __u32 ctx_id; + + /** @flags: MBZ */ __u32 flags; - /* All resets since boot/module reload, for all contexts */ + /** @reset_count: All resets since boot/module reload, for all contexts */ __u32 reset_count; - /* Number of batches lost when active in GPU, for this context */ + /** @batch_active: Number of batches lost when active in GPU, for this context */ __u32 batch_active; - /* Number of batches lost pending for execution, for this context */ + /** @batch_pending: Number of batches lost pending for execution, for this context */ __u32 batch_pending; + /** @pad: MBZ */ __u32 pad; }; diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h index 5402f77ee8..dd87f8f307 100644 --- a/include/uapi/drm/nouveau_drm.h +++ b/include/uapi/drm/nouveau_drm.h @@ -80,6 +80,16 @@ struct drm_nouveau_getparam { __u64 value; }; +/* + * Those are used to support selecting the main engine used on Kepler. + * This goes into drm_nouveau_channel_alloc::tt_ctxdma_handle + */ +#define NOUVEAU_FIFO_ENGINE_GR 0x01 +#define NOUVEAU_FIFO_ENGINE_VP 0x02 +#define NOUVEAU_FIFO_ENGINE_PPP 0x04 +#define NOUVEAU_FIFO_ENGINE_BSP 0x08 +#define NOUVEAU_FIFO_ENGINE_CE 0x30 + struct drm_nouveau_channel_alloc { __u32 fb_ctxdma_handle; __u32 tt_ctxdma_handle; @@ -102,6 +112,18 @@ struct drm_nouveau_channel_free { __s32 channel; }; +struct drm_nouveau_notifierobj_alloc { + __u32 channel; + __u32 handle; + __u32 size; + __u32 offset; +}; + +struct drm_nouveau_gpuobj_free { + __s32 channel; + __u32 handle; +}; + #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h new file mode 100644 index 0000000000..926b1deb11 --- /dev/null +++ b/include/uapi/drm/panthor_drm.h @@ -0,0 +1,962 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2023 Collabora ltd. */ +#ifndef _PANTHOR_DRM_H_ +#define _PANTHOR_DRM_H_ + +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * DOC: Introduction + * + * This documentation describes the Panthor IOCTLs. + * + * Just a few generic rules about the data passed to the Panthor IOCTLs: + * + * - Structures must be aligned on 64-bit/8-byte. If the object is not + * naturally aligned, a padding field must be added. + * - Fields must be explicitly aligned to their natural type alignment with + * pad[0..N] fields. + * - All padding fields will be checked by the driver to make sure they are + * zeroed. + * - Flags can be added, but not removed/replaced. + * - New fields can be added to the main structures (the structures + * directly passed to the ioctl). Those fields can be added at the end of + * the structure, or replace existing padding fields. Any new field being + * added must preserve the behavior that existed before those fields were + * added when a value of zero is passed. + * - New fields can be added to indirect objects (objects pointed by the + * main structure), iff those objects are passed a size to reflect the + * size known by the userspace driver (see drm_panthor_obj_array::stride + * or drm_panthor_dev_query::size). + * - If the kernel driver is too old to know some fields, those will be + * ignored if zero, and otherwise rejected (and so will be zero on output). + * - If userspace is too old to know some fields, those will be zeroed + * (input) before the structure is parsed by the kernel driver. 
+ * - Each new flag/field addition must come with a driver version update so + * the userspace driver doesn't have to trial and error to know which + * flags are supported. + * - Structures should not contain unions, as this would defeat the + * extensibility of such structures. + * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed + * at the end of the drm_panthor_ioctl_id enum. + */ + +/** + * DOC: MMIO regions exposed to userspace. + * + * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET + * + * File offset for all MMIO regions being exposed to userspace. Don't use + * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead. + * pgoffset passed to mmap2() is an unsigned long, which forces us to use a + * different offset on 32-bit and 64-bit systems. + * + * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET + * + * File offset for the LATEST_FLUSH_ID register. The Userspace driver controls + * GPU cache flushing through CS instructions, but the flush reduction + * mechanism requires a flush_id. This flush_id could be queried with an + * ioctl, but Arm provides a well-isolated register page containing only this + * read-only register, so let's expose this page through a static mmap offset + * and allow direct mapping of this MMIO region so we can avoid the + * user <-> kernel round-trip. + */ +#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT (1ull << 43) +#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT (1ull << 56) +#define DRM_PANTHOR_USER_MMIO_OFFSET (sizeof(unsigned long) < 8 ? \ + DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \ + DRM_PANTHOR_USER_MMIO_OFFSET_64BIT) +#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET (DRM_PANTHOR_USER_MMIO_OFFSET | 0) + +/** + * DOC: IOCTL IDs + * + * enum drm_panthor_ioctl_id - IOCTL IDs + * + * Place new ioctls at the end, don't re-order, don't replace or remove entries. + * + * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx + * definitions instead. + */ +enum drm_panthor_ioctl_id { + /** @DRM_PANTHOR_DEV_QUERY: Query device information. */ + DRM_PANTHOR_DEV_QUERY = 0, + + /** @DRM_PANTHOR_VM_CREATE: Create a VM. */ + DRM_PANTHOR_VM_CREATE, + + /** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */ + DRM_PANTHOR_VM_DESTROY, + + /** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */ + DRM_PANTHOR_VM_BIND, + + /** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */ + DRM_PANTHOR_VM_GET_STATE, + + /** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */ + DRM_PANTHOR_BO_CREATE, + + /** + * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to + * mmap to map a GEM object. + */ + DRM_PANTHOR_BO_MMAP_OFFSET, + + /** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */ + DRM_PANTHOR_GROUP_CREATE, + + /** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */ + DRM_PANTHOR_GROUP_DESTROY, + + /** + * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging + * to a specific scheduling group. + */ + DRM_PANTHOR_GROUP_SUBMIT, + + /** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */ + DRM_PANTHOR_GROUP_GET_STATE, + + /** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */ + DRM_PANTHOR_TILER_HEAP_CREATE, + + /** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */ + DRM_PANTHOR_TILER_HEAP_DESTROY, +}; + +/** + * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number + * @__access: Access type. Must be R, W or RW. + * @__id: One of the DRM_PANTHOR_xxx id. + * @__type: Suffix of the type being passed to the IOCTL. + * + * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx + * values instead. 
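
The split 32-bit/64-bit DRM_PANTHOR_USER_MMIO_OFFSET in the DOC block above exists because the page offset passed to mmap2() is an unsigned long, so a fixed 1 << 56 offset would not be representable from 32-bit userspace. With the static offset, reading the flush-id costs a plain load instead of an ioctl round-trip. A userspace sketch, assuming an open render node, 4 KiB pages, and a 64-bit off_t (-D_FILE_OFFSET_BITS=64 on 32-bit builds):

    #include <stdint.h>
    #include <sys/mman.h>
    #include <drm/panthor_drm.h>

    /* Map the read-only LATEST_FLUSH_ID register page (sketch). */
    static volatile const uint32_t *map_flush_id(int fd)
    {
        void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd,
                       DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
        return p == MAP_FAILED ? NULL : (volatile const uint32_t *)p;
    }
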
+ * + * Return: An IOCTL number to be passed to ioctl() from userspace. + */ +#define DRM_IOCTL_PANTHOR(__access, __id, __type) \ + DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \ + struct drm_panthor_ ## __type) + +#define DRM_IOCTL_PANTHOR_DEV_QUERY \ + DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query) +#define DRM_IOCTL_PANTHOR_VM_CREATE \ + DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create) +#define DRM_IOCTL_PANTHOR_VM_DESTROY \ + DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy) +#define DRM_IOCTL_PANTHOR_VM_BIND \ + DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind) +#define DRM_IOCTL_PANTHOR_VM_GET_STATE \ + DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state) +#define DRM_IOCTL_PANTHOR_BO_CREATE \ + DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create) +#define DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET \ + DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset) +#define DRM_IOCTL_PANTHOR_GROUP_CREATE \ + DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create) +#define DRM_IOCTL_PANTHOR_GROUP_DESTROY \ + DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy) +#define DRM_IOCTL_PANTHOR_GROUP_SUBMIT \ + DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit) +#define DRM_IOCTL_PANTHOR_GROUP_GET_STATE \ + DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state) +#define DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE \ + DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create) +#define DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY \ + DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy) + +/** + * DOC: IOCTL arguments + */ + +/** + * struct drm_panthor_obj_array - Object array. + * + * This object is used to pass an array of objects whose size is subject to changes in + * future versions of the driver. In order to support this mutability, we pass a stride + * describing the size of the object as known by userspace. + * + * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use + * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to + * the object size. + */ +struct drm_panthor_obj_array { + /** @stride: Stride of object struct. Used for versioning. */ + __u32 stride; + + /** @count: Number of objects in the array. */ + __u32 count; + + /** @array: User pointer to an array of objects. */ + __u64 array; +}; + +/** + * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field. + * @cnt: Number of elements in the array. + * @ptr: Pointer to the array to pass to the kernel. + * + * Macro initializing a drm_panthor_obj_array based on the object size as known + * by userspace. + */ +#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \ + { .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) } + +/** + * enum drm_panthor_sync_op_flags - Synchronization operation flags. + */ +enum drm_panthor_sync_op_flags { + /** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */ + DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff, + + /** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */ + DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0, + + /** + * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization + * object type. + */ + DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1, + + /** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */ + DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31, + + /** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */ + DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31), +}; + +/** + * struct drm_panthor_sync_op - Synchronization operation. + */ +struct drm_panthor_sync_op { + /** @flags: Synchronization operation flags. 
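
The stride in drm_panthor_obj_array is what keeps every array-carrying Panthor ioctl extensible: the kernel presumably copies min(stride, its own element size) per entry, so trailing fields unknown to one side are ignored or zeroed, per the rules in the DOC block above. A usage sketch with DRM_PANTHOR_OBJ_ARRAY(), borrowing struct drm_panthor_queue_create and struct drm_panthor_group_create from further down (values illustrative, required mask/count fields omitted):

    struct drm_panthor_queue_create queues[2] = {
        { .priority = 15, .ringbuf_size = 65536 },  /* highest priority */
        { .priority = 0,  .ringbuf_size = 65536 },
    };
    struct drm_panthor_group_create gc = {
        .queues = DRM_PANTHOR_OBJ_ARRAY(2, queues),
        /* compute/fragment/tiler masks and limits omitted in this sketch */
    };
    /* ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &gc); */
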
Combination of DRM_PANTHOR_SYNC_OP values. */ + __u32 flags; + + /** @handle: Sync handle. */ + __u32 handle; + + /** + * @timeline_value: MBZ if + * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) != + * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ. + */ + __u64 timeline_value; +}; + +/** + * enum drm_panthor_dev_query_type - Query type + * + * Place new types at the end, don't re-order, don't remove or replace. + */ +enum drm_panthor_dev_query_type { + /** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */ + DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0, + + /** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */ + DRM_PANTHOR_DEV_QUERY_CSIF_INFO, +}; + +/** + * struct drm_panthor_gpu_info - GPU information + * + * Structure grouping all queryable information relating to the GPU. + */ +struct drm_panthor_gpu_info { + /** @gpu_id : GPU ID. */ + __u32 gpu_id; +#define DRM_PANTHOR_ARCH_MAJOR(x) ((x) >> 28) +#define DRM_PANTHOR_ARCH_MINOR(x) (((x) >> 24) & 0xf) +#define DRM_PANTHOR_ARCH_REV(x) (((x) >> 20) & 0xf) +#define DRM_PANTHOR_PRODUCT_MAJOR(x) (((x) >> 16) & 0xf) +#define DRM_PANTHOR_VERSION_MAJOR(x) (((x) >> 12) & 0xf) +#define DRM_PANTHOR_VERSION_MINOR(x) (((x) >> 4) & 0xff) +#define DRM_PANTHOR_VERSION_STATUS(x) ((x) & 0xf) + + /** @gpu_rev: GPU revision. */ + __u32 gpu_rev; + + /** @csf_id: Command stream frontend ID. */ + __u32 csf_id; +#define DRM_PANTHOR_CSHW_MAJOR(x) (((x) >> 26) & 0x3f) +#define DRM_PANTHOR_CSHW_MINOR(x) (((x) >> 20) & 0x3f) +#define DRM_PANTHOR_CSHW_REV(x) (((x) >> 16) & 0xf) +#define DRM_PANTHOR_MCU_MAJOR(x) (((x) >> 10) & 0x3f) +#define DRM_PANTHOR_MCU_MINOR(x) (((x) >> 4) & 0x3f) +#define DRM_PANTHOR_MCU_REV(x) ((x) & 0xf) + + /** @l2_features: L2-cache features. */ + __u32 l2_features; + + /** @tiler_features: Tiler features. */ + __u32 tiler_features; + + /** @mem_features: Memory features. */ + __u32 mem_features; + + /** @mmu_features: MMU features. */ + __u32 mmu_features; +#define DRM_PANTHOR_MMU_VA_BITS(x) ((x) & 0xff) + + /** @thread_features: Thread features. */ + __u32 thread_features; + + /** @max_threads: Maximum number of threads. */ + __u32 max_threads; + + /** @thread_max_workgroup_size: Maximum workgroup size. */ + __u32 thread_max_workgroup_size; + + /** + * @thread_max_barrier_size: Maximum number of threads that can wait + * simultaneously on a barrier. + */ + __u32 thread_max_barrier_size; + + /** @coherency_features: Coherency features. */ + __u32 coherency_features; + + /** @texture_features: Texture features. */ + __u32 texture_features[4]; + + /** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */ + __u32 as_present; + + /** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */ + __u64 shader_present; + + /** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */ + __u64 l2_present; + + /** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */ + __u64 tiler_present; + + /** @core_features: Used to discriminate core variants when they exist. */ + __u32 core_features; + + /** @pad: MBZ. */ + __u32 pad; +}; + +/** + * struct drm_panthor_csif_info - Command stream interface information + * + * Structure grouping all queryable information relating to the command stream interface. + */ +struct drm_panthor_csif_info { + /** @csg_slot_count: Number of command stream group slots exposed by the firmware. */ + __u32 csg_slot_count; + + /** @cs_slot_count: Number of command stream slots per group. 
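
The shift/mask helpers above decode the packed ID registers that DRM_PANTHOR_DEV_QUERY_GPU_INFO returns straight from hardware; for example:

    struct drm_panthor_gpu_info info;
    /* ... filled by DRM_IOCTL_PANTHOR_DEV_QUERY, see the sketch below ... */

    unsigned int arch_major = DRM_PANTHOR_ARCH_MAJOR(info.gpu_id);
    unsigned int arch_minor = DRM_PANTHOR_ARCH_MINOR(info.gpu_id);
    unsigned int va_bits    = DRM_PANTHOR_MMU_VA_BITS(info.mmu_features);
    /* e.g. a GPU MMU with a 48-bit VA space reports va_bits == 48 */
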
*/ + __u32 cs_slot_count; + + /** @cs_reg_count: Number of command stream registers. */ + __u32 cs_reg_count; + + /** @scoreboard_slot_count: Number of scoreboard slots. */ + __u32 scoreboard_slot_count; + + /** + * @unpreserved_cs_reg_count: Number of command stream registers reserved by + * the kernel driver to call a userspace command stream. + * + * All registers can be used by a userspace command stream, but the + * [cs_slot_count - unpreserved_cs_reg_count .. cs_slot_count] registers are + * used by the kernel when DRM_PANTHOR_IOCTL_GROUP_SUBMIT is called. + */ + __u32 unpreserved_cs_reg_count; + + /** + * @pad: Padding field, set to zero. + */ + __u32 pad; +}; + +/** + * struct drm_panthor_dev_query - Arguments passed to DRM_PANTHOR_IOCTL_DEV_QUERY + */ +struct drm_panthor_dev_query { + /** @type: the query type (see drm_panthor_dev_query_type). */ + __u32 type; + + /** + * @size: size of the type being queried. + * + * If pointer is NULL, size is updated by the driver to provide the + * output structure size. If pointer is not NULL, the driver will + * only copy min(size, actual_structure_size) bytes to the pointer, + * and update the size accordingly. This allows us to extend query + * types without breaking userspace. + */ + __u32 size; + + /** + * @pointer: user pointer to a query type struct. + * + * Pointer can be NULL, in which case, nothing is copied, but the + * actual structure size is returned. If not NULL, it must point to + * a location that's large enough to hold size bytes. + */ + __u64 pointer; +}; + +/** + * struct drm_panthor_vm_create - Arguments passed to DRM_PANTHOR_IOCTL_VM_CREATE + */ +struct drm_panthor_vm_create { + /** @flags: VM flags, MBZ. */ + __u32 flags; + + /** @id: Returned VM ID. */ + __u32 id; + + /** + * @user_va_range: Size of the VA space reserved for user objects. + * + * The kernel will pick the remaining space to map kernel-only objects to the + * VM (heap chunks, heap context, ring buffers, kernel synchronization objects, + * ...). If the space left for kernel objects is too small, kernel object + * allocation will fail further down the road. One can use + * drm_panthor_gpu_info::mmu_features to extract the total virtual address + * range, and chose a user_va_range that leaves some space to the kernel. + * + * If user_va_range is zero, the kernel will pick a sensible value based on + * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user + * split should leave enough VA space for userspace processes to support SVM, + * while still allowing the kernel to map some amount of kernel objects in + * the kernel VA range). The value chosen by the driver will be returned in + * @user_va_range. + * + * User VA space always starts at 0x0, kernel VA space is always placed after + * the user VA range. + */ + __u64 user_va_range; +}; + +/** + * struct drm_panthor_vm_destroy - Arguments passed to DRM_PANTHOR_IOCTL_VM_DESTROY + */ +struct drm_panthor_vm_destroy { + /** @id: ID of the VM to destroy. */ + __u32 id; + + /** @pad: MBZ. */ + __u32 pad; +}; + +/** + * enum drm_panthor_vm_bind_op_flags - VM bind operation flags + */ +enum drm_panthor_vm_bind_op_flags { + /** + * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only. + * + * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP. + */ + DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0, + + /** + * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable. + * + * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP. 
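
struct drm_panthor_dev_query implements the usual two-step size handshake described in its kerneldoc: query once with a NULL pointer to learn the kernel's structure size, then query again with a real buffer, with min(size, actual) copy semantics either way. A sketch:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/panthor_drm.h>

    static int query_gpu_info(int fd, struct drm_panthor_gpu_info *info)
    {
        struct drm_panthor_dev_query q = {
            .type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
            .size = 0,
            .pointer = 0,       /* NULL: kernel just reports its size */
        };

        if (ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q))
            return -1;

        q.size = sizeof(*info); /* kernel copies min(q.size, actual) */
        q.pointer = (uint64_t)(uintptr_t)info;
        return ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);
    }
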
+ */ + DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1, + + /** + * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached. + * + * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP. + */ + DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2, + + /** + * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation. + */ + DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28), + + /** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */ + DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28, + + /** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */ + DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28, + + /** + * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation. + * + * Just serves as a synchronization point on a VM queue. + * + * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags, + * and drm_panthor_vm_bind_op::syncs contains at least one element. + */ + DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28, +}; + +/** + * struct drm_panthor_vm_bind_op - VM bind operation + */ +struct drm_panthor_vm_bind_op { + /** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */ + __u32 flags; + + /** + * @bo_handle: Handle of the buffer object to map. + * MBZ for unmap or sync-only operations. + */ + __u32 bo_handle; + + /** + * @bo_offset: Buffer object offset. + * MBZ for unmap or sync-only operations. + */ + __u64 bo_offset; + + /** + * @va: Virtual address to map/unmap. + * MBZ for sync-only operations. + */ + __u64 va; + + /** + * @size: Size to map/unmap. + * MBZ for sync-only operations. + */ + __u64 size; + + /** + * @syncs: Array of struct drm_panthor_sync_op synchronization + * operations. + * + * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on + * the drm_panthor_vm_bind object containing this VM bind operation. + * + * This array shall not be empty for sync-only operations. + */ + struct drm_panthor_obj_array syncs; + +}; + +/** + * enum drm_panthor_vm_bind_flags - VM bind flags + */ +enum drm_panthor_vm_bind_flags { + /** + * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM + * queue instead of being executed synchronously. + */ + DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0, +}; + +/** + * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND + */ +struct drm_panthor_vm_bind { + /** @vm_id: VM targeted by the bind request. */ + __u32 vm_id; + + /** @flags: Combination of drm_panthor_vm_bind_flags flags. */ + __u32 flags; + + /** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */ + struct drm_panthor_obj_array ops; +}; + +/** + * enum drm_panthor_vm_state - VM states. + */ +enum drm_panthor_vm_state { + /** + * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable. + * + * New VM operations will be accepted on this VM. + */ + DRM_PANTHOR_VM_STATE_USABLE, + + /** + * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable. + * + * Something put the VM in an unusable state (like an asynchronous + * VM_BIND request failing for any reason). + * + * Once the VM is in this state, all new MAP operations will be + * rejected, and any GPU job targeting this VM will fail. + * UNMAP operations are still accepted. + * + * The only way to recover from an unusable VM is to create a new + * VM, and destroy the old one. + */ + DRM_PANTHOR_VM_STATE_UNUSABLE, +}; + +/** + * struct drm_panthor_vm_get_state - Get VM state. + */ +struct drm_panthor_vm_get_state { + /** @vm_id: VM targeted by the get_state request. */ + __u32 vm_id; + + /** + * @state: state returned by the driver. 
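Putting the bind-op flags and structs above together, a sketch of one asynchronous MAP operation with a signalling sync-op; fd, vm_id, bo, va, size and syncobj_handle are assumed to exist, and the sync-op flag names come from the drm_panthor_sync_op section of this header:

struct drm_panthor_sync_op sync = {
	.flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
		 DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
	.handle = syncobj_handle,
};
struct drm_panthor_vm_bind_op op = {
	.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
	.bo_handle = bo,
	.bo_offset = 0,
	.va = va,     /* GPU virtual address inside the user VA range */
	.size = size,
	.syncs = { .stride = sizeof(sync), .count = 1,
		   .array = (uint64_t)(uintptr_t)&sync },
};
struct drm_panthor_vm_bind bind = {
	.vm_id = vm_id,
	.flags = DRM_PANTHOR_VM_BIND_ASYNC, /* required when op.syncs is non-empty */
	.ops = { .stride = sizeof(op), .count = 1,
		 .array = (uint64_t)(uintptr_t)&op },
};
ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind);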
+ * + * Must be one of the enum drm_panthor_vm_state values. + */ + __u32 state; +}; + +/** + * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time. + */ +enum drm_panthor_bo_flags { + /** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */ + DRM_PANTHOR_BO_NO_MMAP = (1 << 0), +}; + +/** + * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE. + */ +struct drm_panthor_bo_create { + /** + * @size: Requested size for the object. + * + * The (page-aligned) allocated size for the object will be returned. + */ + __u64 size; + + /** + * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags. + */ + __u32 flags; + + /** + * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to. + * + * If not zero, the field must refer to a valid VM ID, and implies that: + * - the buffer object will only ever be bound to that VM + * - cannot be exported as a PRIME fd + */ + __u32 exclusive_vm_id; + + /** + * @handle: Returned handle for the object. + * + * Object handles are nonzero. + */ + __u32 handle; + + /** @pad: MBZ. */ + __u32 pad; +}; + +/** + * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET. + */ +struct drm_panthor_bo_mmap_offset { + /** @handle: Handle of the object we want an mmap offset for. */ + __u32 handle; + + /** @pad: MBZ. */ + __u32 pad; + + /** @offset: The fake offset to use for subsequent mmap calls. */ + __u64 offset; +}; + +/** + * struct drm_panthor_queue_create - Queue creation arguments. + */ +struct drm_panthor_queue_create { + /** + * @priority: Defines the priority of queues inside a group. Goes from 0 to 15, + * 15 being the highest priority. + */ + __u8 priority; + + /** @pad: Padding fields, MBZ. */ + __u8 pad[3]; + + /** @ringbuf_size: Size of the ring buffer to allocate to this queue. */ + __u32 ringbuf_size; +}; + +/** + * enum drm_panthor_group_priority - Scheduling group priority + */ +enum drm_panthor_group_priority { + /** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */ + PANTHOR_GROUP_PRIORITY_LOW = 0, + + /** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */ + PANTHOR_GROUP_PRIORITY_MEDIUM, + + /** @PANTHOR_GROUP_PRIORITY_HIGH: High priority group. */ + PANTHOR_GROUP_PRIORITY_HIGH, +}; + +/** + * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE + */ +struct drm_panthor_group_create { + /** @queues: Array of drm_panthor_queue_create elements. */ + struct drm_panthor_obj_array queues; + + /** + * @max_compute_cores: Maximum number of cores that can be used by compute + * jobs across CS queues bound to this group. + * + * Must be less than or equal to the number of bits set in @compute_core_mask. + */ + __u8 max_compute_cores; + + /** + * @max_fragment_cores: Maximum number of cores that can be used by fragment + * jobs across CS queues bound to this group. + * + * Must be less than or equal to the number of bits set in @fragment_core_mask. + */ + __u8 max_fragment_cores; + + /** + * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs + * across CS queues bound to this group. + * + * Must be less than or equal to the number of bits set in @tiler_core_mask. + */ + __u8 max_tiler_cores; + + /** @priority: Group priority (see enum drm_panthor_group_priority). */ + __u8 priority; + + /** @pad: Padding field, MBZ. */ + __u32 pad; + + /** + * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
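A short sketch of the BO lifecycle described by the two BO structs above (a fragment; error handling omitted, fd and vm_id assumed, <sys/mman.h> needed for mmap):

struct drm_panthor_bo_create create = {
	.size = 4096,
	.flags = 0,
	.exclusive_vm_id = vm_id, /* 0 would keep the BO exportable as PRIME */
};
ioctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &create);

struct drm_panthor_bo_mmap_offset mmo = { .handle = create.handle };
ioctl(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &mmo);

/* The fake offset is only meaningful for mmap() on the DRM fd. */
void *cpu = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, mmo.offset);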
+ * + * This field must have at least @max_compute_cores bits set. + * + * The bits set here should also be set in drm_panthor_gpu_info::shader_present. + */ + __u64 compute_core_mask; + + /** + * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs. + * + * This field must have at least @max_fragment_cores bits set. + * + * The bits set here should also be set in drm_panthor_gpu_info::shader_present. + */ + __u64 fragment_core_mask; + + /** + * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs. + * + * This field must have at least @max_tiler_cores bits set. + * + * The bits set here should also be set in drm_panthor_gpu_info::tiler_present. + */ + __u64 tiler_core_mask; + + /** + * @vm_id: VM ID to bind this group to. + * + * All submission to queues bound to this group will use this VM. + */ + __u32 vm_id; + + /** + * @group_handle: Returned group handle. Passed back when submitting jobs or + * destroying a group. + */ + __u32 group_handle; +}; + +/** + * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY + */ +struct drm_panthor_group_destroy { + /** @group_handle: Group to destroy */ + __u32 group_handle; + + /** @pad: Padding field, MBZ. */ + __u32 pad; +}; + +/** + * struct drm_panthor_queue_submit - Job submission arguments. + * + * This describes the userspace command stream to call from the kernel + * command stream ring-buffer. Queue submission is always part of a group + * submission, taking one or more jobs to submit to the underlying queues. + */ +struct drm_panthor_queue_submit { + /** @queue_index: Index of the queue inside a group. */ + __u32 queue_index; + + /** + * @stream_size: Size of the command stream to execute. + * + * Must be 64-bit/8-byte aligned (the size of a CS instruction). + * + * Can be zero if stream_addr is zero too. + * + * When the stream size is zero, the queue submit serves as a + * synchronization point. + */ + __u32 stream_size; + + /** + * @stream_addr: GPU address of the command stream to execute. + * + * Must be 64-byte aligned. + * + * Can be zero if stream_size is zero too. + */ + __u64 stream_addr; + + /** + * @latest_flush: FLUSH_ID read at the time the stream was built. + * + * This allows cache flush elimination for the automatic + * flush+invalidate(all) done at submission time, which is needed to + * ensure the GPU doesn't get garbage when reading the indirect command + * stream buffers. If you want the cache flush to happen + * unconditionally, pass a zero here. + * + * Ignored when stream_size is zero. + */ + __u32 latest_flush; + + /** @pad: MBZ. */ + __u32 pad; + + /** @syncs: Array of struct drm_panthor_sync_op sync operations. */ + struct drm_panthor_obj_array syncs; +}; + +/** + * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT + */ +struct drm_panthor_group_submit { + /** @group_handle: Handle of the group to queue jobs to. */ + __u32 group_handle; + + /** @pad: MBZ. */ + __u32 pad; + + /** @queue_submits: Array of drm_panthor_queue_submit objects. */ + struct drm_panthor_obj_array queue_submits; +}; + +/** + * enum drm_panthor_group_state_flags - Group state flags + */ +enum drm_panthor_group_state_flags { + /** + * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs. + * + * When a group ends up with this flag set, no jobs can be submitted to its queues. + */ + DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0, + + /** + * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
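For the queue/group submit pair above, a sketch of a single-queue submission with one signal sync-op; fd, group_handle, stream_gpu_va, stream_size and syncobj_handle are assumed:

struct drm_panthor_sync_op sync = {
	.flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
		 DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
	.handle = syncobj_handle,
};
struct drm_panthor_queue_submit qsubmit = {
	.queue_index = 0,
	.stream_size = stream_size,   /* multiple of 8, the CS instruction size */
	.stream_addr = stream_gpu_va, /* 64-byte aligned */
	.latest_flush = 0,            /* 0 forces an unconditional cache flush */
	.syncs = { .stride = sizeof(sync), .count = 1,
		   .array = (uint64_t)(uintptr_t)&sync },
};
struct drm_panthor_group_submit gsubmit = {
	.group_handle = group_handle,
	.queue_submits = { .stride = sizeof(qsubmit), .count = 1,
			   .array = (uint64_t)(uintptr_t)&qsubmit },
};
ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &gsubmit);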
+ * + * When a group ends up with this flag set, no jobs can be submitted to its queues. + */ + DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1, +}; + +/** + * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE + * + * Used to query the state of a group and decide whether a new group should be created to + * replace it. + */ +struct drm_panthor_group_get_state { + /** @group_handle: Handle of the group to query state on. */ + __u32 group_handle; + + /** + * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the + * group state. + */ + __u32 state; + + /** @fatal_queues: Bitmask of queues that faced fatal faults. */ + __u32 fatal_queues; + + /** @pad: MBZ */ + __u32 pad; +}; + +/** + * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE + */ +struct drm_panthor_tiler_heap_create { + /** @vm_id: VM ID the tiler heap should be mapped to */ + __u32 vm_id; + + /** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */ + __u32 initial_chunk_count; + + /** + * @chunk_size: Chunk size. + * + * Must be page-aligned and lie in the [128k:8M] range. + */ + __u32 chunk_size; + + /** + * @max_chunks: Maximum number of chunks that can be allocated. + * + * Must be at least @initial_chunk_count. + */ + __u32 max_chunks; + + /** + * @target_in_flight: Maximum number of in-flight render passes. + * + * If the heap has more than @target_in_flight tiler jobs in-flight, the FW will wait for render + * passes to finish before queuing new tiler jobs. + */ + __u32 target_in_flight; + + /** @handle: Returned heap handle. Passed back to DESTROY_TILER_HEAP. */ + __u32 handle; + + /** @tiler_heap_ctx_gpu_va: Returned heap GPU virtual address */ + __u64 tiler_heap_ctx_gpu_va; + + /** + * @first_heap_chunk_gpu_va: First heap chunk. + * + * The tiler heap is formed of heap chunks forming a singly-linked list. This + * is the first element in the list. + */ + __u64 first_heap_chunk_gpu_va; +}; + +/** + * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY + */ +struct drm_panthor_tiler_heap_destroy { + /** + * @handle: Handle of the tiler heap to destroy. + * + * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE. + */ + __u32 handle; + + /** @pad: Padding field, MBZ. */ + __u32 pad; +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* _PANTHOR_DRM_H_ */ diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 538a3ac95c..d425b83181 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -459,8 +459,16 @@ struct drm_xe_gt { * by struct drm_xe_query_mem_regions' mem_class.
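For the tiler-heap ioctl above, a sketch with illustrative parameter values (fd and vm_id assumed; the chunk size and counts are arbitrary but respect the documented constraints):

struct drm_panthor_tiler_heap_create heap = {
	.vm_id = vm_id,
	.initial_chunk_count = 4,
	.chunk_size = 2 * 1024 * 1024, /* page-aligned, within [128k:8M] */
	.max_chunks = 64,
	.target_in_flight = 65535,
};
ioctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &heap);
/* heap.tiler_heap_ctx_gpu_va and heap.first_heap_chunk_gpu_va are then
 * referenced from the command streams driving the tiler. */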
*/ __u64 far_mem_regions; + /** @ip_ver_major: Graphics/media IP major version on GMD_ID platforms */ + __u16 ip_ver_major; + /** @ip_ver_minor: Graphics/media IP minor version on GMD_ID platforms */ + __u16 ip_ver_minor; + /** @ip_ver_rev: Graphics/media IP revision version on GMD_ID platforms */ + __u16 ip_ver_rev; + /** @pad2: MBZ */ + __u16 pad2; /** @reserved: Reserved */ - __u64 reserved[8]; + __u64 reserved[7]; }; /** @@ -510,9 +518,9 @@ struct drm_xe_query_topology_mask { /** @gt_id: GT ID the mask is associated with */ __u16 gt_id; -#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0) -#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1) -#define DRM_XE_TOPO_EU_PER_DSS (1 << 2) +#define DRM_XE_TOPO_DSS_GEOMETRY 1 +#define DRM_XE_TOPO_DSS_COMPUTE 2 +#define DRM_XE_TOPO_EU_PER_DSS 4 /** @type: type of mask */ __u16 type; @@ -583,6 +591,7 @@ struct drm_xe_query_engine_cycles { struct drm_xe_query_uc_fw_version { /** @uc_type: The micro-controller type to query firmware version */ #define XE_QUERY_UC_TYPE_GUC_SUBMISSION 0 +#define XE_QUERY_UC_TYPE_HUC 1 __u16 uc_type; /** @pad: MBZ */ @@ -767,7 +776,13 @@ struct drm_xe_gem_create { #define DRM_XE_GEM_CPU_CACHING_WC 2 /** * @cpu_caching: The CPU caching mode to select for this object. If - * mmaping the object the mode selected here will also be used. + * mmapping the object the mode selected here will also be used. The + * exception is when mapping system memory (including data evicted + * to system) on discrete GPUs. The caching mode selected will + * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency + * between the GPU and CPU is guaranteed. The caching mode of + * existing CPU-mappings will be updated transparently to + * user-space clients. */ __u16 cpu_caching; /** @pad: MBZ */ @@ -862,6 +877,12 @@ struct drm_xe_vm_destroy { * - %DRM_XE_VM_BIND_OP_PREFETCH * * and the @flags can be: + * - %DRM_XE_VM_BIND_FLAG_READONLY - Set up the page tables as read-only + * to ensure write protection + * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the + * MAP operation immediately rather than deferring the MAP to the page + * fault handler. This is implied on a non-faulting VM as there is no + * fault handler to defer to. * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page * tables are setup with a special bit which indicates writes are * dropped and all reads return zero.
In the future, the NULL flags @@ -954,6 +975,8 @@ struct drm_xe_vm_bind_op { /** @op: Bind operation to perform */ __u32 op; +#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0) +#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1) #define DRM_XE_VM_BIND_FLAG_NULL (1 << 2) #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3) /** @flags: Bind flags */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index bcd84985fa..90706a47f6 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1115,6 +1115,7 @@ enum bpf_attach_type { BPF_CGROUP_UNIX_GETSOCKNAME, BPF_NETKIT_PRIMARY, BPF_NETKIT_PEER, + BPF_TRACE_KPROBE_SESSION, __MAX_BPF_ATTACH_TYPE }; @@ -1135,6 +1136,7 @@ enum bpf_link_type { BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, BPF_LINK_TYPE_NETKIT = 13, + BPF_LINK_TYPE_SOCKMAP = 14, __MAX_BPF_LINK_TYPE, }; @@ -1662,8 +1664,10 @@ union bpf_attr { } query; struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ - __u64 name; - __u32 prog_fd; + __u64 name; + __u32 prog_fd; + __u32 :32; + __aligned_u64 cookie; } raw_tracepoint; struct { /* anonymous struct for BPF_BTF_LOAD */ @@ -3392,6 +3396,10 @@ union bpf_attr { * for the nexthop. If the src addr cannot be derived, * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this * case, *params*->dmac and *params*->smac are not set either. + * **BPF_FIB_LOOKUP_MARK** + * Use the mark present in *params*->mark for the fib lookup. + * This option should not be used with BPF_FIB_LOOKUP_DIRECT, + * as it only has meaning for full lookups. * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. @@ -5020,7 +5028,7 @@ union bpf_attr { * bytes will be copied to *dst* * Return * The **hash_algo** is returned on success, - * **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if + * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if * invalid arguments are passed. * * struct socket *bpf_sock_from_file(struct file *file) @@ -5506,7 +5514,7 @@ union bpf_attr { * bytes will be copied to *dst* * Return * The **hash_algo** is returned on success, - * **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if + * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if * invalid arguments are passed. * * void *bpf_kptr_xchg(void *map_value, void *ptr) @@ -6718,6 +6726,10 @@ struct bpf_link_info { __u32 ifindex; __u32 attach_type; } netkit; + struct { + __u32 map_id; + __u32 attach_type; + } sockmap; }; } __attribute__((aligned(8))); @@ -6936,6 +6948,8 @@ enum { * socket transition to LISTEN state. */ BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. + * Arg1: measured RTT input (mrtt) + * Arg2: updated srtt */ BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option. 
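The newly documented BPF_SOCK_OPS_RTT_CB arguments (Arg1 = measured RTT, Arg2 = updated srtt) can be read from the args array of struct bpf_sock_ops. A minimal sketch of a sock_ops program, assuming the usual libbpf helpers:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int observe_rtt(struct bpf_sock_ops *skops)
{
	if (skops->op == BPF_SOCK_OPS_RTT_CB) {
		__u32 mrtt = skops->args[0]; /* measured RTT sample */
		__u32 srtt = skops->args[1]; /* smoothed RTT after update */
		bpf_printk("mrtt=%u srtt=%u", mrtt, srtt);
	}
	return 1;
}

char _license[] SEC("license") = "GPL";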
* It will be called to handle @@ -7118,6 +7132,7 @@ enum { BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), BPF_FIB_LOOKUP_TBID = (1U << 3), BPF_FIB_LOOKUP_SRC = (1U << 4), + BPF_FIB_LOOKUP_MARK = (1U << 5), }; enum { @@ -7195,8 +7210,19 @@ struct bpf_fib_lookup { __u32 tbid; }; - __u8 smac[6]; /* ETH_ALEN */ - __u8 dmac[6]; /* ETH_ALEN */ + union { + /* input */ + struct { + __u32 mark; /* policy routing */ + /* 2 4-byte holes for input */ + }; + + /* output: source and dest mac */ + struct { + __u8 smac[6]; /* ETH_ALEN */ + __u8 dmac[6]; /* ETH_ALEN */ + }; + }; }; struct bpf_redir_neigh { @@ -7283,6 +7309,10 @@ struct bpf_timer { __u64 __opaque[2]; } __attribute__((aligned(8))); +struct bpf_wq { + __u64 __opaque[2]; +} __attribute__((aligned(8))); + struct bpf_dynptr { __u64 __opaque[2]; } __attribute__((aligned(8))); diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 5730c67f06..20a6c0fc14 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -32,7 +32,7 @@ enum { CRYPTO_MSG_UPDATEALG, CRYPTO_MSG_GETALG, CRYPTO_MSG_DELRNG, - CRYPTO_MSG_GETSTAT, + CRYPTO_MSG_GETSTAT, /* No longer supported, do not use. */ __CRYPTO_MSG_MAX }; #define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1) @@ -54,16 +54,16 @@ enum crypto_attr_type_t { CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */ CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */ - CRYPTOCFGA_STAT_LARVAL, /* struct crypto_stat */ - CRYPTOCFGA_STAT_HASH, /* struct crypto_stat */ - CRYPTOCFGA_STAT_BLKCIPHER, /* struct crypto_stat */ - CRYPTOCFGA_STAT_AEAD, /* struct crypto_stat */ - CRYPTOCFGA_STAT_COMPRESS, /* struct crypto_stat */ - CRYPTOCFGA_STAT_RNG, /* struct crypto_stat */ - CRYPTOCFGA_STAT_CIPHER, /* struct crypto_stat */ - CRYPTOCFGA_STAT_AKCIPHER, /* struct crypto_stat */ - CRYPTOCFGA_STAT_KPP, /* struct crypto_stat */ - CRYPTOCFGA_STAT_ACOMP, /* struct crypto_stat */ + CRYPTOCFGA_STAT_LARVAL, /* No longer supported, do not use. */ + CRYPTOCFGA_STAT_HASH, /* No longer supported, do not use. */ + CRYPTOCFGA_STAT_BLKCIPHER, /* No longer supported, do not use. */ + CRYPTOCFGA_STAT_AEAD, /* No longer supported, do not use. */ + CRYPTOCFGA_STAT_COMPRESS, /* No longer supported, do not use. */ + CRYPTOCFGA_STAT_RNG, /* No longer supported, do not use. */ + CRYPTOCFGA_STAT_CIPHER, /* No longer supported, do not use. */ + CRYPTOCFGA_STAT_AKCIPHER, /* No longer supported, do not use. */ + CRYPTOCFGA_STAT_KPP, /* No longer supported, do not use. */ + CRYPTOCFGA_STAT_ACOMP, /* No longer supported, do not use. */ __CRYPTOCFGA_MAX #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) @@ -79,6 +79,7 @@ struct crypto_user_alg { __u32 cru_flags; }; +/* No longer supported, do not use. */ struct crypto_stat_aead { char type[CRYPTO_MAX_NAME]; __u64 stat_encrypt_cnt; @@ -88,6 +89,7 @@ struct crypto_stat_aead { __u64 stat_err_cnt; }; +/* No longer supported, do not use. */ struct crypto_stat_akcipher { char type[CRYPTO_MAX_NAME]; __u64 stat_encrypt_cnt; @@ -99,6 +101,7 @@ struct crypto_stat_akcipher { __u64 stat_err_cnt; }; +/* No longer supported, do not use. */ struct crypto_stat_cipher { char type[CRYPTO_MAX_NAME]; __u64 stat_encrypt_cnt; @@ -108,6 +111,7 @@ struct crypto_stat_cipher { __u64 stat_err_cnt; }; +/* No longer supported, do not use. */ struct crypto_stat_compress { char type[CRYPTO_MAX_NAME]; __u64 stat_compress_cnt; @@ -117,6 +121,7 @@ struct crypto_stat_compress { __u64 stat_err_cnt; }; +/* No longer supported, do not use. 
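The new input-only mark field in the bpf_fib_lookup union above pairs with the BPF_FIB_LOOKUP_MARK flag. A hedged fragment from a tc program (skb, iph and rc are assumed to be set up by the surrounding code):

struct bpf_fib_lookup params = {};

params.family   = AF_INET;
params.ifindex  = skb->ifindex;
params.ipv4_src = iph->saddr;
params.ipv4_dst = iph->daddr;
params.mark     = skb->mark; /* input-only; shares space with smac/dmac */

rc = bpf_fib_lookup(skb, &params, sizeof(params), BPF_FIB_LOOKUP_MARK);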
*/ struct crypto_stat_hash { char type[CRYPTO_MAX_NAME]; __u64 stat_hash_cnt; @@ -124,6 +129,7 @@ struct crypto_stat_hash { __u64 stat_err_cnt; }; +/* No longer supported, do not use. */ struct crypto_stat_kpp { char type[CRYPTO_MAX_NAME]; __u64 stat_setsecret_cnt; @@ -132,6 +138,7 @@ struct crypto_stat_kpp { __u64 stat_err_cnt; }; +/* No longer supported, do not use. */ struct crypto_stat_rng { char type[CRYPTO_MAX_NAME]; __u64 stat_generate_cnt; @@ -140,6 +147,7 @@ struct crypto_stat_rng { __u64 stat_err_cnt; }; +/* No longer supported, do not use. */ struct crypto_stat_larval { char type[CRYPTO_MAX_NAME]; }; diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index 42066f4eb8..c6c0fe2749 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -47,6 +47,9 @@ ___DEPRECATED(SCAN_MEDIA, "Scan Media"), \ ___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"), \ ___C(GET_TIMESTAMP, "Get Timestamp"), \ + ___C(GET_LOG_CAPS, "Get Log Capabilities"), \ + ___C(CLEAR_LOG, "Clear Log"), \ + ___C(GET_SUP_LOG_SUBLIST, "Get Supported Logs Sub-List"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 2da0c7eb67..9401aa3436 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -686,6 +686,7 @@ enum devlink_port_function_attr { DEVLINK_PORT_FN_ATTR_OPSTATE, /* u8 */ DEVLINK_PORT_FN_ATTR_CAPS, /* bitfield32 */ DEVLINK_PORT_FN_ATTR_DEVLINK, /* nested */ + DEVLINK_PORT_FN_ATTR_MAX_IO_EQS, /* u32 */ __DEVLINK_PORT_FUNCTION_ATTR_MAX, DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1 diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h index 7e0983b987..8d38c6befd 100644 --- a/include/uapi/linux/dvb/frontend.h +++ b/include/uapi/linux/dvb/frontend.h @@ -854,7 +854,7 @@ struct dtv_stats { union { __u64 uvalue; /* for counters and relative scales */ __s64 svalue; /* for 0.001 dB measures */ - }; + } __attribute__ ((packed)); } __attribute__ ((packed)); diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 11fc18988b..8733a31179 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -753,6 +753,61 @@ enum ethtool_module_power_mode { }; /** + * enum ethtool_pse_types - Types of PSE controller. + * @ETHTOOL_PSE_UNKNOWN: Type of PSE controller is unknown + * @ETHTOOL_PSE_PODL: PSE controller which supports PoDL + * @ETHTOOL_PSE_C33: PSE controller which supports Clause 33 (PoE) + */ +enum ethtool_pse_types { + ETHTOOL_PSE_UNKNOWN = 1 << 0, + ETHTOOL_PSE_PODL = 1 << 1, + ETHTOOL_PSE_C33 = 1 << 2, +}; + +/** + * enum ethtool_c33_pse_admin_state - operational state of the Clause 33 PSE + * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState + * @ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN: state of PSE functions is unknown + * @ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED: PSE functions are disabled + * @ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED: PSE functions are enabled + */ +enum ethtool_c33_pse_admin_state { + ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED, + ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED, +}; + +/** + * enum ethtool_c33_pse_pw_d_status - power detection status of the PSE. + * IEEE 802.3-2022 30.9.1.1.3 aPoDLPSEPowerDetectionStatus: + * @ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN: PSE status is unknown + * @ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED: The enumeration "disabled" + * indicates that the PSE State diagram is in the state DISABLED.
+ * @ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING: The enumeration "searching" + * indicates the PSE State diagram is in a state other than those + * listed. + * @ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING: The enumeration + * "deliveringPower" indicates that the PSE State diagram is in the + * state POWER_ON. + * @ETHTOOL_C33_PSE_PW_D_STATUS_TEST: The enumeration "test" indicates that + * the PSE State diagram is in the state TEST_MODE. + * @ETHTOOL_C33_PSE_PW_D_STATUS_FAULT: The enumeration "fault" indicates that + * the PSE State diagram is in the state TEST_ERROR. + * @ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT: The enumeration "otherFault" + * indicates that the PSE State diagram is in the state IDLE due to + * the variable error_condition = true. + */ +enum ethtool_c33_pse_pw_d_status { + ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED, + ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING, + ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING, + ETHTOOL_C33_PSE_PW_D_STATUS_TEST, + ETHTOOL_C33_PSE_PW_D_STATUS_FAULT, + ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT, +}; + +/** * enum ethtool_podl_pse_admin_state - operational state of the PoDL PSE * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState * @ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN: state of PoDL PSE functions are diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 3f89074aa0..b49b804b94 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -117,12 +117,11 @@ enum { /* request header */ -/* use compact bitsets in reply */ -#define ETHTOOL_FLAG_COMPACT_BITSETS (1 << 0) -/* provide optional reply for SET or ACT requests */ -#define ETHTOOL_FLAG_OMIT_REPLY (1 << 1) -/* request statistics, if supported by the driver */ -#define ETHTOOL_FLAG_STATS (1 << 2) +enum ethtool_header_flags { + ETHTOOL_FLAG_COMPACT_BITSETS = 1 << 0, /* use compact bitsets in reply */ + ETHTOOL_FLAG_OMIT_REPLY = 1 << 1, /* provide optional reply for SET or ACT requests */ + ETHTOOL_FLAG_STATS = 1 << 2, /* request statistics, if supported by the driver */ +}; #define ETHTOOL_FLAG_ALL (ETHTOOL_FLAG_COMPACT_BITSETS | \ ETHTOOL_FLAG_OMIT_REPLY | \ @@ -478,12 +477,26 @@ enum { ETHTOOL_A_TSINFO_TX_TYPES, /* bitset */ ETHTOOL_A_TSINFO_RX_FILTERS, /* bitset */ ETHTOOL_A_TSINFO_PHC_INDEX, /* u32 */ + ETHTOOL_A_TSINFO_STATS, /* nest - _A_TSINFO_STAT */ /* add new constants above here */ __ETHTOOL_A_TSINFO_CNT, ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) }; +enum { + ETHTOOL_A_TS_STAT_UNSPEC, + + ETHTOOL_A_TS_STAT_TX_PKTS, /* uint */ + ETHTOOL_A_TS_STAT_TX_LOST, /* uint */ + ETHTOOL_A_TS_STAT_TX_ERR, /* uint */ + + /* add new constants above here */ + __ETHTOOL_A_TS_STAT_CNT, + ETHTOOL_A_TS_STAT_MAX = (__ETHTOOL_A_TS_STAT_CNT - 1) + +}; + /* PHC VCLOCKS */ enum { @@ -515,6 +528,10 @@ enum { ETHTOOL_A_CABLE_RESULT_CODE_OPEN, ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT, ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT, + /* detected reflection caused by the impedance discontinuity between + * a regular 100 Ohm cable and a part with the abnormal impedance value + */ + ETHTOOL_A_CABLE_RESULT_CODE_IMPEDANCE_MISMATCH, }; enum { @@ -895,6 +912,9 @@ enum { ETHTOOL_A_PODL_PSE_ADMIN_STATE, /* u32 */ ETHTOOL_A_PODL_PSE_ADMIN_CONTROL, /* u32 */ ETHTOOL_A_PODL_PSE_PW_D_STATUS, /* u32 */ + ETHTOOL_A_C33_PSE_ADMIN_STATE, /* u32 */ + ETHTOOL_A_C33_PSE_ADMIN_CONTROL, /* u32 */ + ETHTOOL_A_C33_PSE_PW_D_STATUS, /* u32 */ /* add new constants above here */ __ETHTOOL_A_PSE_CNT, diff --git a/include/uapi/linux/fcntl.h 
b/include/uapi/linux/fcntl.h index 282e90aeb1..c0bcc185fa 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -9,6 +9,14 @@ #define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1) /* + * Request notifications on a directory. + * See below for events that may be notified. + */ +#define F_NOTIFY (F_LINUX_SPECIFIC_BASE + 2) + +#define F_DUPFD_QUERY (F_LINUX_SPECIFIC_BASE + 3) + +/* * Cancel a blocking posix lock; internal use only until we expose an * asynchronous lock api to userspace: */ @@ -18,12 +26,6 @@ #define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6) /* - * Request nofications on a directory. - * See below for events that may be notified. - */ -#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2) - -/* * Set and get of pipe page size array */ #define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7) diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index 3dcdb9e33c..40f5388d6d 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h @@ -31,6 +31,9 @@ enum gtp_attrs { GTPA_I_TEI, /* for GTPv1 only */ GTPA_O_TEI, /* for GTPv1 only */ GTPA_PAD, + GTPA_PEER_ADDR6, + GTPA_MS_ADDR6, + GTPA_FAMILY, __GTPA_MAX, }; #define GTPA_MAX (__GTPA_MAX - 1) diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h index ecaece3af3..4eaab89e28 100644 --- a/include/uapi/linux/icmpv6.h +++ b/include/uapi/linux/icmpv6.h @@ -112,6 +112,7 @@ struct icmp6hdr { #define ICMPV6_MOBILE_PREFIX_ADV 147 #define ICMPV6_MRDISC_ADV 151 +#define ICMPV6_MRDISC_SOL 152 #define ICMPV6_MSG_MAX 255 diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ffa637b38c..6dc258993b 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -1466,6 +1466,8 @@ enum { IFLA_GTP_ROLE, IFLA_GTP_CREATE_SOCKETS, IFLA_GTP_RESTART_COUNT, + IFLA_GTP_LOCAL, + IFLA_GTP_LOCAL6, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) @@ -1771,6 +1773,7 @@ enum { IFLA_HSR_PROTOCOL, /* Indicate different protocol than * HSR. For example PRP. */ + IFLA_HSR_INTERLINK, /* HSR interlink network device */ __IFLA_HSR_MAX, }; diff --git a/include/uapi/linux/if_team.h b/include/uapi/linux/if_team.h index 13c61fecb7..a5c06243a4 100644 --- a/include/uapi/linux/if_team.h +++ b/include/uapi/linux/if_team.h @@ -1,108 +1,78 @@ -/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ -/* - * include/linux/if_team.h - Network team device driver header - * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/team.yaml */ +/* YNL-GEN uapi header */ -#ifndef _UAPI_LINUX_IF_TEAM_H_ -#define _UAPI_LINUX_IF_TEAM_H_ +#ifndef _UAPI_LINUX_IF_TEAM_H +#define _UAPI_LINUX_IF_TEAM_H +#define TEAM_GENL_NAME "team" +#define TEAM_GENL_VERSION 1 -#define TEAM_STRING_MAX_LEN 32 - -/********************************** - * NETLINK_GENERIC netlink family.
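For the F_DUPFD_QUERY command added in the fcntl.h hunk above: it is expected to report whether the fd passed as argument refers to the same open file description as the fd the fcntl is issued on (1 if so, 0 otherwise). A minimal sketch under that assumption:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/null", O_RDONLY);
	int other = dup(fd);

	/* Expected: 1 after dup(); a fresh open() would yield 0. */
	printf("same description: %d\n", fcntl(fd, F_DUPFD_QUERY, other));
	return 0;
}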
- **********************************/ - -enum { - TEAM_CMD_NOOP, - TEAM_CMD_OPTIONS_SET, - TEAM_CMD_OPTIONS_GET, - TEAM_CMD_PORT_LIST_GET, - - __TEAM_CMD_MAX, - TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1), -}; +#define TEAM_STRING_MAX_LEN 32 +#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event" enum { TEAM_ATTR_UNSPEC, - TEAM_ATTR_TEAM_IFINDEX, /* u32 */ - TEAM_ATTR_LIST_OPTION, /* nest */ - TEAM_ATTR_LIST_PORT, /* nest */ + TEAM_ATTR_TEAM_IFINDEX, + TEAM_ATTR_LIST_OPTION, + TEAM_ATTR_LIST_PORT, __TEAM_ATTR_MAX, - TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1, + TEAM_ATTR_MAX = (__TEAM_ATTR_MAX - 1) }; -/* Nested layout of get/set msg: - * - * [TEAM_ATTR_LIST_OPTION] - * [TEAM_ATTR_ITEM_OPTION] - * [TEAM_ATTR_OPTION_*], ... - * [TEAM_ATTR_ITEM_OPTION] - * [TEAM_ATTR_OPTION_*], ... - * ... - * [TEAM_ATTR_LIST_PORT] - * [TEAM_ATTR_ITEM_PORT] - * [TEAM_ATTR_PORT_*], ... - * [TEAM_ATTR_ITEM_PORT] - * [TEAM_ATTR_PORT_*], ... - * ... - */ - enum { TEAM_ATTR_ITEM_OPTION_UNSPEC, - TEAM_ATTR_ITEM_OPTION, /* nest */ + TEAM_ATTR_ITEM_OPTION, __TEAM_ATTR_ITEM_OPTION_MAX, - TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1, + TEAM_ATTR_ITEM_OPTION_MAX = (__TEAM_ATTR_ITEM_OPTION_MAX - 1) }; enum { TEAM_ATTR_OPTION_UNSPEC, - TEAM_ATTR_OPTION_NAME, /* string */ - TEAM_ATTR_OPTION_CHANGED, /* flag */ - TEAM_ATTR_OPTION_TYPE, /* u8 */ - TEAM_ATTR_OPTION_DATA, /* dynamic */ - TEAM_ATTR_OPTION_REMOVED, /* flag */ - TEAM_ATTR_OPTION_PORT_IFINDEX, /* u32 */ /* for per-port options */ - TEAM_ATTR_OPTION_ARRAY_INDEX, /* u32 */ /* for array options */ + TEAM_ATTR_OPTION_NAME, + TEAM_ATTR_OPTION_CHANGED, + TEAM_ATTR_OPTION_TYPE, + TEAM_ATTR_OPTION_DATA, + TEAM_ATTR_OPTION_REMOVED, + TEAM_ATTR_OPTION_PORT_IFINDEX, + TEAM_ATTR_OPTION_ARRAY_INDEX, __TEAM_ATTR_OPTION_MAX, - TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, + TEAM_ATTR_OPTION_MAX = (__TEAM_ATTR_OPTION_MAX - 1) }; enum { TEAM_ATTR_ITEM_PORT_UNSPEC, - TEAM_ATTR_ITEM_PORT, /* nest */ + TEAM_ATTR_ITEM_PORT, __TEAM_ATTR_ITEM_PORT_MAX, - TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1, + TEAM_ATTR_ITEM_PORT_MAX = (__TEAM_ATTR_ITEM_PORT_MAX - 1) }; enum { TEAM_ATTR_PORT_UNSPEC, - TEAM_ATTR_PORT_IFINDEX, /* u32 */ - TEAM_ATTR_PORT_CHANGED, /* flag */ - TEAM_ATTR_PORT_LINKUP, /* flag */ - TEAM_ATTR_PORT_SPEED, /* u32 */ - TEAM_ATTR_PORT_DUPLEX, /* u8 */ - TEAM_ATTR_PORT_REMOVED, /* flag */ + TEAM_ATTR_PORT_IFINDEX, + TEAM_ATTR_PORT_CHANGED, + TEAM_ATTR_PORT_LINKUP, + TEAM_ATTR_PORT_SPEED, + TEAM_ATTR_PORT_DUPLEX, + TEAM_ATTR_PORT_REMOVED, __TEAM_ATTR_PORT_MAX, - TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1, + TEAM_ATTR_PORT_MAX = (__TEAM_ATTR_PORT_MAX - 1) }; -/* - * NETLINK_GENERIC related info - */ -#define TEAM_GENL_NAME "team" -#define TEAM_GENL_VERSION 0x1 -#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event" +enum { + TEAM_CMD_NOOP, + TEAM_CMD_OPTIONS_SET, + TEAM_CMD_OPTIONS_GET, + TEAM_CMD_PORT_LIST_GET, + + __TEAM_CMD_MAX, + TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1) +}; -#endif /* _UAPI_LINUX_IF_TEAM_H_ */ +#endif /* _UAPI_LINUX_IF_TEAM_H */ diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 102119628f..e1a246dd8c 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -161,6 +161,14 @@ enum { #define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1) +#ifndef __KERNEL__ +/* Historically, tunnel flags have been defined as __be16 and now there are + * no free bits left. 
It is strongly advised to switch the already existing + * userspace code to the new *_BIT definitions below, as __be16 + * can't be simply cast to a wider type on LE systems. All new flags and + * code must use *_BIT only. + */ + #define TUNNEL_CSUM __cpu_to_be16(0x01) #define TUNNEL_ROUTING __cpu_to_be16(0x02) #define TUNNEL_KEY __cpu_to_be16(0x04) @@ -181,5 +189,33 @@ enum { #define TUNNEL_OPTIONS_PRESENT \ (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT | \ TUNNEL_GTP_OPT) +#endif + +enum { + IP_TUNNEL_CSUM_BIT = 0U, + IP_TUNNEL_ROUTING_BIT, + IP_TUNNEL_KEY_BIT, + IP_TUNNEL_SEQ_BIT, + IP_TUNNEL_STRICT_BIT, + IP_TUNNEL_REC_BIT, + IP_TUNNEL_VERSION_BIT, + IP_TUNNEL_NO_KEY_BIT, + IP_TUNNEL_DONT_FRAGMENT_BIT, + IP_TUNNEL_OAM_BIT, + IP_TUNNEL_CRIT_OPT_BIT, + IP_TUNNEL_GENEVE_OPT_BIT, /* OPTIONS_PRESENT */ + IP_TUNNEL_VXLAN_OPT_BIT, /* OPTIONS_PRESENT */ + IP_TUNNEL_NOCACHE_BIT, + IP_TUNNEL_ERSPAN_OPT_BIT, /* OPTIONS_PRESENT */ + IP_TUNNEL_GTP_OPT_BIT, /* OPTIONS_PRESENT */ + + IP_TUNNEL_VTI_BIT, + IP_TUNNEL_SIT_ISATAP_BIT = IP_TUNNEL_VTI_BIT, + + /* Flags starting from here are not available via the old UAPI */ + IP_TUNNEL_PFCP_OPT_BIT, /* OPTIONS_PRESENT */ + + __IP_TUNNEL_FLAG_NUM, +}; #endif /* _UAPI_IF_TUNNEL_H_ */ diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index d316984104..42ec5ddaab 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -41,6 +41,10 @@ */ #define XDP_UMEM_TX_SW_CSUM (1 << 1) +/* Request to reserve tx_metadata_len bytes of per-chunk metadata. + */ +#define XDP_UMEM_TX_METADATA_LEN (1 << 2) + struct sockaddr_xdp { __u16 sxdp_family; __u16 sxdp_flags; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 7bd10201a0..994bf7af0e 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -72,6 +72,7 @@ struct io_uring_sqe { __u32 waitid_flags; __u32 futex_flags; __u32 install_fd_flags; + __u32 nop_flags; }; __u64 user_data; /* data to be passed back at completion time */ /* pack this to avoid bogus arm OABI complaints */ @@ -115,7 +116,7 @@ struct io_uring_sqe { */ #define IORING_FILE_INDEX_ALLOC (~0U) -enum { +enum io_uring_sqe_flags_bit { IOSQE_FIXED_FILE_BIT, IOSQE_IO_DRAIN_BIT, IOSQE_IO_LINK_BIT, @@ -351,11 +352,20 @@ enum io_uring_op { * 0 is reported if zerocopy was actually possible. * IORING_NOTIF_USAGE_ZC_COPIED if data was copied * (at least partially). + * + * IORING_RECVSEND_BUNDLE Used with IOSQE_BUFFER_SELECT. If set, send or + * recv will grab as many buffers from the buffer + * group ID given and send them all. The completion + * result will be the number of buffers sent, with + * the starting buffer ID in cqe->flags as per + * usual for provided buffer usage. The buffers + * will be contiguous from the starting buffer ID.
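A liburing-style sketch of the IORING_RECVSEND_BUNDLE flag documented above; ring, sockfd and bgid are assumed, and the helper names follow liburing conventions rather than anything defined in this header:

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

io_uring_prep_send(sqe, sockfd, NULL, 0, 0); /* no buffer: selected from group */
sqe->flags |= IOSQE_BUFFER_SELECT;           /* bundles require buffer select */
sqe->buf_group = bgid;                       /* provided-buffer group to drain */
sqe->ioprio |= IORING_RECVSEND_BUNDLE;       /* send/recv flags live in sqe->ioprio */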
*/ #define IORING_RECVSEND_POLL_FIRST (1U << 0) #define IORING_RECV_MULTISHOT (1U << 1) #define IORING_RECVSEND_FIXED_BUF (1U << 2) #define IORING_SEND_ZC_REPORT_USAGE (1U << 3) +#define IORING_RECVSEND_BUNDLE (1U << 4) /* * cqe.res for IORING_CQE_F_NOTIF if @@ -370,11 +380,13 @@ enum io_uring_op { * accept flags stored in sqe->ioprio */ #define IORING_ACCEPT_MULTISHOT (1U << 0) +#define IORING_ACCEPT_DONTWAIT (1U << 1) +#define IORING_ACCEPT_POLL_FIRST (1U << 2) /* * IORING_OP_MSG_RING command types, stored in sqe->addr */ -enum { +enum io_uring_msg_ring_flags { IORING_MSG_DATA, /* pass sqe->len as 'res' and off as user_data */ IORING_MSG_SEND_FD, /* send a registered fd to another ring */ }; @@ -397,6 +409,13 @@ enum { #define IORING_FIXED_FD_NO_CLOEXEC (1U << 0) /* + * IORING_OP_NOP flags (sqe->nop_flags) + * + * IORING_NOP_INJECT_RESULT Inject result from sqe->result + */ +#define IORING_NOP_INJECT_RESULT (1U << 0) + +/* * IO completion data structure (Completion Queue Entry) */ struct io_uring_cqe { @@ -425,9 +444,7 @@ struct io_uring_cqe { #define IORING_CQE_F_SOCK_NONEMPTY (1U << 2) #define IORING_CQE_F_NOTIF (1U << 3) -enum { - IORING_CQE_BUFFER_SHIFT = 16, -}; +#define IORING_CQE_BUFFER_SHIFT 16 /* * Magic offsets for the application to mmap the data it needs @@ -522,11 +539,12 @@ struct io_uring_params { #define IORING_FEAT_CQE_SKIP (1U << 11) #define IORING_FEAT_LINKED_FILE (1U << 12) #define IORING_FEAT_REG_REG_RING (1U << 13) +#define IORING_FEAT_RECVSEND_BUNDLE (1U << 14) /* * io_uring_register(2) opcodes and arguments */ -enum { +enum io_uring_register_op { IORING_REGISTER_BUFFERS = 0, IORING_UNREGISTER_BUFFERS = 1, IORING_REGISTER_FILES = 2, @@ -583,7 +601,7 @@ enum { }; /* io-wq worker categories */ -enum { +enum io_wq_type { IO_WQ_BOUND, IO_WQ_UNBOUND, }; @@ -688,7 +706,7 @@ struct io_uring_buf_ring { * IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT) * to get a virtual mapping for the ring. 
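The mmap offset formula quoted just above can be applied directly. A minimal sketch, assuming a kernel-allocated provided-buffer ring (IOU_PBUF_RING_MMAP) with ring_fd, bgid and entries already set up:

off_t off = IORING_OFF_PBUF_RING |
	    ((unsigned long long)bgid << IORING_OFF_PBUF_SHIFT);
struct io_uring_buf_ring *br =
	mmap(NULL, entries * sizeof(struct io_uring_buf),
	     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
	     ring_fd, off);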
*/ -enum { +enum io_uring_register_pbuf_ring_flags { IOU_PBUF_RING_MMAP = 1, }; @@ -719,7 +737,7 @@ struct io_uring_napi { /* * io_uring_restriction->opcode values */ -enum { +enum io_uring_register_restriction_op { /* Allow an io_uring_register(2) opcode */ IORING_RESTRICTION_REGISTER_OP = 0, @@ -775,7 +793,7 @@ struct io_uring_recvmsg_out { /* * Argument for IORING_OP_URING_CMD when file is a socket */ -enum { +enum io_uring_socket_op { SOCKET_URING_OP_SIOCINQ = 0, SOCKET_URING_OP_SIOCOUTQ, SOCKET_URING_OP_GETSOCKOPT, diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index c17bb096ea..5ae1741ea8 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -13,6 +13,7 @@ #define KEXEC_ON_CRASH 0x00000001 #define KEXEC_PRESERVE_CONTEXT 0x00000002 #define KEXEC_UPDATE_ELFCOREHDR 0x00000004 +#define KEXEC_CRASH_HOTPLUG_SUPPORT 0x00000008 #define KEXEC_ARCH_MASK 0xffff0000 /* diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2190adbe30..d03842abae 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1221,9 +1221,9 @@ struct kvm_vfio_spapr_tce { /* Available with KVM_CAP_SPAPR_RESIZE_HPT */ #define KVM_PPC_RESIZE_HPT_PREPARE _IOR(KVMIO, 0xad, struct kvm_ppc_resize_hpt) #define KVM_PPC_RESIZE_HPT_COMMIT _IOR(KVMIO, 0xae, struct kvm_ppc_resize_hpt) -/* Available with KVM_CAP_PPC_RADIX_MMU or KVM_CAP_PPC_HASH_MMU_V3 */ +/* Available with KVM_CAP_PPC_MMU_RADIX or KVM_CAP_PPC_MMU_HASH_V3 */ #define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg) -/* Available with KVM_CAP_PPC_RADIX_MMU */ +/* Available with KVM_CAP_PPC_MMU_RADIX */ #define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info) /* Available with KVM_CAP_PPC_GET_CPU_CHAR */ #define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char) diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index 25c8d76775..68625e728f 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -128,7 +128,7 @@ struct landlock_net_port_attr { * files and directories. Files or directories opened before the sandboxing * are not subject to these restrictions. * - * A file can only receive these access rights: + * The following access rights apply only to files: * * - %LANDLOCK_ACCESS_FS_EXECUTE: Execute a file. * - %LANDLOCK_ACCESS_FS_WRITE_FILE: Open a file with write access. Note that @@ -138,12 +138,13 @@ struct landlock_net_port_attr { * - %LANDLOCK_ACCESS_FS_READ_FILE: Open a file with read access. * - %LANDLOCK_ACCESS_FS_TRUNCATE: Truncate a file with :manpage:`truncate(2)`, * :manpage:`ftruncate(2)`, :manpage:`creat(2)`, or :manpage:`open(2)` with - * ``O_TRUNC``. Whether an opened file can be truncated with - * :manpage:`ftruncate(2)` is determined during :manpage:`open(2)`, in the - * same way as read and write permissions are checked during - * :manpage:`open(2)` using %LANDLOCK_ACCESS_FS_READ_FILE and - * %LANDLOCK_ACCESS_FS_WRITE_FILE. This access right is available since the - * third version of the Landlock ABI. + * ``O_TRUNC``. This access right is available since the third version of the + * Landlock ABI. + * + * Whether an opened file can be truncated with :manpage:`ftruncate(2)` or used + * with `ioctl(2)` is determined during :manpage:`open(2)`, in the same way as + * read and write permissions are checked during :manpage:`open(2)` using + * %LANDLOCK_ACCESS_FS_READ_FILE and %LANDLOCK_ACCESS_FS_WRITE_FILE. * * A directory can receive access rights related to files or directories. 
The * following access right is applied to the directory itself, and the @@ -198,13 +199,33 @@ struct landlock_net_port_attr { * If multiple requirements are not met, the ``EACCES`` error code takes * precedence over ``EXDEV``. * + * The following access right applies both to files and directories: + * + * - %LANDLOCK_ACCESS_FS_IOCTL_DEV: Invoke :manpage:`ioctl(2)` commands on an opened + * character or block device. + * + * This access right applies to all `ioctl(2)` commands implemented by device + * drivers. However, the following common IOCTL commands continue to be + * invokable independent of the %LANDLOCK_ACCESS_FS_IOCTL_DEV right: + * + * * IOCTL commands targeting file descriptors (``FIOCLEX``, ``FIONCLEX``), + * * IOCTL commands targeting file descriptions (``FIONBIO``, ``FIOASYNC``), + * * IOCTL commands targeting file systems (``FIFREEZE``, ``FITHAW``, + * ``FIGETBSZ``, ``FS_IOC_GETFSUUID``, ``FS_IOC_GETFSSYSFSPATH``) + * * Some IOCTL commands which do not make sense when used with devices, but + * whose implementations are safe and return the right error codes + * (``FS_IOC_FIEMAP``, ``FICLONE``, ``FICLONERANGE``, ``FIDEDUPERANGE``) + * + * This access right is available since the fifth version of the Landlock + * ABI. + * * .. warning:: * * It is currently not possible to restrict some file-related actions * accessible through these syscall families: :manpage:`chdir(2)`, * :manpage:`stat(2)`, :manpage:`flock(2)`, :manpage:`chmod(2)`, * :manpage:`chown(2)`, :manpage:`setxattr(2)`, :manpage:`utime(2)`, - * :manpage:`ioctl(2)`, :manpage:`fcntl(2)`, :manpage:`access(2)`. + * :manpage:`fcntl(2)`, :manpage:`access(2)`. * Future Landlock evolutions will enable to restrict them. */ /* clang-format off */ @@ -223,6 +244,7 @@ struct landlock_net_port_attr { #define LANDLOCK_ACCESS_FS_MAKE_SYM (1ULL << 12) #define LANDLOCK_ACCESS_FS_REFER (1ULL << 13) #define LANDLOCK_ACCESS_FS_TRUNCATE (1ULL << 14) +#define LANDLOCK_ACCESS_FS_IOCTL_DEV (1ULL << 15) /* clang-format on */ /** diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 1b40a968ba..bb575f3ab4 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -37,6 +37,7 @@ #define HOSTFS_SUPER_MAGIC 0x00c0ffee #define OVERLAYFS_SUPER_MAGIC 0x794c7630 #define FUSE_SUPER_MAGIC 0x65735546 +#define BCACHEFS_SUPER_MAGIC 0xca451a4e #define MINIX_SUPER_MAGIC 0x137F /* minix v1 fs, 14 char names */ #define MINIX_SUPER_MAGIC2 0x138F /* minix v1 fs, 30 char names */ diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h index f05f747e44..d4c1d99101 100644 --- a/include/uapi/linux/media-bus-format.h +++ b/include/uapi/linux/media-bus-format.h @@ -174,4 +174,13 @@ */ #define MEDIA_BUS_FMT_METADATA_FIXED 0x7001 +/* Generic line based metadata formats for serial buses. Next is 0x8008. 
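For the LANDLOCK_ACCESS_FS_IOCTL_DEV right introduced in the landlock.h hunk above, a minimal sketch of a ruleset that handles it (ABI v5 assumed), so device-node ioctls are denied unless a rule grants them:

#include <linux/landlock.h>
#include <sys/syscall.h>
#include <unistd.h>

static int make_ruleset(void)
{
	struct landlock_ruleset_attr attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
				     LANDLOCK_ACCESS_FS_WRITE_FILE |
				     LANDLOCK_ACCESS_FS_IOCTL_DEV,
	};

	/* Returns a ruleset fd to be populated and enforced as usual. */
	return syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);
}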
*/ +#define MEDIA_BUS_FMT_META_8 0x8001 +#define MEDIA_BUS_FMT_META_10 0x8002 +#define MEDIA_BUS_FMT_META_12 0x8003 +#define MEDIA_BUS_FMT_META_14 0x8004 +#define MEDIA_BUS_FMT_META_16 0x8005 +#define MEDIA_BUS_FMT_META_20 0x8006 +#define MEDIA_BUS_FMT_META_24 0x8007 + #endif /* __LINUX_MEDIA_BUS_FORMAT_H */ diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 74cfe49689..67d015df88 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -58,6 +58,10 @@ struct mptcp_info { __u64 mptcpi_bytes_received; __u64 mptcpi_bytes_acked; __u8 mptcpi_subflows_total; + __u8 reserved[3]; + __u32 mptcpi_last_data_sent; + __u32 mptcpi_last_data_recv; + __u32 mptcpi_last_ack_recv; }; /* MPTCP Reset reason codes, rfc8684 */ diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h index bb65ee840c..43742ac5b0 100644 --- a/include/uapi/linux/netdev.h +++ b/include/uapi/linux/netdev.h @@ -146,6 +146,28 @@ enum { NETDEV_A_QSTATS_TX_PACKETS, NETDEV_A_QSTATS_TX_BYTES, NETDEV_A_QSTATS_RX_ALLOC_FAIL, + NETDEV_A_QSTATS_RX_HW_DROPS, + NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, + NETDEV_A_QSTATS_RX_CSUM_COMPLETE, + NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, + NETDEV_A_QSTATS_RX_CSUM_NONE, + NETDEV_A_QSTATS_RX_CSUM_BAD, + NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, + NETDEV_A_QSTATS_RX_HW_GRO_BYTES, + NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, + NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, + NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, + NETDEV_A_QSTATS_TX_HW_DROPS, + NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, + NETDEV_A_QSTATS_TX_CSUM_NONE, + NETDEV_A_QSTATS_TX_NEEDS_CSUM, + NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, + NETDEV_A_QSTATS_TX_HW_GSO_BYTES, + NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, + NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, + NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, + NETDEV_A_QSTATS_TX_STOP, + NETDEV_A_QSTATS_TX_WAKE, __NETDEV_A_QSTATS_MAX, NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index aa4094ca24..639894ed1b 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1376,7 +1376,7 @@ enum nft_secmark_attributes { #define NFTA_SECMARK_MAX (__NFTA_SECMARK_MAX - 1) /* Max security context length */ -#define NFT_SECMARK_CTX_MAXLEN 256 +#define NFT_SECMARK_CTX_MAXLEN 4096 /** * enum nft_reject_types - nf_tables reject expression reject types diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h index 946cb62d64..f356f2ba38 100644 --- a/include/uapi/linux/nfs.h +++ b/include/uapi/linux/nfs.h @@ -61,7 +61,6 @@ NFSERR_NOSPC = 28, /* v2 v3 v4 */ NFSERR_ROFS = 30, /* v2 v3 v4 */ NFSERR_MLINK = 31, /* v3 v4 */ - NFSERR_OPNOTSUPP = 45, /* v2 v3 */ NFSERR_NAMETOOLONG = 63, /* v2 v3 v4 */ NFSERR_NOTEMPTY = 66, /* v2 v3 v4 */ NFSERR_DQUOT = 69, /* v2 v3 v4 */ diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h index 3cd044edee..24c86dbc7e 100644 --- a/include/uapi/linux/nfsd_netlink.h +++ b/include/uapi/linux/nfsd_netlink.h @@ -30,7 +30,54 @@ enum { }; enum { + NFSD_A_SERVER_THREADS = 1, + NFSD_A_SERVER_GRACETIME, + NFSD_A_SERVER_LEASETIME, + NFSD_A_SERVER_SCOPE, + + __NFSD_A_SERVER_MAX, + NFSD_A_SERVER_MAX = (__NFSD_A_SERVER_MAX - 1) +}; + +enum { + NFSD_A_VERSION_MAJOR = 1, + NFSD_A_VERSION_MINOR, + NFSD_A_VERSION_ENABLED, + + __NFSD_A_VERSION_MAX, + NFSD_A_VERSION_MAX = (__NFSD_A_VERSION_MAX - 1) +}; + +enum { + NFSD_A_SERVER_PROTO_VERSION = 1, + + __NFSD_A_SERVER_PROTO_MAX, + NFSD_A_SERVER_PROTO_MAX = (__NFSD_A_SERVER_PROTO_MAX 
- 1) +}; + +enum { + NFSD_A_SOCK_ADDR = 1, + NFSD_A_SOCK_TRANSPORT_NAME, + + __NFSD_A_SOCK_MAX, + NFSD_A_SOCK_MAX = (__NFSD_A_SOCK_MAX - 1) +}; + +enum { + NFSD_A_SERVER_SOCK_ADDR = 1, + + __NFSD_A_SERVER_SOCK_MAX, + NFSD_A_SERVER_SOCK_MAX = (__NFSD_A_SERVER_SOCK_MAX - 1) +}; + +enum { NFSD_CMD_RPC_STATUS_GET = 1, + NFSD_CMD_THREADS_SET, + NFSD_CMD_THREADS_GET, + NFSD_CMD_VERSION_SET, + NFSD_CMD_VERSION_GET, + NFSD_CMD_LISTENER_SET, + NFSD_CMD_LISTENER_GET, __NFSD_CMD_MAX, NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f23ecbdd84..f917bc6c9b 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -413,8 +413,8 @@ * are like for %NL80211_CMD_SET_BEACON, and additionally parameters that * do not change are used, these include %NL80211_ATTR_BEACON_INTERVAL, * %NL80211_ATTR_DTIM_PERIOD, %NL80211_ATTR_SSID, - * %NL80211_ATTR_HIDDEN_SSID, %NL80211_ATTR_CIPHERS_PAIRWISE, - * %NL80211_ATTR_CIPHER_GROUP, %NL80211_ATTR_WPA_VERSIONS, + * %NL80211_ATTR_HIDDEN_SSID, %NL80211_ATTR_CIPHER_SUITES_PAIRWISE, + * %NL80211_ATTR_CIPHER_SUITE_GROUP, %NL80211_ATTR_WPA_VERSIONS, * %NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY, * %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_INACTIVITY_TIMEOUT, * %NL80211_ATTR_ACL_POLICY and %NL80211_ATTR_MAC_ADDRS. @@ -442,20 +442,15 @@ * stations connected and using at least that link as one of its links. * * @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to - * destination %NL80211_ATTR_MAC on the interface identified by - * %NL80211_ATTR_IFINDEX. + * destination %NL80211_ATTR_MAC on the interface identified by + * %NL80211_ATTR_IFINDEX. * @NL80211_CMD_SET_MPATH: Set mesh path attributes for mesh path to - * destination %NL80211_ATTR_MAC on the interface identified by - * %NL80211_ATTR_IFINDEX. + * destination %NL80211_ATTR_MAC on the interface identified by + * %NL80211_ATTR_IFINDEX. * @NL80211_CMD_NEW_MPATH: Create a new mesh path for the destination given by * %NL80211_ATTR_MAC via %NL80211_ATTR_MPATH_NEXT_HOP. * @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by * %NL80211_ATTR_MAC. - * @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the - * interface identified by %NL80211_ATTR_IFINDEX. - * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC - * or, if no MAC address given, all mesh paths, on the interface identified - * by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by * %NL80211_ATTR_IFINDEX. * @@ -476,15 +471,15 @@ * after being queried by the kernel. CRDA replies by sending a regulatory * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our * current alpha2 if it found a match. It also provides - * NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each - * regulatory rule is a nested set of attributes given by - * %NL80211_ATTR_REG_RULE_FREQ_[START|END] and - * %NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by - * %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and - * %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP. + * NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each + * regulatory rule is a nested set of attributes given by + * %NL80211_ATTR_REG_RULE_FREQ_[START|END] and + * %NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by + * %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and + * %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP. 
* @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain - * to the specified ISO/IEC 3166-1 alpha2 country code. The core will - * store this as a valid request and then query userspace for it. + * to the specified ISO/IEC 3166-1 alpha2 country code. The core will + * store this as a valid request and then query userspace for it. * * @NL80211_CMD_GET_MESH_CONFIG: Get mesh networking properties for the * interface identified by %NL80211_ATTR_IFINDEX @@ -574,31 +569,31 @@ * @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries. * * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain - * has been changed and provides details of the request information - * that caused the change such as who initiated the regulatory request - * (%NL80211_ATTR_REG_INITIATOR), the wiphy_idx - * (%NL80211_ATTR_REG_ALPHA2) on which the request was made from if - * the initiator was %NL80211_REGDOM_SET_BY_COUNTRY_IE or - * %NL80211_REGDOM_SET_BY_DRIVER, the type of regulatory domain - * set (%NL80211_ATTR_REG_TYPE), if the type of regulatory domain is - * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on - * to (%NL80211_ATTR_REG_ALPHA2). + * has been changed and provides details of the request information + * that caused the change such as who initiated the regulatory request + * (%NL80211_ATTR_REG_INITIATOR), the wiphy_idx + * (%NL80211_ATTR_REG_ALPHA2) on which the request was made from if + * the initiator was %NL80211_REGDOM_SET_BY_COUNTRY_IE or + * %NL80211_REGDOM_SET_BY_DRIVER, the type of regulatory domain + * set (%NL80211_ATTR_REG_TYPE), if the type of regulatory domain is + * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on + * to (%NL80211_ATTR_REG_ALPHA2). * @NL80211_CMD_REG_BEACON_HINT: indicates to userspace that an AP beacon - * has been found while world roaming thus enabling active scan or - * any mode of operation that initiates TX (beacons) on a channel - * where we would not have been able to do either before. As an example - * if you are world roaming (regulatory domain set to world or if your - * driver is using a custom world roaming regulatory domain) and while - * doing a passive scan on the 5 GHz band you find an AP there (if not - * on a DFS channel) you will now be able to actively scan for that AP - * or use AP mode on your card on that same channel. Note that this will - * never be used for channels 1-11 on the 2 GHz band as they are always - * enabled world wide. This beacon hint is only sent if your device had - * either disabled active scanning or beaconing on a channel. We send to - * userspace the wiphy on which we removed a restriction from - * (%NL80211_ATTR_WIPHY) and the channel on which this occurred - * before (%NL80211_ATTR_FREQ_BEFORE) and after (%NL80211_ATTR_FREQ_AFTER) - * the beacon hint was processed. + * has been found while world roaming thus enabling active scan or + * any mode of operation that initiates TX (beacons) on a channel + * where we would not have been able to do either before. As an example + * if you are world roaming (regulatory domain set to world or if your + * driver is using a custom world roaming regulatory domain) and while + * doing a passive scan on the 5 GHz band you find an AP there (if not + * on a DFS channel) you will now be able to actively scan for that AP + * or use AP mode on your card on that same channel. Note that this will + * never be used for channels 1-11 on the 2 GHz band as they are always + * enabled world wide. 
This beacon hint is only sent if your device had + * either disabled active scanning or beaconing on a channel. We send to + * userspace the wiphy on which we removed a restriction from + * (%NL80211_ATTR_WIPHY) and the channel on which this occurred + * before (%NL80211_ATTR_FREQ_BEFORE) and after (%NL80211_ATTR_FREQ_AFTER) + * the beacon hint was processed. * * @NL80211_CMD_AUTHENTICATE: authentication request and notification. * This command is used both as a command (request to authenticate) and @@ -1120,7 +1115,7 @@ * current configuration is not changed. If it is present but * set to zero, the configuration is changed to don't-care * (i.e. the device can decide what to do). - * @NL80211_CMD_NAN_FUNC_MATCH: Notification sent when a match is reported. + * @NL80211_CMD_NAN_MATCH: Notification sent when a match is reported. * This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and * %NL80211_ATTR_COOKIE. * @@ -1715,21 +1710,21 @@ enum nl80211_commands { * (see &enum nl80211_plink_action). * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path. * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path - * info given for %NL80211_CMD_GET_MPATH, nested attribute described at + * info given for %NL80211_CMD_GET_MPATH, nested attribute described at * &enum nl80211_mpath_info. * * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of * &enum nl80211_mntr_flags. * * @NL80211_ATTR_REG_ALPHA2: an ISO-3166-alpha2 country code for which the - * current regulatory domain should be set to or is already set to. - * For example, 'CR', for Costa Rica. This attribute is used by the kernel - * to query the CRDA to retrieve one regulatory domain. This attribute can - * also be used by userspace to query the kernel for the currently set - * regulatory domain. We chose an alpha2 as that is also used by the - * IEEE-802.11 country information element to identify a country. - * Users can also simply ask the wireless core to set regulatory domain - * to a specific alpha2. + * current regulatory domain should be set to or is already set to. + * For example, 'CR', for Costa Rica. This attribute is used by the kernel + * to query the CRDA to retrieve one regulatory domain. This attribute can + * also be used by userspace to query the kernel for the currently set + * regulatory domain. We chose an alpha2 as that is also used by the + * IEEE-802.11 country information element to identify a country. + * Users can also simply ask the wireless core to set regulatory domain + * to a specific alpha2. * @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory * rules. * @@ -1772,9 +1767,9 @@ enum nl80211_commands { * @NL80211_ATTR_BSS: scan result BSS * * @NL80211_ATTR_REG_INITIATOR: indicates who requested the regulatory domain - * currently in effect. This could be any of the %NL80211_REGDOM_SET_BY_* + * currently in effect. This could be any of the %NL80211_REGDOM_SET_BY_* * @NL80211_ATTR_REG_TYPE: indicates the type of the regulatory domain currently - * set. This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*) + * set. This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*) * * @NL80211_ATTR_SUPPORTED_COMMANDS: wiphy attribute that specifies * an array of command numbers (i.e. a mapping index to command number) @@ -1793,15 +1788,15 @@ enum nl80211_commands { * a u32 * * @NL80211_ATTR_FREQ_BEFORE: A channel which has suffered a regulatory change - * due to considerations from a beacon hint. 
This attribute reflects - * the state of the channel _before_ the beacon hint processing. This - * attributes consists of a nested attribute containing - * NL80211_FREQUENCY_ATTR_* + * due to considerations from a beacon hint. This attribute reflects + * the state of the channel _before_ the beacon hint processing. This + * attributes consists of a nested attribute containing + * NL80211_FREQUENCY_ATTR_* * @NL80211_ATTR_FREQ_AFTER: A channel which has suffered a regulatory change - * due to considerations from a beacon hint. This attribute reflects - * the state of the channel _after_ the beacon hint processing. This - * attributes consists of a nested attribute containing - * NL80211_FREQUENCY_ATTR_* + * due to considerations from a beacon hint. This attribute reflects + * the state of the channel _after_ the beacon hint processing. This + * attributes consists of a nested attribute containing + * NL80211_FREQUENCY_ATTR_* * * @NL80211_ATTR_CIPHER_SUITES: a set of u32 values indicating the supported * cipher suites @@ -1862,12 +1857,6 @@ enum nl80211_commands { * that protected APs should be used. This is also used with NEW_BEACON to * indicate that the BSS is to use protection. * - * @NL80211_ATTR_CIPHERS_PAIRWISE: Used with CONNECT, ASSOCIATE, and NEW_BEACON - * to indicate which unicast key ciphers will be used with the connection - * (an array of u32). - * @NL80211_ATTR_CIPHER_GROUP: Used with CONNECT, ASSOCIATE, and NEW_BEACON to - * indicate which group key cipher will be used with the connection (a - * u32). * @NL80211_ATTR_WPA_VERSIONS: Used with CONNECT, ASSOCIATE, and NEW_BEACON to * indicate which WPA version(s) the AP we want to associate with is using * (a u32 with flags from &enum nl80211_wpa_versions). @@ -1898,6 +1887,7 @@ enum nl80211_commands { * with %NL80211_KEY_* sub-attributes * * @NL80211_ATTR_PID: Process ID of a network namespace. + * @NL80211_ATTR_NETNS_FD: File descriptor of a network namespace. * * @NL80211_ATTR_GENERATION: Used to indicate consistent snapshots for * dumps. This number increases whenever the object list being @@ -1952,6 +1942,7 @@ enum nl80211_commands { * * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was * acknowledged by the recipient. + * @NL80211_ATTR_ACK_SIGNAL: Station's ack signal strength (s32) * * @NL80211_ATTR_PS_STATE: powersave state, using &enum nl80211_ps_state values. * @@ -2149,6 +2140,9 @@ enum nl80211_commands { * @NL80211_ATTR_DISABLE_HE: Force HE capable interfaces to disable * this feature during association. This is a flag attribute. * Currently only supported in mac80211 drivers. + * @NL80211_ATTR_DISABLE_EHT: Force EHT capable interfaces to disable + * this feature during association. This is a flag attribute. + * Currently only supported in mac80211 drivers. * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the * ATTR_HT_CAPABILITY to which attention should be paid. * Currently, only mac80211 NICs support this feature. @@ -2158,6 +2152,12 @@ enum nl80211_commands { * All values are treated as suggestions and may be ignored * by the driver as required. The actual values may be seen in * the station debugfs ht_caps file. + * @NL80211_ATTR_VHT_CAPABILITY_MASK: Specify which bits of the + * ATTR_VHT_CAPABILITY to which attention should be paid. + * Currently, only mac80211 NICs support this feature. + * All values are treated as suggestions and may be ignored + * by the driver as required. The actual values may be seen in + * the station debugfs vht_caps file. 
* * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country * abides to when initiating radiation on DFS channels. A country maps @@ -2416,7 +2416,7 @@ enum nl80211_commands { * scheduled scan is started. Or the delay before a WoWLAN * net-detect scan is started, counting from the moment the * system is suspended. This value is a u32, in seconds. - + * * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device * is operating in an indoor environment. * @@ -3565,7 +3565,7 @@ enum nl80211_sta_flags { * enum nl80211_sta_p2p_ps_status - station support of P2P PS * * @NL80211_P2P_PS_UNSUPPORTED: station doesn't support P2P PS mechanism - * @@NL80211_P2P_PS_SUPPORTED: station supports P2P PS mechanism + * @NL80211_P2P_PS_SUPPORTED: station supports P2P PS mechanism * @NUM_NL80211_P2P_PS_STATUS: number of values */ enum nl80211_sta_p2p_ps_status { @@ -3603,9 +3603,9 @@ enum nl80211_he_gi { /** * enum nl80211_he_ltf - HE long training field - * @NL80211_RATE_INFO_HE_1xLTF: 3.2 usec - * @NL80211_RATE_INFO_HE_2xLTF: 6.4 usec - * @NL80211_RATE_INFO_HE_4xLTF: 12.8 usec + * @NL80211_RATE_INFO_HE_1XLTF: 3.2 usec + * @NL80211_RATE_INFO_HE_2XLTF: 6.4 usec + * @NL80211_RATE_INFO_HE_4XLTF: 12.8 usec */ enum nl80211_he_ltf { NL80211_RATE_INFO_HE_1XLTF, @@ -3720,7 +3720,7 @@ enum nl80211_eht_ru_alloc { * @NL80211_RATE_INFO_HE_GI: HE guard interval identifier * (u8, see &enum nl80211_he_gi) * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1) - * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then + * @NL80211_RATE_INFO_HE_RU_ALLOC: HE RU allocation, if not present then * non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc) * @NL80211_RATE_INFO_320_MHZ_WIDTH: 320 MHz bitrate * @NL80211_RATE_INFO_EHT_MCS: EHT MCS index (u8, 0-15) @@ -3823,7 +3823,7 @@ enum nl80211_sta_bss_param { * (u64, to this station) * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm) * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute - * containing info as possible, see &enum nl80211_rate_info + * containing info as possible, see &enum nl80211_rate_info * @NL80211_STA_INFO_RX_PACKETS: total received packet (MSDUs and MMPDUs) * (u32, from this station) * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (MSDUs and MMPDUs) @@ -3852,8 +3852,8 @@ enum nl80211_sta_bss_param { * Contains a nested array of signal strength attributes (u8, dBm) * @NL80211_STA_INFO_CHAIN_SIGNAL_AVG: per-chain signal strength average * Same format as NL80211_STA_INFO_CHAIN_SIGNAL. 
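A minimal sketch of consuming these station attributes from a %NL80211_CMD_GET_STATION reply, assuming libnl-genl-3 and the usual receive-callback plumbing (error handling trimmed):

#include <stdio.h>
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Print the last-PPDU signal strength from a GET_STATION reply. The nested
 * NL80211_ATTR_STA_INFO attribute carries the NL80211_STA_INFO_* set
 * documented above; SIGNAL is a u8 holding a signed dBm value. */
static int sta_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *tb[NL80211_ATTR_MAX + 1];
	struct nlattr *sinfo[NL80211_STA_INFO_MAX + 1];

	nla_parse(tb, NL80211_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
		  genlmsg_attrlen(gnlh, 0), NULL);
	if (!tb[NL80211_ATTR_STA_INFO] ||
	    nla_parse_nested(sinfo, NL80211_STA_INFO_MAX,
			     tb[NL80211_ATTR_STA_INFO], NULL))
		return NL_SKIP;
	if (sinfo[NL80211_STA_INFO_SIGNAL])
		printf("signal: %d dBm\n",
		       (int)(int8_t)nla_get_u8(sinfo[NL80211_STA_INFO_SIGNAL]));
	return NL_SKIP;
}

The same pattern extends to the nested TX_BITRATE attribute, whose contents are described by &enum nl80211_rate_info.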
- * @NL80211_STA_EXPECTED_THROUGHPUT: expected throughput considering also the - * 802.11 header (u32, kbps) + * @NL80211_STA_INFO_EXPECTED_THROUGHPUT: expected throughput considering also + * the 802.11 header (u32, kbps) * @NL80211_STA_INFO_RX_DROP_MISC: RX packets dropped for unspecified reasons * (u64) * @NL80211_STA_INFO_BEACON_RX: number of beacons received from this peer (u64) @@ -4039,7 +4039,7 @@ enum nl80211_mpath_flags { * @NL80211_MPATH_INFO_METRIC: metric (cost) of this mesh path * @NL80211_MPATH_INFO_EXPTIME: expiration time for the path, in msec from now * @NL80211_MPATH_INFO_FLAGS: mesh path flags, enumerated in - * &enum nl80211_mpath_flags; + * &enum nl80211_mpath_flags; * @NL80211_MPATH_INFO_DISCOVERY_TIMEOUT: total path discovery timeout, in msec * @NL80211_MPATH_INFO_DISCOVERY_RETRIES: mesh path discovery retries * @NL80211_MPATH_INFO_HOP_COUNT: hop count to destination @@ -4179,7 +4179,7 @@ enum nl80211_band_attr { * @NL80211_WMMR_CW_MAX: Maximum contention window slot. * @NL80211_WMMR_AIFSN: Arbitration Inter Frame Space. * @NL80211_WMMR_TXOP: Maximum allowed tx operation time. - * @nl80211_WMMR_MAX: highest possible wmm rule. + * @NL80211_WMMR_MAX: highest possible wmm rule. * @__NL80211_WMMR_LAST: Internal use. */ enum nl80211_wmm_rule { @@ -4201,8 +4201,9 @@ enum nl80211_wmm_rule { * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current * regulatory domain. * @NL80211_FREQUENCY_ATTR_NO_IR: no mechanisms that initiate radiation - * are permitted on this channel, this includes sending probe - * requests, or modes of operation that require beaconing. + * are permitted on this channel, this includes sending probe + * requests, or modes of operation that require beaconing. + * @__NL80211_FREQUENCY_ATTR_NO_IBSS: obsolete, same as _NO_IR * @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory * on this channel in current regulatory domain. * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm @@ -4357,16 +4358,16 @@ enum nl80211_bitrate_attr { }; /** - * enum nl80211_initiator - Indicates the initiator of a reg domain request + * enum nl80211_reg_initiator - Indicates the initiator of a reg domain request * @NL80211_REGDOM_SET_BY_CORE: Core queried CRDA for a dynamic world - * regulatory domain. + * regulatory domain. * @NL80211_REGDOM_SET_BY_USER: User asked the wireless core to set the - * regulatory domain. + * regulatory domain. * @NL80211_REGDOM_SET_BY_DRIVER: a wireless drivers has hinted to the - * wireless core it thinks its knows the regulatory domain we should be in. + * wireless core it thinks its knows the regulatory domain we should be in. * @NL80211_REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an - * 802.11 country information element with regulatory information it - * thinks we should consider. cfg80211 only processes the country + * 802.11 country information element with regulatory information it + * thinks we should consider. cfg80211 only processes the country * code from the IE, and relies on the regulatory domain information * structure passed by userspace (CRDA) from our wireless-regdb. * If a channel is enabled but the country code indicates it should @@ -4385,11 +4386,11 @@ enum nl80211_reg_initiator { * to a specific country. When this is set you can count on the * ISO / IEC 3166 alpha2 country code being valid. * @NL80211_REGDOM_TYPE_WORLD: the regulatory set domain is the world regulatory - * domain. + * domain. 
* @NL80211_REGDOM_TYPE_CUSTOM_WORLD: the regulatory domain set is a custom - * driver specific world regulatory domain. These do not apply system-wide - * and are only applicable to the individual devices which have requested - * them to be applied. + * driver specific world regulatory domain. These do not apply system-wide + * and are only applicable to the individual devices which have requested + * them to be applied. * @NL80211_REGDOM_TYPE_INTERSECTION: the regulatory domain set is the product * of an intersection between two regulatory domains -- the previously * set regulatory domain on the system and the last accepted regulatory @@ -4406,21 +4407,21 @@ enum nl80211_reg_type { * enum nl80211_reg_rule_attr - regulatory rule attributes * @__NL80211_REG_RULE_ATTR_INVALID: attribute number 0 is reserved * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional - * considerations for a given frequency range. These are the - * &enum nl80211_reg_rule_flags. + * considerations for a given frequency range. These are the + * &enum nl80211_reg_rule_flags. * @NL80211_ATTR_FREQ_RANGE_START: starting frequencry for the regulatory - * rule in KHz. This is not a center of frequency but an actual regulatory - * band edge. + * rule in KHz. This is not a center of frequency but an actual regulatory + * band edge. * @NL80211_ATTR_FREQ_RANGE_END: ending frequency for the regulatory rule - * in KHz. This is not a center a frequency but an actual regulatory - * band edge. + * in KHz. This is not a center a frequency but an actual regulatory + * band edge. * @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this * frequency range, in KHz. * @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain - * for a given frequency range. The value is in mBi (100 * dBi). - * If you don't have one then don't send this. + * for a given frequency range. The value is in mBi (100 * dBi). + * If you don't have one then don't send this. * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for - * a given frequency range. The value is in mBm (100 * dBm). + * a given frequency range. The value is in mBm (100 * dBm). * @NL80211_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds. * If not present or 0 default CAC time will be used. * @NL80211_ATTR_POWER_RULE_PSD: power spectral density (in dBm). @@ -4507,8 +4508,9 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_PTP_ONLY: this is only for Point To Point links * @NL80211_RRF_PTMP_ONLY: this is only for Point To Multi Point links * @NL80211_RRF_NO_IR: no mechanisms that initiate radiation are allowed, - * this includes probe requests or modes of operation that require - * beaconing. + * this includes probe requests or modes of operation that require + * beaconing. + * @__NL80211_RRF_NO_IBSS: obsolete, same as NO_IR * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated * base on contiguous rules and wider channels will be allowed to cross * multiple contiguous/overlapping frequency ranges. @@ -4522,9 +4524,9 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_NO_EHT: EHT operation not allowed * @NL80211_RRF_PSD: Ruleset has power spectral density value * @NL80211_RRF_DFS_CONCURRENT: Operation on this channel is allowed for - peer-to-peer or adhoc communication under the control of a DFS master - which operates on the same channel (FCC-594280 D01 Section B.3). - Should be used together with %NL80211_RRF_DFS only. 
+ * peer-to-peer or adhoc communication under the control of a DFS master + * which operates on the same channel (FCC-594280 D01 Section B.3). + * Should be used together with %NL80211_RRF_DFS only. * @NL80211_RRF_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP not allowed * @NL80211_RRF_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP not allowed */ @@ -4707,8 +4709,8 @@ enum nl80211_mntr_flags { * alternate between Active and Doze states, but may not wake up * for neighbor's beacons. * - * @__NL80211_MESH_POWER_AFTER_LAST - internal use - * @NL80211_MESH_POWER_MAX - highest possible power save level + * @__NL80211_MESH_POWER_AFTER_LAST: internal use + * @NL80211_MESH_POWER_MAX: highest possible power save level */ enum nl80211_mesh_power_mode { @@ -5728,7 +5730,7 @@ struct nl80211_pattern_support { * "TCP connection wakeup" for more details. This is a nested attribute * containing the exact information for establishing and keeping alive * the TCP connection. - * @NL80211_WOWLAN_TRIG_TCP_WAKEUP_MATCH: For wakeup reporting only, the + * @NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH: For wakeup reporting only, the * wakeup packet was received on the TCP connection * @NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST: For wakeup reporting only, the * TCP connection was lost or failed to be established @@ -6077,7 +6079,7 @@ enum nl80211_plink_state { * @NL80211_PLINK_ACTION_BLOCK: block traffic from this mesh peer * @NUM_NL80211_PLINK_ACTIONS: number of possible actions */ -enum plink_actions { +enum nl80211_plink_action { NL80211_PLINK_ACTION_NO_ACTION, NL80211_PLINK_ACTION_OPEN, NL80211_PLINK_ACTION_BLOCK, @@ -6404,6 +6406,7 @@ enum nl80211_feature_flags { * receiving control port frames over nl80211 instead of the netdevice. * @NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT: This driver/device supports * (average) ACK signal strength reporting. + * @NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT: Backward-compatible ID * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate * TXQs. * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the @@ -6787,6 +6790,8 @@ enum nl80211_acl_policy { * @NL80211_SMPS_STATIC: static SMPS (use a single antenna) * @NL80211_SMPS_DYNAMIC: dynamic smps (start with a single antenna and * turn on other antennas after CTS/RTS). + * @__NL80211_SMPS_AFTER_LAST: internal + * @NL80211_SMPS_MAX: highest used enumeration */ enum nl80211_smps_mode { NL80211_SMPS_OFF, @@ -7008,6 +7013,8 @@ enum nl80211_bss_select_attr { * @NL80211_NAN_FUNC_PUBLISH: function is publish * @NL80211_NAN_FUNC_SUBSCRIBE: function is subscribe * @NL80211_NAN_FUNC_FOLLOW_UP: function is follow-up + * @__NL80211_NAN_FUNC_TYPE_AFTER_LAST: internal use + * @NL80211_NAN_FUNC_MAX_TYPE: internal use */ enum nl80211_nan_function_type { NL80211_NAN_FUNC_PUBLISH, @@ -7168,7 +7175,7 @@ enum nl80211_nan_match_attributes { }; /** - * nl80211_external_auth_action - Action to perform with external + * enum nl80211_external_auth_action - Action to perform with external * authentication request. Used by NL80211_ATTR_EXTERNAL_AUTH_ACTION. * @NL80211_EXTERNAL_AUTH_START: Start the authentication. * @NL80211_EXTERNAL_AUTH_ABORT: Abort the ongoing authentication. @@ -7186,7 +7193,7 @@ enum nl80211_external_auth_action { * @NL80211_FTM_RESP_ATTR_LCI: The content of Measurement Report Element * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10), * i.e. 
starting with the measurement token - * @NL80211_FTM_RESP_ATTR_CIVIC: The content of Measurement Report Element + * @NL80211_FTM_RESP_ATTR_CIVICLOC: The content of Measurement Report Element * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13), * i.e. starting with the measurement token * @__NL80211_FTM_RESP_ATTR_LAST: Internal @@ -7829,6 +7836,7 @@ enum nl80211_sae_pwe_mechanism { * * @NL80211_SAR_TYPE_POWER: power limitation specified in 0.25dBm unit * + * @NUM_NL80211_SAR_TYPE: internal */ enum nl80211_sar_type { NL80211_SAR_TYPE_POWER, @@ -7842,6 +7850,8 @@ enum nl80211_sar_type { /** * enum nl80211_sar_attrs - Attributes for SAR spec * + * @__NL80211_SAR_ATTR_INVALID: Invalid + * * @NL80211_SAR_ATTR_TYPE: the SAR type as defined in &enum nl80211_sar_type. * * @NL80211_SAR_ATTR_SPECS: Nested array of SAR power @@ -7873,6 +7883,8 @@ enum nl80211_sar_attrs { /** * enum nl80211_sar_specs_attrs - Attributes for SAR power limit specs * + * @__NL80211_SAR_ATTR_SPECS_INVALID: Invalid + * * @NL80211_SAR_ATTR_SPECS_POWER: Required (s32) value to specify the actual * power limit value in units of 0.25 dBm if type is * NL80211_SAR_TYPE_POWER. (i.e., a value of 44 represents 11 dBm). diff --git a/include/uapi/linux/ntsync.h b/include/uapi/linux/ntsync.h new file mode 100644 index 0000000000..dcfa38fdc9 --- /dev/null +++ b/include/uapi/linux/ntsync.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Kernel support for NT synchronization primitive emulation + * + * Copyright (C) 2021-2022 Elizabeth Figura <zfigura@codeweavers.com> + */ + +#ifndef __LINUX_NTSYNC_H +#define __LINUX_NTSYNC_H + +#include <linux/types.h> + +struct ntsync_sem_args { + __u32 sem; + __u32 count; + __u32 max; +}; + +#define NTSYNC_IOC_CREATE_SEM _IOWR('N', 0x80, struct ntsync_sem_args) + +#define NTSYNC_IOC_SEM_POST _IOWR('N', 0x81, __u32) + +#endif diff --git a/include/uapi/linux/papr_pdsm.h b/include/uapi/linux/papr_pdsm.h new file mode 100644 index 0000000000..1743992504 --- /dev/null +++ b/include/uapi/linux/papr_pdsm.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * PAPR nvDimm Specific Methods (PDSM) and structs for libndctl + * + * (C) Copyright IBM 2020 + * + * Author: Vaibhav Jain <vaibhav at linux.ibm.com> + */ + +#ifndef _UAPI_ASM_POWERPC_PAPR_PDSM_H_ +#define _UAPI_ASM_POWERPC_PAPR_PDSM_H_ + +#include <linux/types.h> +#include <linux/ndctl.h> + +/* + * PDSM Envelope: + * + * The ioctl ND_CMD_CALL exchanges data between user-space and kernel via an + * envelope which consists of two header sections and a payload section as + * illustrated below: + * +-----------------+---------------+---------------------------+ + * | 64-Bytes | 8-Bytes | Max 184-Bytes | + * +-----------------+---------------+---------------------------+ + * | ND-HEADER | PDSM-HEADER | PDSM-PAYLOAD | + * +-----------------+---------------+---------------------------+ + * | nd_family | | | + * | nd_size_out | cmd_status | | + * | nd_size_in | reserved | nd_pdsm_payload | + * | nd_command | payload --> | | + * | nd_fw_size | | | + * | nd_payload ---> | | | + * +---------------+-----------------+---------------------------+ + * + * ND Header: + * This is the generic libnvdimm header described as 'struct nd_cmd_pkg' + * which is interpreted by libnvdimm before being passed on to papr_scm. Important
member fields used are: + * 'nd_family' : (In) NVDIMM_FAMILY_PAPR_SCM + * 'nd_size_in' : (In) PDSM-HEADER + PDSM-IN-PAYLOAD (usually 0) + * 'nd_size_out' : (In) PDSM-HEADER + PDSM-RETURN-PAYLOAD + * 'nd_command' : (In) One of PAPR_PDSM_XXX + * 'nd_fw_size' : (Out) PDSM-HEADER + size of actual payload returned + * + * PDSM Header: + * This is a papr-scm specific header that precedes the payload. This is defined + * as nd_cmd_pdsm_pkg. The following fields are available in this header: + * + * 'cmd_status' : (Out) Errors, if any, encountered while servicing the PDSM. + * 'reserved' : Not used, reserved for future use and should be set to 0. + * 'payload' : A union of all the possible payload structs + * + * PDSM Payload: + * + * The layout of the PDSM Payload is defined by various structs shared between + * papr_scm and libndctl so that contents of the payload can be interpreted. As such + * it is defined as a union of all possible payload structs as + * 'union nd_pdsm_payload'. Based on the value of 'nd_cmd_pkg.nd_command' the + * appropriate member of the union is accessed. + */ + +/* Max payload size that we can handle */ +#define ND_PDSM_PAYLOAD_MAX_SIZE 184 + +/* Size of the PDSM header (the envelope minus its payload) */ +#define ND_PDSM_HDR_SIZE \ + (sizeof(struct nd_pkg_pdsm) - ND_PDSM_PAYLOAD_MAX_SIZE) + +/* Various nvdimm health indicators */ +#define PAPR_PDSM_DIMM_HEALTHY 0 +#define PAPR_PDSM_DIMM_UNHEALTHY 1 +#define PAPR_PDSM_DIMM_CRITICAL 2 +#define PAPR_PDSM_DIMM_FATAL 3 + +/* struct nd_papr_pdsm_health.extension_flags field flags */ + +/* Indicate that the 'dimm_fuel_gauge' field is valid */ +#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1 + +/* Indicate that the 'dimm_dsc' field is valid */ +#define PDSM_DIMM_DSC_VALID 2 + +/* + * Struct exchanged between kernel & ndctl for PAPR_PDSM_HEALTH + * Various flags indicate the health status of the dimm. + * + * extension_flags : Any extension fields present in the struct. + * dimm_unarmed : Dimm not armed, so contents won't persist. + * dimm_bad_shutdown : Previous shutdown did not persist contents. + * dimm_bad_restore : Contents from previous shutdown weren't restored. + * dimm_scrubbed : Contents of the dimm have been scrubbed. + * dimm_locked : Contents of the dimm can't be modified until CEC reboot + * dimm_encrypted : Contents of dimm are encrypted. + * dimm_health : Dimm health indicator. One of PAPR_PDSM_DIMM_XXXX + * dimm_fuel_gauge : Life remaining of DIMM as a percentage from 0-100 + */ +struct nd_papr_pdsm_health { + union { + struct { + __u32 extension_flags; + __u8 dimm_unarmed; + __u8 dimm_bad_shutdown; + __u8 dimm_bad_restore; + __u8 dimm_scrubbed; + __u8 dimm_locked; + __u8 dimm_encrypted; + __u16 dimm_health; + + /* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */ + __u16 dimm_fuel_gauge; + + /* Extension flag PDSM_DIMM_DSC_VALID */ + __u64 dimm_dsc; + }; + __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; + }; +}; + +/* Flags for injecting specific smart errors */ +#define PDSM_SMART_INJECT_HEALTH_FATAL (1 << 0) +#define PDSM_SMART_INJECT_BAD_SHUTDOWN (1 << 1) + +struct nd_papr_pdsm_smart_inject { + union { + struct { + /* One or more of PDSM_SMART_INJECT_ */ + __u32 flags; + __u8 fatal_enable; + __u8 unsafe_shutdown_enable; + }; + __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; + }; +}; + +/* + * Methods to be embedded in ND_CMD_CALL request.
These are sent to the kernel + * via 'nd_cmd_pkg.nd_command' member of the ioctl struct + */ +enum papr_pdsm { + PAPR_PDSM_MIN = 0x0, + PAPR_PDSM_HEALTH, + PAPR_PDSM_SMART_INJECT, + PAPR_PDSM_MAX, +}; + +/* Maximal union that can hold all possible payload types */ +union nd_pdsm_payload { + struct nd_papr_pdsm_health health; + struct nd_papr_pdsm_smart_inject smart_inject; + __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; +} __packed; + +/* + * PDSM-header + payload expected with ND_CMD_CALL ioctl from libnvdimm + * Valid member of union 'payload' is identified via 'nd_cmd_pkg.nd_command' + * that should always precede this struct when sent to papr_scm via CMD_CALL + * interface. + */ +struct nd_pkg_pdsm { + __s32 cmd_status; /* Out: Sub-cmd status returned back */ + __u16 reserved[2]; /* Ignored and to be set as '0' */ + union nd_pdsm_payload payload; +} __packed; + +#endif /* _UAPI_ASM_POWERPC_PAPR_PDSM_H_ */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index a39193213f..94c00996e6 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -1144,8 +1144,14 @@ #define PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH 0x0003ffff #define PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX 0x000000ff +#define PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER 0x0000ff00 #define PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID 0x0000ffff #define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL 0x00ff0000 #define PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX 0xff000000 +/* Compute Express Link (CXL r3.1, sec 8.1.5) */ +#define PCI_DVSEC_CXL_PORT 3 +#define PCI_DVSEC_CXL_PORT_CTL 0x0c +#define PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR 0x00000001 + #endif /* LINUX_PCI_REGS_H */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index ea277039f8..229fc925ec 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -587,6 +587,10 @@ enum { * TCA_FLOWER_KEY_ENC_OPT_GTP_ * attributes */ + TCA_FLOWER_KEY_ENC_OPTS_PFCP, /* Nested + * TCA_FLOWER_KEY_ENC_IPT_PFCP + * attributes + */ __TCA_FLOWER_KEY_ENC_OPTS_MAX, }; @@ -637,6 +641,16 @@ enum { (__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX - 1) enum { + TCA_FLOWER_KEY_ENC_OPT_PFCP_UNSPEC, + TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, /* u8 */ + TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID, /* be64 */ + __TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, +}; + +#define TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX \ + (__TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX - 1) + +enum { TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC, TCA_FLOWER_KEY_MPLS_OPTS_LSE, __TCA_FLOWER_KEY_MPLS_OPTS_MAX, diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 370ed14b1a..35791791a8 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -306,4 +306,26 @@ struct prctl_mm_map { # define PR_RISCV_V_VSTATE_CTRL_NEXT_MASK 0xc # define PR_RISCV_V_VSTATE_CTRL_MASK 0x1f +#define PR_RISCV_SET_ICACHE_FLUSH_CTX 71 +# define PR_RISCV_CTX_SW_FENCEI_ON 0 +# define PR_RISCV_CTX_SW_FENCEI_OFF 1 +# define PR_RISCV_SCOPE_PER_PROCESS 0 +# define PR_RISCV_SCOPE_PER_THREAD 1 + +/* PowerPC Dynamic Execution Control Register (DEXCR) controls */ +#define PR_PPC_GET_DEXCR 72 +#define PR_PPC_SET_DEXCR 73 +/* DEXCR aspect to act on */ +# define PR_PPC_DEXCR_SBHE 0 /* Speculative branch hint enable */ +# define PR_PPC_DEXCR_IBRTPD 1 /* Indirect branch recurrent target prediction disable */ +# define PR_PPC_DEXCR_SRAPD 2 /* Subroutine return address prediction disable */ +# define PR_PPC_DEXCR_NPHIE 3 /* Non-privileged hash instruction enable */ +/* Action to apply / return */ +# define PR_PPC_DEXCR_CTRL_EDITABLE 0x1 /* Aspect can be modified with 
PR_PPC_SET_DEXCR */ +# define PR_PPC_DEXCR_CTRL_SET 0x2 /* Set the aspect for this process */ +# define PR_PPC_DEXCR_CTRL_CLEAR 0x4 /* Clear the aspect for this process */ +# define PR_PPC_DEXCR_CTRL_SET_ONEXEC 0x8 /* Set the aspect on exec */ +# define PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC 0x10 /* Clear the aspect on exec */ +# define PR_PPC_DEXCR_CTRL_MASK 0x1f + #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index a0819c6a59..adf5fd78dd 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -337,6 +337,8 @@ enum LINUX_MIB_XFRMFWDHDRERROR, /* XfrmFwdHdrError*/ LINUX_MIB_XFRMOUTSTATEINVALID, /* XfrmOutStateInvalid */ LINUX_MIB_XFRMACQUIREERROR, /* XfrmAcquireError */ + LINUX_MIB_XFRMOUTSTATEDIRERROR, /* XfrmOutStateDirError */ + LINUX_MIB_XFRMINSTATEDIRERROR, /* XfrmInStateDirError */ __LINUX_MIB_XFRMMAX }; diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 2f2ee82d55..95770941ee 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -127,7 +127,8 @@ struct statx { __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */ /* 0xa0 */ - __u64 __spare3[12]; /* Spare space for future expansion */ + __u64 stx_subvol; /* Subvolume identifier */ + __u64 __spare3[11]; /* Spare space for future expansion */ /* 0x100 */ }; @@ -155,6 +156,7 @@ struct statx { #define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */ #define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */ #define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */ +#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */ #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index 2ec6f35cda..58154117d9 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -55,4 +55,12 @@ #define __counted_by(m) #endif +#ifndef __counted_by_le +#define __counted_by_le(m) +#endif + +#ifndef __counted_by_be +#define __counted_by_be(m) +#endif + #endif /* _UAPI_LINUX_STDDEF_H */ diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index c07e9f90c0..dbf896f314 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -135,6 +135,8 @@ enum { #define TCP_AO_GET_KEYS 41 /* List MKT(s) */ #define TCP_AO_REPAIR 42 /* Get/Set SNEs and ISNs */ +#define TCP_IS_MPTCP 43 /* Is MPTCP being used? */ + #define TCP_REPAIR_ON 1 #define TCP_REPAIR_OFF 0 #define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */ diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h index 23e5716469..d0430bee82 100644 --- a/include/uapi/linux/tee.h +++ b/include/uapi/linux/tee.h @@ -56,6 +56,7 @@ */ #define TEE_IMPL_ID_OPTEE 1 #define TEE_IMPL_ID_AMDTEE 2 +#define TEE_IMPL_ID_TSTEE 3 /* * OP-TEE specific capabilities diff --git a/include/uapi/linux/trace_mmap.h b/include/uapi/linux/trace_mmap.h new file mode 100644 index 0000000000..c102ef35d1 --- /dev/null +++ b/include/uapi/linux/trace_mmap.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _TRACE_MMAP_H_ +#define _TRACE_MMAP_H_ + +#include <linux/types.h> + +/** + * struct trace_buffer_meta - Ring-buffer Meta-page description + * @meta_page_size: Size of this meta-page. + * @meta_struct_len: Size of this structure. + * @subbuf_size: Size of each sub-buffer. 
+ * @nr_subbufs: Number of sub-buffers in the ring-buffer, including the reader. + * @reader.lost_events: Number of events lost at the time of the reader swap. + * @reader.id: subbuf ID of the current reader. ID range [0 : @nr_subbufs - 1] + * @reader.read: Number of bytes read on the reader subbuf. + * @flags: Placeholder for now, 0 until new features are supported. + * @entries: Number of entries in the ring-buffer. + * @overrun: Number of entries lost in the ring-buffer. + * @read: Number of entries that have been read. + * @Reserved1: Internal use only. + * @Reserved2: Internal use only. + */ +struct trace_buffer_meta { + __u32 meta_page_size; + __u32 meta_struct_len; + + __u32 subbuf_size; + __u32 nr_subbufs; + + struct { + __u64 lost_events; + __u32 id; + __u32 read; + } reader; + + __u64 flags; + + __u64 entries; + __u64 overrun; + __u64 read; + + __u64 Reserved1; + __u64 Reserved2; +}; + +#define TRACE_MMAP_IOCTL_GET_READER _IO('R', 0x20) + +#endif /* _TRACE_MMAP_H_ */ diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h index 4828794efc..1a0fe8b151 100644 --- a/include/uapi/linux/udp.h +++ b/include/uapi/linux/udp.h @@ -36,7 +36,7 @@ struct udphdr { #define UDP_GRO 104 /* This socket can receive UDP GRO packets */ /* UDP encapsulation types */ -#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */ +#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* unused draft-ietf-ipsec-nat-t-ike-00/01 */ #define UDP_ENCAP_ESPINUDP 2 /* draft-ietf-ipsec-udp-encaps-06 */ #define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */ #define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */ diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h index 6b07b73473..946520bc49 100644 --- a/include/uapi/linux/v4l2-mediabus.h +++ b/include/uapi/linux/v4l2-mediabus.h @@ -19,12 +19,18 @@ * @width: image width * @height: image height * @code: data format code (from enum v4l2_mbus_pixelcode) - * @field: used interlacing type (from enum v4l2_field) - * @colorspace: colorspace of the data (from enum v4l2_colorspace) - * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding) - * @hsv_enc: HSV encoding of the data (from enum v4l2_hsv_encoding) - * @quantization: quantization of the data (from enum v4l2_quantization) - * @xfer_func: transfer function of the data (from enum v4l2_xfer_func) + * @field: used interlacing type (from enum v4l2_field), zero for metadata + * mbus codes + * @colorspace: colorspace of the data (from enum v4l2_colorspace), zero for + * metadata mbus codes + * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding), zero + * for metadata mbus codes + * @hsv_enc: HSV encoding of the data (from enum v4l2_hsv_encoding), zero for + * metadata mbus codes + * @quantization: quantization of the data (from enum v4l2_quantization), zero + * for metadata mbus codes + * @xfer_func: transfer function of the data (from enum v4l2_xfer_func), zero + * for metadata mbus codes * @flags: flags (V4L2_MBUS_FRAMEFMT_*) * @reserved: reserved bytes that can be later used */ diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h index 7048c51581..2347e266cf 100644 --- a/include/uapi/linux/v4l2-subdev.h +++ b/include/uapi/linux/v4l2-subdev.h @@ -50,6 +50,10 @@ struct v4l2_subdev_format { * @rect: pad crop rectangle boundaries * @stream: stream number, defined in subdev routing * @reserved: drivers and applications must zero this array + * + * The subdev crop API is an obsolete interface and may be removed in the + * future.
It is superseded by the selection API. No new extensions to this + * structure will be accepted. */ struct v4l2_subdev_crop { __u32 which; @@ -224,15 +228,19 @@ struct v4l2_subdev_route { * struct v4l2_subdev_routing - Subdev routing information * * @which: configuration type (from enum v4l2_subdev_format_whence) - * @num_routes: the total number of routes in the routes array + * @len_routes: the length of the routes array, in routes; set by the user, not + * modified by the kernel * @routes: pointer to the routes array + * @num_routes: the total number of routes, possibly more than fits in the + * routes array * @reserved: drivers and applications must zero this array */ struct v4l2_subdev_routing { __u32 which; - __u32 num_routes; + __u32 len_routes; __u64 routes; - __u32 reserved[6]; + __u32 num_routes; + __u32 reserved[11]; }; /* diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index a8015e5e7f..fe6b67e837 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -599,6 +599,8 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */ #define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */ #define V4L2_PIX_FMT_IPU3_Y10 v4l2_fourcc('i', 'p', '3', 'y') /* IPU3 packed 10-bit greyscale */ +#define V4L2_PIX_FMT_Y12P v4l2_fourcc('Y', '1', '2', 'P') /* 12 Greyscale, MIPI RAW12 packed */ +#define V4L2_PIX_FMT_Y14P v4l2_fourcc('Y', '1', '4', 'P') /* 14 Greyscale, MIPI RAW14 packed */ /* Palette formats */ #define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */ @@ -839,6 +841,20 @@ struct v4l2_pix_format { #define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */ #define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */ +#ifdef __KERNEL__ +/* + * Line-based metadata formats. Remember to update v4l_fill_fmtdesc() when + * adding new ones! + */ +#define V4L2_META_FMT_GENERIC_8 v4l2_fourcc('M', 'E', 'T', '8') /* Generic 8-bit metadata */ +#define V4L2_META_FMT_GENERIC_CSI2_10 v4l2_fourcc('M', 'C', '1', 'A') /* 10-bit CSI-2 packed 8-bit metadata */ +#define V4L2_META_FMT_GENERIC_CSI2_12 v4l2_fourcc('M', 'C', '1', 'C') /* 12-bit CSI-2 packed 8-bit metadata */ +#define V4L2_META_FMT_GENERIC_CSI2_14 v4l2_fourcc('M', 'C', '1', 'E') /* 14-bit CSI-2 packed 8-bit metadata */ +#define V4L2_META_FMT_GENERIC_CSI2_16 v4l2_fourcc('M', 'C', '1', 'G') /* 16-bit CSI-2 packed 8-bit metadata */ +#define V4L2_META_FMT_GENERIC_CSI2_20 v4l2_fourcc('M', 'C', '1', 'K') /* 20-bit CSI-2 packed 8-bit metadata */ +#define V4L2_META_FMT_GENERIC_CSI2_24 v4l2_fourcc('M', 'C', '1', 'O') /* 24-bit CSI-2 packed 8-bit metadata */ +#endif + /* priv field value to indicates that subsequent fields are valid. 
*/ #define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe @@ -869,6 +885,7 @@ struct v4l2_fmtdesc { #define V4L2_FMT_FLAG_CSC_YCBCR_ENC 0x0080 #define V4L2_FMT_FLAG_CSC_HSV_ENC V4L2_FMT_FLAG_CSC_YCBCR_ENC #define V4L2_FMT_FLAG_CSC_QUANTIZATION 0x0100 +#define V4L2_FMT_FLAG_META_LINE_BASED 0x0200 /* Frame Size and frame rate enumeration */ /* @@ -1036,6 +1053,7 @@ struct v4l2_requestbuffers { #define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5) #define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6) #define V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS (1 << 7) +#define V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS (1 << 8) /** * struct v4l2_plane - plane info for multi-planar buffers @@ -1841,7 +1859,7 @@ struct v4l2_ext_control { struct v4l2_ctrl_hdr10_cll_info __user *p_hdr10_cll_info; struct v4l2_ctrl_hdr10_mastering_display __user *p_hdr10_mastering_display; void __user *ptr; - }; + } __attribute__ ((packed)); } __attribute__ ((packed)); struct v4l2_ext_controls { @@ -2415,10 +2433,19 @@ struct v4l2_sdr_format { * struct v4l2_meta_format - metadata format definition * @dataformat: little endian four character code (fourcc) * @buffersize: maximum size in bytes required for data + * @width: number of data units per line (valid for line + * based formats only, see format documentation) + * @height: number of lines of data per buffer (valid for line based + * formats only) + * @bytesperline: offset between the beginnings of two adjacent lines in + * bytes (valid for line based formats only) */ struct v4l2_meta_format { __u32 dataformat; __u32 buffersize; + __u32 width; + __u32 height; + __u32 bytesperline; } __attribute__ ((packed)); /** @@ -2624,6 +2651,20 @@ struct v4l2_create_buffers { __u32 reserved[5]; }; +/** + * struct v4l2_remove_buffers - VIDIOC_REMOVE_BUFS argument + * @index: the first buffer to be removed + * @count: number of buffers to be removed + * @type: enum v4l2_buf_type + * @reserved: future extensions + */ +struct v4l2_remove_buffers { + __u32 index; + __u32 count; + __u32 type; + __u32 reserved[13]; +}; + /* * I O C T L C O D E S F O R V I D E O D E V I C E S * @@ -2723,6 +2764,8 @@ struct v4l2_create_buffers { #define VIDIOC_DBG_G_CHIP_INFO _IOWR('V', 102, struct v4l2_dbg_chip_info) #define VIDIOC_QUERY_EXT_CTRL _IOWR('V', 103, struct v4l2_query_ext_ctrl) +#define VIDIOC_REMOVE_BUFS _IOWR('V', 104, struct v4l2_remove_buffers) + /* Reminder: when adding new ioctls please add support for them to drivers/media/v4l2-core/v4l2-compat-ioctl32.c as well! */ diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h index e9122f1d0e..6e4b2cf6b7 100644 --- a/include/uapi/linux/virtio_mem.h +++ b/include/uapi/linux/virtio_mem.h @@ -90,6 +90,8 @@ #define VIRTIO_MEM_F_ACPI_PXM 0 /* unplugged memory must not be accessed */ #define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1 +/* plugged memory will remain plugged when suspending+resuming */ +#define VIRTIO_MEM_F_PERSISTENT_SUSPEND 2 /* --- virtio-mem: guest -> host requests --- */ diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index cc65ef0f3c..ac9174717e 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -56,6 +56,7 @@ #define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow * Steering */ #define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ +#define VIRTIO_NET_F_DEVICE_STATS 50 /* Device can provide device-level statistics.
*/ #define VIRTIO_NET_F_VQ_NOTF_COAL 52 /* Device supports virtqueue notification coalescing */ #define VIRTIO_NET_F_NOTF_COAL 53 /* Device supports notifications coalescing */ #define VIRTIO_NET_F_GUEST_USO4 54 /* Guest can handle USOv4 in. */ @@ -406,4 +407,146 @@ struct virtio_net_ctrl_coal_vq { struct virtio_net_ctrl_coal coal; }; +/* + * Device Statistics + */ +#define VIRTIO_NET_CTRL_STATS 8 +#define VIRTIO_NET_CTRL_STATS_QUERY 0 +#define VIRTIO_NET_CTRL_STATS_GET 1 + +struct virtio_net_stats_capabilities { + +#define VIRTIO_NET_STATS_TYPE_CVQ (1ULL << 32) + +#define VIRTIO_NET_STATS_TYPE_RX_BASIC (1ULL << 0) +#define VIRTIO_NET_STATS_TYPE_RX_CSUM (1ULL << 1) +#define VIRTIO_NET_STATS_TYPE_RX_GSO (1ULL << 2) +#define VIRTIO_NET_STATS_TYPE_RX_SPEED (1ULL << 3) + +#define VIRTIO_NET_STATS_TYPE_TX_BASIC (1ULL << 16) +#define VIRTIO_NET_STATS_TYPE_TX_CSUM (1ULL << 17) +#define VIRTIO_NET_STATS_TYPE_TX_GSO (1ULL << 18) +#define VIRTIO_NET_STATS_TYPE_TX_SPEED (1ULL << 19) + + __le64 supported_stats_types[1]; +}; + +struct virtio_net_ctrl_queue_stats { + struct { + __le16 vq_index; + __le16 reserved[3]; + __le64 types_bitmap[1]; + } stats[1]; +}; + +struct virtio_net_stats_reply_hdr { +#define VIRTIO_NET_STATS_TYPE_REPLY_CVQ 32 + +#define VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC 0 +#define VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM 1 +#define VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO 2 +#define VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED 3 + +#define VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC 16 +#define VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM 17 +#define VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO 18 +#define VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED 19 + __u8 type; + __u8 reserved; + __le16 vq_index; + __le16 reserved1; + __le16 size; +}; + +struct virtio_net_stats_cvq { + struct virtio_net_stats_reply_hdr hdr; + + __le64 command_num; + __le64 ok_num; +}; + +struct virtio_net_stats_rx_basic { + struct virtio_net_stats_reply_hdr hdr; + + __le64 rx_notifications; + + __le64 rx_packets; + __le64 rx_bytes; + + __le64 rx_interrupts; + + __le64 rx_drops; + __le64 rx_drop_overruns; +}; + +struct virtio_net_stats_tx_basic { + struct virtio_net_stats_reply_hdr hdr; + + __le64 tx_notifications; + + __le64 tx_packets; + __le64 tx_bytes; + + __le64 tx_interrupts; + + __le64 tx_drops; + __le64 tx_drop_malformed; +}; + +struct virtio_net_stats_rx_csum { + struct virtio_net_stats_reply_hdr hdr; + + __le64 rx_csum_valid; + __le64 rx_needs_csum; + __le64 rx_csum_none; + __le64 rx_csum_bad; +}; + +struct virtio_net_stats_tx_csum { + struct virtio_net_stats_reply_hdr hdr; + + __le64 tx_csum_none; + __le64 tx_needs_csum; +}; + +struct virtio_net_stats_rx_gso { + struct virtio_net_stats_reply_hdr hdr; + + __le64 rx_gso_packets; + __le64 rx_gso_bytes; + __le64 rx_gso_packets_coalesced; + __le64 rx_gso_bytes_coalesced; +}; + +struct virtio_net_stats_tx_gso { + struct virtio_net_stats_reply_hdr hdr; + + __le64 tx_gso_packets; + __le64 tx_gso_bytes; + __le64 tx_gso_segments; + __le64 tx_gso_segments_bytes; + __le64 tx_gso_packets_noseg; + __le64 tx_gso_bytes_noseg; +}; + +struct virtio_net_stats_rx_speed { + struct virtio_net_stats_reply_hdr hdr; + + /* rx_{packets,bytes}_allowance_exceeded are too long. So rename to + * short name. + */ + __le64 rx_ratelimit_packets; + __le64 rx_ratelimit_bytes; +}; + +struct virtio_net_stats_tx_speed { + struct virtio_net_stats_reply_hdr hdr; + + /* tx_{packets,bytes}_allowance_exceeded are too long. So rename to + * short name. 
+ */ + __le64 tx_ratelimit_packets; + __le64 tx_ratelimit_bytes; +}; + #endif /* _UAPI_LINUX_VIRTIO_NET_H */ diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 594b66e163..d950d02ab7 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -141,6 +141,11 @@ enum { XFRM_POLICY_MAX = 3 }; +enum xfrm_sa_dir { + XFRM_SA_DIR_IN = 1, + XFRM_SA_DIR_OUT = 2 +}; + enum { XFRM_SHARE_ANY, /* No limitations */ XFRM_SHARE_SESSION, /* For this session only */ @@ -315,6 +320,7 @@ enum xfrm_attr_type_t { XFRMA_SET_MARK_MASK, /* __u32 */ XFRMA_IF_ID, /* __u32 */ XFRMA_MTIMER_THRESH, /* __u32 in seconds for input SA */ + XFRMA_SA_DIR, /* __u8 */ __XFRMA_MAX #define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */ diff --git a/include/uapi/linux/zorro_ids.h b/include/uapi/linux/zorro_ids.h index 6e574d7b7d..393f2ee9c0 100644 --- a/include/uapi/linux/zorro_ids.h +++ b/include/uapi/linux/zorro_ids.h @@ -449,6 +449,9 @@ #define ZORRO_PROD_VMC_ISDN_BLASTER_Z2 ZORRO_ID(VMC, 0x01, 0) #define ZORRO_PROD_VMC_HYPERCOM_4 ZORRO_ID(VMC, 0x02, 0) +#define ZORRO_MANUF_CSLAB 0x1400 +#define ZORRO_PROD_CSLAB_WARP_1260 ZORRO_ID(CSLAB, 0x65, 0) + #define ZORRO_MANUF_INFORMATION 0x157C #define ZORRO_PROD_INFORMATION_ISDN_ENGINE_I ZORRO_ID(INFORMATION, 0x64, 0) diff --git a/include/uapi/misc/pvpanic.h b/include/uapi/misc/pvpanic.h index 54b7485390..3f1745cd1b 100644 --- a/include/uapi/misc/pvpanic.h +++ b/include/uapi/misc/pvpanic.h @@ -3,7 +3,10 @@ #ifndef __PVPANIC_H__ #define __PVPANIC_H__ -#define PVPANIC_PANICKED (1 << 0) -#define PVPANIC_CRASH_LOADED (1 << 1) +#include <linux/const.h> + +#define PVPANIC_PANICKED _BITUL(0) +#define PVPANIC_CRASH_LOADED _BITUL(1) +#define PVPANIC_SHUTDOWN _BITUL(2) #endif /* __PVPANIC_H__ */ diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h index 701e2d567e..d689b8b341 100644 --- a/include/uapi/rdma/efa-abi.h +++ b/include/uapi/rdma/efa-abi.h @@ -85,11 +85,17 @@ enum { EFA_QP_DRIVER_TYPE_SRD = 0, }; +enum { + EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV = 1 << 0, +}; + struct efa_ibv_create_qp { __u32 comp_mask; __u32 rq_ring_size; /* bytes */ __u32 sq_ring_size; /* bytes */ __u32 driver_qp_type; + __u16 flags; + __u8 reserved_90[6]; }; struct efa_ibv_create_qp_resp { @@ -123,6 +129,7 @@ enum { EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID = 1 << 3, EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128 = 1 << 4, EFA_QUERY_DEVICE_CAPS_RDMA_WRITE = 1 << 5, + EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV = 1 << 6, }; struct efa_ibv_ex_query_device_resp { diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index 158670da2b..94e861870e 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -109,6 +109,12 @@ struct hns_roce_ib_create_qp_resp { __aligned_u64 dwqe_mmap_key; }; +struct hns_roce_ib_modify_qp_resp { + __u8 tc_mode; + __u8 priority; + __u8 reserved[6]; +}; + enum { HNS_ROCE_EXSGE_FLAGS = 1 << 0, HNS_ROCE_RQ_INLINE_FLAGS = 1 << 1, @@ -143,7 +149,8 @@ struct hns_roce_ib_alloc_pd_resp { struct hns_roce_ib_create_ah_resp { __u8 dmac[6]; - __u8 reserved[2]; + __u8 priority; + __u8 tc_mode; }; #endif /* HNS_ABI_USER_H */ diff --git a/include/uapi/rdma/mana-abi.h b/include/uapi/rdma/mana-abi.h index 5fcb31b37f..2c41cc3152 100644 --- a/include/uapi/rdma/mana-abi.h +++ b/include/uapi/rdma/mana-abi.h @@ -16,8 +16,20 @@ #define MANA_IB_UVERBS_ABI_VERSION 1 +enum mana_ib_create_cq_flags { + MANA_IB_CREATE_RNIC_CQ = 1 << 0, +}; + struct mana_ib_create_cq { __aligned_u64 buf_addr; + __u16 flags; + __u16 reserved0; + __u32 
reserved1; +}; + +struct mana_ib_create_cq_resp { + __u32 cqid; + __u32 reserved; }; struct mana_ib_create_qp { diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index 723bbb0f70..a214fc259f 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -558,6 +558,12 @@ enum rdma_nldev_attr { RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, /* u8 */ + RDMA_NLDEV_ATTR_DRIVER_DETAILS, /* u8 */ + /* + * QP subtype string, used for driver QPs + */ + RDMA_NLDEV_ATTR_RES_SUBTYPE, /* string */ + /* * Always the end */ diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h index 30a5c1a593..a3ba779a3f 100644 --- a/include/uapi/scsi/scsi_bsg_mpi3mr.h +++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h @@ -401,7 +401,7 @@ struct mpi3mr_buf_entry { __u32 buf_len; }; /** - * struct mpi3mr_bsg_buf_entry_list - list of user buffer + * struct mpi3mr_buf_entry_list - list of user buffer * descriptor for MPI Passthrough requests. * * @num_of_entries: Number of buffer descriptors @@ -424,6 +424,7 @@ struct mpi3mr_buf_entry_list { * @mrioc_id: Controller ID * @rsvd1: Reserved * @timeout: MPI request timeout + * @rsvd2: Reserved * @buf_entry_list: Buffer descriptor list */ struct mpi3mr_bsg_mptcmd { @@ -441,8 +442,9 @@ struct mpi3mr_bsg_mptcmd { * @cmd_type: represents drvrcmd or mptcmd * @rsvd1: Reserved * @rsvd2: Reserved - * @drvrcmd: driver request structure - * @mptcmd: mpt request structure + * @rsvd3: Reserved + * @cmd.drvrcmd: driver request structure + * @cmd.mptcmd: mpt request structure */ struct mpi3mr_bsg_packet { __u8 cmd_type; diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h index 03f2beadf2..8c29e498ef 100644 --- a/include/uapi/scsi/scsi_bsg_ufs.h +++ b/include/uapi/scsi/scsi_bsg_ufs.h @@ -123,6 +123,7 @@ struct utp_upiu_query { * @idn: a value that indicates the particular type of data B-1 * @index: Index to further identify data B-2 * @selector: Index to further identify data B-3 + * @osf3: spec field B-4 * @osf4: spec field B-5 * @osf5: spec field B 6,7 * @osf6: spec field DW 8,9 @@ -138,12 +139,13 @@ struct utp_upiu_query_v4_0 { __be16 osf5; __be32 osf6; __be32 osf7; + /* private: */ __be32 reserved; }; /** * struct utp_upiu_cmd - Command UPIU structure - * @data_transfer_len: Data Transfer Length DW-3 + * @exp_data_transfer_len: Data Transfer Length DW-3 * @cdb: Command Descriptor Block CDB DW-4 to DW-7 */ struct utp_upiu_cmd { diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 10851bca71..99333cbd31 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -576,60 +576,4 @@ struct snd_soc_tplg_dai { struct snd_soc_tplg_private priv; } __attribute__((packed)); -/* - * Old version of ABI structs, supported for backward compatibility. 
- */ - -/* Manifest v4 */ -struct snd_soc_tplg_manifest_v4 { - __le32 size; /* in bytes of this structure */ - __le32 control_elems; /* number of control elements */ - __le32 widget_elems; /* number of widget elements */ - __le32 graph_elems; /* number of graph elements */ - __le32 pcm_elems; /* number of PCM elements */ - __le32 dai_link_elems; /* number of DAI link elements */ - struct snd_soc_tplg_private priv; -} __packed; - -/* Stream Capabilities v4 */ -struct snd_soc_tplg_stream_caps_v4 { - __le32 size; /* in bytes of this structure */ - char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; - __le64 formats; /* supported formats SNDRV_PCM_FMTBIT_* */ - __le32 rates; /* supported rates SNDRV_PCM_RATE_* */ - __le32 rate_min; /* min rate */ - __le32 rate_max; /* max rate */ - __le32 channels_min; /* min channels */ - __le32 channels_max; /* max channels */ - __le32 periods_min; /* min number of periods */ - __le32 periods_max; /* max number of periods */ - __le32 period_size_min; /* min period size bytes */ - __le32 period_size_max; /* max period size bytes */ - __le32 buffer_size_min; /* min buffer size bytes */ - __le32 buffer_size_max; /* max buffer size bytes */ -} __packed; - -/* PCM v4 */ -struct snd_soc_tplg_pcm_v4 { - __le32 size; /* in bytes of this structure */ - char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; - char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; - __le32 pcm_id; /* unique ID - used to match with DAI link */ - __le32 dai_id; /* unique ID - used to match */ - __le32 playback; /* supports playback mode */ - __le32 capture; /* supports capture mode */ - __le32 compress; /* 1 = compressed; 0 = PCM */ - struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* for DAI link */ - __le32 num_streams; /* number of streams */ - struct snd_soc_tplg_stream_caps_v4 caps[2]; /* playback and capture for DAI */ -} __packed; - -/* Physical link config v4 */ -struct snd_soc_tplg_link_config_v4 { - __le32 size; /* in bytes of this structure */ - __le32 id; /* unique ID - used to match */ - struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* supported configs playback and captrure */ - __le32 num_streams; /* number of streams */ -} __packed; - #endif diff --git a/include/uapi/sound/intel/avs/tokens.h b/include/uapi/sound/intel/avs/tokens.h index 4beca03405..3e3fb258dd 100644 --- a/include/uapi/sound/intel/avs/tokens.h +++ b/include/uapi/sound/intel/avs/tokens.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* - * Copyright(c) 2021 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2021 Intel Corporation * * Authors: Cezary Rojewski <cezary.rojewski@intel.com> * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com> diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h index 4bf9c4f9ad..940c426932 100644 --- a/include/uapi/sound/skl-tplg-interface.h +++ b/include/uapi/sound/skl-tplg-interface.h @@ -165,78 +165,4 @@ enum skl_tuple_type { SKL_TYPE_DATA }; -/* v4 configuration data */ - -struct skl_dfw_v4_module_pin { - __u16 module_id; - __u16 instance_id; -} __packed; - -struct skl_dfw_v4_module_fmt { - __u32 channels; - __u32 freq; - __u32 bit_depth; - __u32 valid_bit_depth; - __u32 ch_cfg; - __u32 interleaving_style; - __u32 sample_type; - __u32 ch_map; -} __packed; - -struct skl_dfw_v4_module_caps { - __u32 set_params:2; - __u32 rsvd:30; - __u32 param_id; - __u32 caps_size; - __u32 caps[HDA_SST_CFG_MAX]; -} __packed; - -struct skl_dfw_v4_pipe { - __u8 pipe_id; - __u8 pipe_priority; - __u16 conn_type:4; - __u16 rsvd:4; - __u16 memory_pages:8; -} __packed; - -struct skl_dfw_v4_module { - char uuid[SKL_UUID_STR_SZ]; - - __u16 module_id; - __u16 instance_id; - __u32 max_mcps; - __u32 mem_pages; - __u32 obs; - __u32 ibs; - __u32 vbus_id; - - __u32 max_in_queue:8; - __u32 max_out_queue:8; - __u32 time_slot:8; - __u32 core_id:4; - __u32 rsvd1:4; - - __u32 module_type:8; - __u32 conn_type:4; - __u32 dev_type:4; - __u32 hw_conn_type:4; - __u32 rsvd2:12; - - __u32 params_fixup:8; - __u32 converter:8; - __u32 input_pin_type:1; - __u32 output_pin_type:1; - __u32 is_dynamic_in_pin:1; - __u32 is_dynamic_out_pin:1; - __u32 is_loadable:1; - __u32 rsvd3:11; - - struct skl_dfw_v4_pipe pipe; - struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE]; - struct skl_dfw_v4_module_fmt out_fmt[MAX_OUT_QUEUE]; - struct skl_dfw_v4_module_pin in_pin[MAX_IN_QUEUE]; - struct skl_dfw_v4_module_pin out_pin[MAX_OUT_QUEUE]; - struct skl_dfw_v4_module_caps caps; -} __packed; - #endif diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h index 45c657c391..937ed9408c 100644 --- a/include/uapi/sound/sof/abi.h +++ b/include/uapi/sound/sof/abi.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ /** diff --git a/include/uapi/sound/sof/fw.h b/include/uapi/sound/sof/fw.h index e9f697467a..fcfa71faf2 100644 --- a/include/uapi/sound/sof/fw.h +++ b/include/uapi/sound/sof/fw.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation */ /* diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h index cb3c1ace69..228d4c3eb2 100644 --- a/include/uapi/sound/sof/header.h +++ b/include/uapi/sound/sof/header.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2018 Intel Corporation */ #ifndef __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__ diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h index 6bf00c09d3..0a246bc218 100644 --- a/include/uapi/sound/sof/tokens.h +++ b/include/uapi/sound/sof/tokens.h @@ -3,7 +3,7 @@ * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * - * Copyright(c) 2018 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com> * Keyon Jie <yang.jie@linux.intel.com> */ diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index a35e12f8e6..d965e4d127 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -374,7 +374,6 @@ struct ufs_hba_variant_ops { int (*get_outstanding_cqs)(struct ufs_hba *hba, unsigned long *ocqs); int (*config_esi)(struct ufs_hba *hba); - void (*config_scsi_dev)(struct scsi_device *sdev); }; /* clock gating state */ @@ -1132,6 +1131,12 @@ static inline bool is_mcq_enabled(struct ufs_hba *hba) return hba->mcq_enabled; } +static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba, + enum ufshcd_mcq_opr opr, int idx) +{ + return hba->mcq_opr[opr].offset + hba->mcq_opr[opr].stride * idx; +} + #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba) { @@ -1390,8 +1395,6 @@ void ufshcd_release(struct ufs_hba *hba); void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value); -u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba); - int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg); int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd); diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h index a196e1c4c3..385e1c6b8d 100644 --- a/include/ufs/ufshci.h +++ b/include/ufs/ufshci.h @@ -355,12 +355,8 @@ enum { /* Interrupt disable masks */ enum { - /* Interrupt disable mask for UFSHCI v1.0 */ - INTERRUPT_MASK_ALL_VER_10 = 0x30FFF, - INTERRUPT_MASK_RW_VER_10 = 0x30000, - /* Interrupt disable mask for UFSHCI v1.1 */ - INTERRUPT_MASK_ALL_VER_11 = 0x31FFF, + INTERRUPT_MASK_ALL_VER_11 = 0x31FFF, /* Interrupt disable mask for UFSHCI v2.1 */ INTERRUPT_MASK_ALL_VER_21 = 0x71FFF, @@ -425,13 +421,6 @@ union ufs_crypto_cfg_entry { * Request Descriptor Definitions */ -/* Transfer request command type */ -enum { - UTP_CMD_TYPE_SCSI = 0x0, - UTP_CMD_TYPE_UFS = 0x1, - UTP_CMD_TYPE_DEV_MANAGE = 0x2, -}; - /* To accommodate UFS2.0 required Command type */ enum { UTP_CMD_TYPE_UFS_STORAGE = 0x1, diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h index c71ddb6d46..d04d394db0 100644 --- a/include/vdso/datapage.h +++ b/include/vdso/datapage.h @@ -61,6 +61,7 @@ struct vdso_timestamp { * @seq: timebase sequence counter * @clock_mode: clock mode * @cycle_last: timebase at clocksource init + * @max_cycles: maximum cycles which won't overflow 64bit multiplication * @mask: clocksource mask * @mult: clocksource multiplier * @shift: clocksource shift @@ -92,6 +93,9 @@ struct vdso_data { s32 clock_mode; u64 cycle_last; +#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT + u64 max_cycles; +#endif u64 mask; u32 mult; u32 shift; diff --git a/include/vdso/math64.h b/include/vdso/math64.h index 7da703ee55..22ae212f8b 100644 --- a/include/vdso/math64.h +++ b/include/vdso/math64.h @@ -21,4 +21,42 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) return ret; } +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && 
defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_add_u64_shr
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+ return (u64)((((unsigned __int128)a * mul) + b) >> shift);
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#else
+
+#ifndef mul_u64_u32_add_u64_shr
+#ifndef mul_u32_u32
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+ return (u64)a * b;
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+ u32 ah = a >> 32, al = a;
+ bool ovf;
+ u64 ret;
+
+ ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
+ ret >>= shift;
+ if (ovf && shift)
+ ret += 1ULL << (64 - shift);
+ if (ah)
+ ret += mul_u32_u32(ah, mul) << (32 - shift);
+
+ return ret;
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#endif
+
 #endif /* __VDSO_MATH64_H */
diff --git a/include/video/omapfb_dss.h b/include/video/omapfb_dss.h
index e8eaac2cb7..a8c0c3eeeb 100644
--- a/include/video/omapfb_dss.h
+++ b/include/video/omapfb_dss.h
@@ -819,9 +819,6 @@ struct device_node *
omapdss_of_get_next_endpoint(const struct device_node *parent,
 struct device_node *prev);
-struct device_node *
-omapdss_of_get_first_endpoint(const struct device_node *parent);
-
 struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node);
 #else
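
Note on the include/vdso/math64.h hunk above: when the architecture lacks
128-bit integer support, the new mul_u64_u32_add_u64_shr() fallback computes
(a * mul + b) >> shift by splitting 'a' into 32-bit halves, catching the
carry out of the low-half multiply-add with __builtin_add_overflow() and
re-injecting it as 2^(64 - shift) after the shift; the high half contributes
ah * mul shifted left by (32 - shift). Per its new kerneldoc line, the
max_cycles field added to vdso_data bounds the cycle delta below the point
where a plain 64-bit multiplication would overflow, which is when this
wide-multiply path matters. A minimal user-space sketch (not kernel code;
the typedefs, sample inputs, and the assumption that shift stays in 1..32 as
in clocksource usage are all illustrative) comparing the fallback against
the __int128 reference:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;	/* stand-ins for the kernel types */
typedef uint64_t u64;

/* Reference path: full 128-bit multiply-add, then shift. */
static u64 ref_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}

/* Fallback path, mirroring the 32x32 decomposition in the hunk above. */
static u64 fallback_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	bool ovf;
	u64 ret;

	/* Low half times mul, plus b; at most one carry out of 64 bits. */
	ovf = __builtin_add_overflow((u64)al * mul, b, &ret);
	ret >>= shift;
	if (ovf && shift)
		ret += 1ULL << (64 - shift);	/* re-inject the lost 2^64 carry */
	if (ah)
		ret += ((u64)ah * mul) << (32 - shift);	/* high half is pre-scaled by 2^32 */

	return ret;
}

int main(void)
{
	u64 a = 0xdeadbeefcafef00dULL, b = 0x123456789aULL;	/* arbitrary sample inputs */
	u32 mul = 0x9e3779b9;

	for (unsigned int shift = 1; shift <= 32; shift++)
		if (ref_shr(a, mul, b, shift) != fallback_shr(a, mul, b, shift))
			printf("mismatch at shift %u\n", shift);
	printf("check complete\n");
	return 0;
}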
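
Likewise, the new ufshcd_mcq_opr_offset() helper in include/ufs/ufshcd.h
encodes the MCQ operation-register layout: each register group starts at a
base offset, and the copy belonging to queue 'idx' repeats every 'stride'
bytes. A hypothetical stand-alone sketch of that addressing scheme (the
struct name and the sample offset/stride values are invented for
illustration, not taken from the driver):

#include <stdio.h>

struct mcq_opr_info {
	unsigned int offset;	/* base offset of the register group */
	unsigned int stride;	/* bytes between consecutive queues' copies */
};

/* Mirrors the offset + stride * idx arithmetic of ufshcd_mcq_opr_offset(). */
static unsigned int opr_offset(const struct mcq_opr_info *opr, int idx)
{
	return opr->offset + opr->stride * idx;
}

int main(void)
{
	struct mcq_opr_info grp = { .offset = 0x1000, .stride = 0x100 };

	for (int idx = 0; idx < 4; idx++)
		printf("queue %d registers at 0x%x\n", idx, opr_offset(&grp, idx));
	return 0;
}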