author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit    01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree      b406c5242a088c4f59c6e4b719b783f43aca6ae9 /include
parent    Adding upstream version 6.7.12. (diff)
download  linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.tar.xz
          linux-01a69402cf9d38ff180345d55c2ee51c7e89fbc7.zip
Adding upstream version 6.8.9. (tag: upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include')
-rw-r--r-- include/acpi/acpi_bus.h | 187
-rw-r--r-- include/acpi/actbl2.h | 12
-rw-r--r-- include/acpi/cppc_acpi.h | 2
-rw-r--r-- include/acpi/video.h | 9
-rw-r--r-- include/asm-generic/Kbuild | 1
-rw-r--r-- include/asm-generic/barrier.h | 8
-rw-r--r-- include/asm-generic/cfi.h | 5
-rw-r--r-- include/asm-generic/checksum.h | 6
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 11
-rw-r--r-- include/crypto/hash.h | 4
-rw-r--r-- include/crypto/if_alg.h | 2
-rw-r--r-- include/crypto/skcipher.h | 133
-rw-r--r-- include/drm/bridge/aux-bridge.h | 52
-rw-r--r-- include/drm/display/drm_dp.h | 28
-rw-r--r-- include/drm/display/drm_dp_helper.h | 32
-rw-r--r-- include/drm/display/drm_dp_mst_helper.h | 14
-rw-r--r-- include/drm/drm_atomic_helper.h | 5
-rw-r--r-- include/drm/drm_auth.h | 22
-rw-r--r-- include/drm/drm_bridge.h | 2
-rw-r--r-- include/drm/drm_color_mgmt.h | 19
-rw-r--r-- include/drm/drm_device.h | 71
-rw-r--r-- include/drm/drm_drv.h | 19
-rw-r--r-- include/drm/drm_edid.h | 153
-rw-r--r-- include/drm/drm_eld.h | 164
-rw-r--r-- include/drm/drm_encoder.h | 16
-rw-r--r-- include/drm/drm_exec.h | 2
-rw-r--r-- include/drm/drm_file.h | 5
-rw-r--r-- include/drm/drm_flip_work.h | 20
-rw-r--r-- include/drm/drm_format_helper.h | 81
-rw-r--r-- include/drm/drm_framebuffer.h | 12
-rw-r--r-- include/drm/drm_gem.h | 45
-rw-r--r-- include/drm/drm_gem_atomic_helper.h | 10
-rw-r--r-- include/drm/drm_gpuvm.h | 576
-rw-r--r-- include/drm/drm_ioctl.h | 11
-rw-r--r-- include/drm/drm_legacy.h | 331
-rw-r--r-- include/drm/drm_mipi_dbi.h | 4
-rw-r--r-- include/drm/drm_mode_object.h | 2
-rw-r--r-- include/drm/drm_modeset_helper_vtables.h | 16
-rw-r--r-- include/drm/drm_plane.h | 21
-rw-r--r-- include/drm/drm_plane_helper.h | 2
-rw-r--r-- include/drm/drm_print.h | 2
-rw-r--r-- include/drm/drm_property.h | 6
-rw-r--r-- include/drm/gpu_scheduler.h | 56
-rw-r--r-- include/drm/i915_pciids.h | 3
-rw-r--r-- include/drm/ttm/ttm_pool.h | 2
-rw-r--r-- include/drm/xe_pciids.h | 190
-rw-r--r-- include/dt-bindings/arm/qcom,ids.h | 1
-rw-r--r-- include/dt-bindings/clock/g12a-clkc.h | 8
-rw-r--r-- include/dt-bindings/clock/google,gs101.h | 392
-rw-r--r-- include/dt-bindings/clock/mediatek,mt7988-clk.h | 280
-rw-r--r-- include/dt-bindings/clock/qcom,gcc-msm8939.h | 6
-rw-r--r-- include/dt-bindings/clock/qcom,qdu1000-ecpricc.h | 147
-rw-r--r-- include/dt-bindings/clock/qcom,sc8280xp-camcc.h | 179
-rw-r--r-- include/dt-bindings/clock/qcom,sm8650-dispcc.h | 102
-rw-r--r-- include/dt-bindings/clock/qcom,sm8650-gcc.h | 254
-rw-r--r-- include/dt-bindings/clock/qcom,sm8650-gpucc.h | 43
-rw-r--r-- include/dt-bindings/clock/qcom,sm8650-tcsr.h | 18
-rw-r--r-- include/dt-bindings/clock/qcom,videocc-sm8150.h | 4
-rw-r--r-- include/dt-bindings/clock/qcom,x1e80100-gcc.h | 485
-rw-r--r-- include/dt-bindings/clock/sophgo,cv1800.h | 176
-rw-r--r-- include/dt-bindings/clock/st,stm32mp25-rcc.h | 492
-rw-r--r-- include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h | 69
-rw-r--r-- include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h | 19
-rw-r--r-- include/dt-bindings/iio/qcom,spmi-vadc.h | 3
-rw-r--r-- include/dt-bindings/interconnect/qcom,sm6115.h | 111
-rw-r--r-- include/dt-bindings/interconnect/qcom,sm8650-rpmh.h | 154
-rw-r--r-- include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h | 207
-rw-r--r-- include/dt-bindings/power/meson-g12a-power.h | 1
-rw-r--r-- include/dt-bindings/reset/amlogic,c3-reset.h | 119
-rw-r--r-- include/dt-bindings/reset/mediatek,mt7988-resets.h | 13
-rw-r--r-- include/dt-bindings/reset/mt8188-resets.h | 75
-rw-r--r-- include/dt-bindings/reset/qcom,sm8650-gpucc.h | 20
-rw-r--r-- include/dt-bindings/reset/st,stm32mp25-rcc.h | 167
-rw-r--r-- include/dt-bindings/soc/rockchip,vop2.h | 4
-rw-r--r-- include/kunit/device.h | 80
-rw-r--r-- include/kunit/skbuff.h | 56
-rw-r--r-- include/kunit/static_stub.h | 2
-rw-r--r-- include/kunit/test.h | 52
-rw-r--r-- include/kvm/arm_pmu.h | 2
-rw-r--r-- include/linux/acpi.h | 33
-rw-r--r-- include/linux/acpi_amd_wbrf.h | 91
-rw-r--r-- include/linux/aer.h | 8
-rw-r--r-- include/linux/amba/clcd-regs.h | 87
-rw-r--r-- include/linux/amba/clcd.h | 290
-rw-r--r-- include/linux/amba/serial.h | 261
-rw-r--r-- include/linux/amd-pmf-io.h | 50
-rw-r--r-- include/linux/anon_inodes.h | 4
-rw-r--r-- include/linux/apple-mailbox.h | 19
-rw-r--r-- include/linux/arch_topology.h | 8
-rw-r--r-- include/linux/audit.h | 1
-rw-r--r-- include/linux/avf/virtchnl.h | 35
-rw-r--r-- include/linux/backing-file.h | 42
-rw-r--r-- include/linux/bio.h | 9
-rw-r--r-- include/linux/blk-mq.h | 9
-rw-r--r-- include/linux/blk_types.h | 16
-rw-r--r-- include/linux/blkdev.h | 190
-rw-r--r-- include/linux/bootconfig.h | 8
-rw-r--r-- include/linux/bpf.h | 73
-rw-r--r-- include/linux/bpf_mem_alloc.h | 8
-rw-r--r-- include/linux/bpf_verifier.h | 172
-rw-r--r-- include/linux/bpfilter.h | 24
-rw-r--r-- include/linux/btf.h | 2
-rw-r--r-- include/linux/buffer_head.h | 9
-rw-r--r-- include/linux/cache.h | 25
-rw-r--r-- include/linux/cdx/cdx_bus.h | 12
-rw-r--r-- include/linux/ceph/osd_client.h | 7
-rw-r--r-- include/linux/cfi.h | 12
-rw-r--r-- include/linux/cgroup-defs.h | 21
-rw-r--r-- include/linux/cgroup.h | 4
-rw-r--r-- include/linux/cleanup.h | 52
-rw-r--r-- include/linux/compiler-gcc.h | 2
-rw-r--r-- include/linux/connector.h | 3
-rw-r--r-- include/linux/container.h | 2
-rw-r--r-- include/linux/coresight.h | 1
-rw-r--r-- include/linux/cper.h | 23
-rw-r--r-- include/linux/cpu.h | 5
-rw-r--r-- include/linux/cpufreq.h | 1
-rw-r--r-- include/linux/cpuhotplug.h | 18
-rw-r--r-- include/linux/cpuset.h | 6
-rw-r--r-- include/linux/crash_core.h | 6
-rw-r--r-- include/linux/crc-ccitt.h | 7
-rw-r--r-- include/linux/cxl-event.h | 143
-rw-r--r-- include/linux/damon.h | 22
-rw-r--r-- include/linux/dcache.h | 163
-rw-r--r-- include/linux/device.h | 10
-rw-r--r-- include/linux/device/bus.h | 7
-rw-r--r-- include/linux/device/class.h | 2
-rw-r--r-- include/linux/dma-buf.h | 11
-rw-r--r-- include/linux/dma-direct.h | 19
-rw-r--r-- include/linux/dma-fence.h | 8
-rw-r--r-- include/linux/dma-map-ops.h | 9
-rw-r--r-- include/linux/dpll.h | 25
-rw-r--r-- include/linux/edac.h | 7
-rw-r--r-- include/linux/efi.h | 12
-rw-r--r-- include/linux/energy_model.h | 7
-rw-r--r-- include/linux/entry-common.h | 95
-rw-r--r-- include/linux/etherdevice.h | 25
-rw-r--r-- include/linux/ethtool.h | 89
-rw-r--r-- include/linux/eventfd.h | 17
-rw-r--r-- include/linux/evm.h | 6
-rw-r--r-- include/linux/export.h | 18
-rw-r--r-- include/linux/f2fs_fs.h | 2
-rw-r--r-- include/linux/fb.h | 16
-rw-r--r-- include/linux/fdtable.h | 19
-rw-r--r-- include/linux/file.h | 12
-rw-r--r-- include/linux/filter.h | 2
-rw-r--r-- include/linux/firewire.h | 2
-rw-r--r-- include/linux/firmware.h | 2
-rw-r--r-- include/linux/firmware/qcom/qcom_qseecom.h | 55
-rw-r--r-- include/linux/firmware/qcom/qcom_scm.h | 10
-rw-r--r-- include/linux/firmware/xlnx-zynqmp.h | 59
-rw-r--r-- include/linux/fortify-string.h | 51
-rw-r--r-- include/linux/framer/framer-provider.h | 194
-rw-r--r-- include/linux/framer/framer.h | 205
-rw-r--r-- include/linux/framer/pef2256.h | 31
-rw-r--r-- include/linux/fs.h | 132
-rw-r--r-- include/linux/fscache-cache.h | 3
-rw-r--r-- include/linux/fscache.h | 45
-rw-r--r-- include/linux/fsnotify.h | 69
-rw-r--r-- include/linux/fsnotify_backend.h | 14
-rw-r--r-- include/linux/fw_table.h | 21
-rw-r--r-- include/linux/gfp_types.h | 17
-rw-r--r-- include/linux/gpio/driver.h | 53
-rw-r--r-- include/linux/gpio/property.h | 1
-rw-r--r-- include/linux/gpio_keys.h | 2
-rw-r--r-- include/linux/habanalabs/cpucp_if.h | 8
-rw-r--r-- include/linux/hid.h | 2
-rw-r--r-- include/linux/hid_bpf.h | 13
-rw-r--r-- include/linux/highmem.h | 76
-rw-r--r-- include/linux/hisi_acc_qm.h | 8
-rw-r--r-- include/linux/hrtimer.h | 46
-rw-r--r-- include/linux/hrtimer_types.h | 50
-rw-r--r-- include/linux/huge_mm.h | 184
-rw-r--r-- include/linux/hugetlb.h | 2
-rw-r--r-- include/linux/i2c.h | 5
-rw-r--r-- include/linux/i3c/device.h | 2
-rw-r--r-- include/linux/i3c/master.h | 9
-rw-r--r-- include/linux/ieee80211.h | 5
-rw-r--r-- include/linux/if_vlan.h | 4
-rw-r--r-- include/linux/iio/buffer-dma.h | 7
-rw-r--r-- include/linux/iio/iio.h | 6
-rw-r--r-- include/linux/iio/types.h | 1
-rw-r--r-- include/linux/indirect_call_wrapper.h | 2
-rw-r--r-- include/linux/init.h | 7
-rw-r--r-- include/linux/init_task.h | 7
-rw-r--r-- include/linux/input/as5011.h | 1
-rw-r--r-- include/linux/input/navpoint.h | 1
-rw-r--r-- include/linux/intel-ish-client-if.h | 3
-rw-r--r-- include/linux/intel_tpmi.h | 18
-rw-r--r-- include/linux/interrupt.h | 3
-rw-r--r-- include/linux/io-pgtable.h | 34
-rw-r--r-- include/linux/io_uring.h | 92
-rw-r--r-- include/linux/io_uring/cmd.h | 77
-rw-r--r-- include/linux/io_uring_types.h | 32
-rw-r--r-- include/linux/iommu.h | 118
-rw-r--r-- include/linux/ioport.h | 3
-rw-r--r-- include/linux/ioprio.h | 25
-rw-r--r-- include/linux/iosys-map.h | 44
-rw-r--r-- include/linux/ipc.h | 2
-rw-r--r-- include/linux/irqflags.h | 16
-rw-r--r-- include/linux/irqflags_types.h | 22
-rw-r--r-- include/linux/ism.h | 1
-rw-r--r-- include/linux/jbd2.h | 37
-rw-r--r-- include/linux/kasan.h | 162
-rw-r--r-- include/linux/kernfs.h | 2
-rw-r--r-- include/linux/kexec.h | 9
-rw-r--r-- include/linux/kfence.h | 2
-rw-r--r-- include/linux/kmsan_types.h | 2
-rw-r--r-- include/linux/ksm.h | 10
-rw-r--r-- include/linux/ktime.h | 8
-rw-r--r-- include/linux/kvm_host.h | 207
-rw-r--r-- include/linux/kvm_types.h | 1
-rw-r--r-- include/linux/leds.h | 20
-rw-r--r-- include/linux/libata.h | 2
-rw-r--r-- include/linux/linkmode.h | 5
-rw-r--r-- include/linux/list.h | 20
-rw-r--r-- include/linux/list_lru.h | 88
-rw-r--r-- include/linux/lockdep.h | 57
-rw-r--r-- include/linux/lockdep_types.h | 59
-rw-r--r-- include/linux/lsm_hook_defs.h | 4
-rw-r--r-- include/linux/lsm_hooks.h | 17
-rw-r--r-- include/linux/maple.h | 1
-rw-r--r-- include/linux/maple_tree.h | 349
-rw-r--r-- include/linux/mdio.h | 1
-rw-r--r-- include/linux/memblock.h | 1
-rw-r--r-- include/linux/memcontrol.h | 37
-rw-r--r-- include/linux/memory-tiers.h | 10
-rw-r--r-- include/linux/mempool.h | 1
-rw-r--r-- include/linux/memremap.h | 12
-rw-r--r-- include/linux/mfd/max77693-private.h | 2
-rw-r--r-- include/linux/mfd/max77843-private.h | 2
-rw-r--r-- include/linux/mfd/si476x-platform.h | 2
-rw-r--r-- include/linux/mfd/tps65910.h | 2
-rw-r--r-- include/linux/mhi.h | 4
-rw-r--r-- include/linux/mhi_ep.h | 21
-rw-r--r-- include/linux/mii_timestamper.h | 4
-rw-r--r-- include/linux/mlx5/device.h | 2
-rw-r--r-- include/linux/mlx5/driver.h | 5
-rw-r--r-- include/linux/mlx5/eswitch.h | 8
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 68
-rw-r--r-- include/linux/mlx5/mlx5_ifc_vdpa.h | 4
-rw-r--r-- include/linux/mlx5/vport.h | 1
-rw-r--r-- include/linux/mm.h | 59
-rw-r--r-- include/linux/mm_types.h | 34
-rw-r--r-- include/linux/mm_types_task.h | 5
-rw-r--r-- include/linux/mmc/card.h | 5
-rw-r--r-- include/linux/mmc/core.h | 1
-rw-r--r-- include/linux/mmc/mmc.h | 10
-rw-r--r-- include/linux/mmzone.h | 52
-rw-r--r-- include/linux/mnt_idmapping.h | 3
-rw-r--r-- include/linux/module.h | 4
-rw-r--r-- include/linux/moduleparam.h | 6
-rw-r--r-- include/linux/mount.h | 5
-rw-r--r-- include/linux/moxtet.h | 2
-rw-r--r-- include/linux/mtd/rawnand.h | 13
-rw-r--r-- include/linux/mutex.h | 55
-rw-r--r-- include/linux/mutex_types.h | 71
-rw-r--r-- include/linux/net/intel/i40e_client.h | 2
-rw-r--r-- include/linux/netdevice.h | 196
-rw-r--r-- include/linux/netfilter_ipv6.h | 8
-rw-r--r-- include/linux/netfs.h | 181
-rw-r--r-- include/linux/netlink.h | 7
-rw-r--r-- include/linux/nfs4.h | 22
-rw-r--r-- include/linux/nfs_fs.h | 1
-rw-r--r-- include/linux/nfs_fs_sb.h | 2
-rw-r--r-- include/linux/node.h | 26
-rw-r--r-- include/linux/nodemask.h | 2
-rw-r--r-- include/linux/nodemask_types.h | 10
-rw-r--r-- include/linux/nsproxy.h | 1
-rw-r--r-- include/linux/nubus.h | 2
-rw-r--r-- include/linux/numa.h | 19
-rw-r--r-- include/linux/nvme.h | 12
-rw-r--r-- include/linux/nvmem-consumer.h | 8
-rw-r--r-- include/linux/nvmem-provider.h | 70
-rw-r--r-- include/linux/of_device.h | 11
-rw-r--r-- include/linux/of_iommu.h | 13
-rw-r--r-- include/linux/of_platform.h | 4
-rw-r--r-- include/linux/overflow.h | 12
-rw-r--r-- include/linux/page-flags.h | 153
-rw-r--r-- include/linux/pageblock-flags.h | 4
-rw-r--r-- include/linux/pagemap.h | 17
-rw-r--r-- include/linux/pci-ecam.h | 2
-rw-r--r-- include/linux/pci-epc.h | 13
-rw-r--r-- include/linux/pci-epf.h | 4
-rw-r--r-- include/linux/pci.h | 12
-rw-r--r-- include/linux/pci_ids.h | 2
-rw-r--r-- include/linux/perf/arm_pmu.h | 28
-rw-r--r-- include/linux/perf/arm_pmuv3.h | 34
-rw-r--r-- include/linux/perf_event.h | 22
-rw-r--r-- include/linux/pgtable.h | 30
-rw-r--r-- include/linux/phy.h | 90
-rw-r--r-- include/linux/phylink.h | 66
-rw-r--r-- include/linux/pid.h | 140
-rw-r--r-- include/linux/pid_types.h | 16
-rw-r--r-- include/linux/pinctrl/machine.h | 6
-rw-r--r-- include/linux/pinctrl/pinconf-generic.h | 10
-rw-r--r-- include/linux/pinctrl/pinconf.h | 16
-rw-r--r-- include/linux/pinctrl/pinctrl.h | 24
-rw-r--r-- include/linux/pinctrl/pinmux.h | 22
-rw-r--r-- include/linux/platform_data/i2c-mux-reg.h | 2
-rw-r--r-- include/linux/platform_data/keypad-omap.h | 3
-rw-r--r-- include/linux/platform_data/microchip-ksz.h | 23
-rw-r--r-- include/linux/platform_data/si5351.h | 2
-rw-r--r-- include/linux/platform_data/x86/clk-lpss.h | 2
-rw-r--r-- include/linux/plist.h | 12
-rw-r--r-- include/linux/plist_types.h | 17
-rw-r--r-- include/linux/pm.h | 1
-rw-r--r-- include/linux/pm_clock.h | 4
-rw-r--r-- include/linux/pm_domain.h | 12
-rw-r--r-- include/linux/pm_opp.h | 24
-rw-r--r-- include/linux/pnp.h | 2
-rw-r--r-- include/linux/poison.h | 5
-rw-r--r-- include/linux/posix-timers.h | 69
-rw-r--r-- include/linux/posix-timers_types.h | 80
-rw-r--r-- include/linux/power/bq27xxx_battery.h | 1
-rw-r--r-- include/linux/prandom.h | 1
-rw-r--r-- include/linux/preempt.h | 6
-rw-r--r-- include/linux/printk.h | 2
-rw-r--r-- include/linux/proc_fs.h | 1
-rw-r--r-- include/linux/property.h | 46
-rw-r--r-- include/linux/pwm.h | 84
-rw-r--r-- include/linux/quotaops.h | 15
-rw-r--r-- include/linux/randomize_kstack.h | 2
-rw-r--r-- include/linux/rculist.h | 2
-rw-r--r-- include/linux/rcupdate.h | 7
-rw-r--r-- include/linux/rcupdate_wait.h | 10
-rw-r--r-- include/linux/reboot.h | 12
-rw-r--r-- include/linux/refcount.h | 13
-rw-r--r-- include/linux/refcount_types.h | 19
-rw-r--r-- include/linux/regulator/consumer.h | 47
-rw-r--r-- include/linux/regulator/driver.h | 7
-rw-r--r-- include/linux/regulator/machine.h | 18
-rw-r--r-- include/linux/restart_block.h | 2
-rw-r--r-- include/linux/resume_user_mode.h | 1
-rw-r--r-- include/linux/rhashtable-types.h | 2
-rw-r--r-- include/linux/ring_buffer.h | 21
-rw-r--r-- include/linux/rmap.h | 411
-rw-r--r-- include/linux/rseq.h | 131
-rw-r--r-- include/linux/rslib.h | 1
-rw-r--r-- include/linux/rtnetlink.h | 41
-rw-r--r-- include/linux/rtsx_pci.h | 8
-rw-r--r-- include/linux/rwsem.h | 8
-rw-r--r-- include/linux/sched.h | 406
-rw-r--r-- include/linux/sched/isolation.h | 4
-rw-r--r-- include/linux/sched/signal.h | 5
-rw-r--r-- include/linux/sched/task.h | 4
-rw-r--r-- include/linux/sched/task_stack.h | 1
-rw-r--r-- include/linux/sched/topology.h | 8
-rw-r--r-- include/linux/seccomp.h | 24
-rw-r--r-- include/linux/seccomp_types.h | 35
-rw-r--r-- include/linux/secretmem.h | 4
-rw-r--r-- include/linux/security.h | 47
-rw-r--r-- include/linux/sem.h | 10
-rw-r--r-- include/linux/sem_types.h | 13
-rw-r--r-- include/linux/seq_buf.h | 17
-rw-r--r-- include/linux/seqlock.h | 79
-rw-r--r-- include/linux/seqlock_types.h | 93
-rw-r--r-- include/linux/serdev.h | 29
-rw-r--r-- include/linux/serial_core.h | 4
-rw-r--r-- include/linux/shm.h | 4
-rw-r--r-- include/linux/shmem_fs.h | 9
-rw-r--r-- include/linux/signal.h | 1
-rw-r--r-- include/linux/signal_types.h | 2
-rw-r--r-- include/linux/sizes.h | 9
-rw-r--r-- include/linux/skbuff.h | 52
-rw-r--r-- include/linux/skmsg.h | 5
-rw-r--r-- include/linux/slab.h | 24
-rw-r--r-- include/linux/slab_def.h | 124
-rw-r--r-- include/linux/slub_def.h | 204
-rw-r--r-- include/linux/soc/apple/rtkit.h | 18
-rw-r--r-- include/linux/soc/mediatek/mtk-mmsys.h | 8
-rw-r--r-- include/linux/soundwire/sdw.h | 6
-rw-r--r-- include/linux/spi/spi-mem.h | 2
-rw-r--r-- include/linux/spi/spi.h | 65
-rw-r--r-- include/linux/spinlock.h | 72
-rw-r--r-- include/linux/splice.h | 51
-rw-r--r-- include/linux/spmi.h | 3
-rw-r--r-- include/linux/stackdepot.h | 107
-rw-r--r-- include/linux/string.h | 3
-rw-r--r-- include/linux/sunrpc/bc_xprt.h | 3
-rw-r--r-- include/linux/sunrpc/clnt.h | 1
-rw-r--r-- include/linux/sunrpc/sched.h | 16
-rw-r--r-- include/linux/sunrpc/svc.h | 37
-rw-r--r-- include/linux/sunrpc/svc_rdma.h | 67
-rw-r--r-- include/linux/sunrpc/svcauth.h | 7
-rw-r--r-- include/linux/sunrpc/xprt.h | 11
-rw-r--r-- include/linux/surface_aggregator/device.h | 1
-rw-r--r-- include/linux/surface_aggregator/serial_hub.h | 4
-rw-r--r-- include/linux/swap.h | 8
-rw-r--r-- include/linux/swapops.h | 65
-rw-r--r-- include/linux/syscall_user_dispatch.h | 9
-rw-r--r-- include/linux/syscall_user_dispatch_types.h | 22
-rw-r--r-- include/linux/syscalls.h | 14
-rw-r--r-- include/linux/sysctl.h | 7
-rw-r--r-- include/linux/tcp.h | 254
-rw-r--r-- include/linux/tee_drv.h | 16
-rw-r--r-- include/linux/thermal.h | 33
-rw-r--r-- include/linux/thunderbolt.h | 2
-rw-r--r-- include/linux/time_namespace.h | 3
-rw-r--r-- include/linux/timekeeping.h | 1
-rw-r--r-- include/linux/timer.h | 16
-rw-r--r-- include/linux/timer_types.h | 23
-rw-r--r-- include/linux/timerqueue.h | 13
-rw-r--r-- include/linux/timerqueue_types.h | 17
-rw-r--r-- include/linux/tnum.h | 4
-rw-r--r-- include/linux/torture.h | 1
-rw-r--r-- include/linux/trace.h | 4
-rw-r--r-- include/linux/trace_events.h | 5
-rw-r--r-- include/linux/trace_seq.h | 15
-rw-r--r-- include/linux/tty.h | 20
-rw-r--r-- include/linux/tty_driver.h | 9
-rw-r--r-- include/linux/tty_port.h | 8
-rw-r--r-- include/linux/types.h | 3
-rw-r--r-- include/linux/u64_stats_sync.h | 9
-rw-r--r-- include/linux/udp.h | 30
-rw-r--r-- include/linux/uidgid.h | 24
-rw-r--r-- include/linux/uidgid_types.h | 15
-rw-r--r-- include/linux/uio.h | 18
-rw-r--r-- include/linux/usb.h | 31
-rw-r--r-- include/linux/usb/hcd.h | 5
-rw-r--r-- include/linux/usb/quirks.h | 3
-rw-r--r-- include/linux/usb/tcpci.h | 5
-rw-r--r-- include/linux/usb/tcpm.h | 1
-rw-r--r-- include/linux/userfaultfd_k.h | 11
-rw-r--r-- include/linux/vfio.h | 7
-rw-r--r-- include/linux/vfio_pci_core.h | 20
-rw-r--r-- include/linux/virtio.h | 8
-rw-r--r-- include/linux/virtio_config.h | 4
-rw-r--r-- include/linux/virtio_console.h | 38
-rw-r--r-- include/linux/virtio_pci_admin.h | 23
-rw-r--r-- include/linux/virtio_pci_modern.h | 2
-rw-r--r-- include/linux/vm_event_item.h | 4
-rw-r--r-- include/linux/vmstat.h | 60
-rw-r--r-- include/linux/w1-gpio.h | 22
-rw-r--r-- include/linux/wait.h | 1
-rw-r--r-- include/linux/wmi.h | 20
-rw-r--r-- include/linux/workqueue.h | 53
-rw-r--r-- include/linux/workqueue_types.h | 25
-rw-r--r-- include/linux/writeback.h | 3
-rw-r--r-- include/linux/zswap.h | 32
-rw-r--r-- include/media/cec.h | 22
-rw-r--r-- include/media/v4l2-cci.h | 5
-rw-r--r-- include/media/v4l2-common.h | 4
-rw-r--r-- include/media/v4l2-mem2mem.h | 9
-rw-r--r-- include/media/v4l2-subdev.h | 419
-rw-r--r-- include/media/videobuf2-core.h | 61
-rw-r--r-- include/net/act_api.h | 6
-rw-r--r-- include/net/addrconf.h | 4
-rw-r--r-- include/net/af_unix.h | 5
-rw-r--r-- include/net/bluetooth/bluetooth.h | 11
-rw-r--r-- include/net/bluetooth/hci.h | 25
-rw-r--r-- include/net/bluetooth/hci_core.h | 63
-rw-r--r-- include/net/bluetooth/hci_sync.h | 20
-rw-r--r-- include/net/bluetooth/l2cap.h | 2
-rw-r--r-- include/net/cfg80211.h | 148
-rw-r--r-- include/net/cfg802154.h | 72
-rw-r--r-- include/net/dropreason-core.h | 24
-rw-r--r-- include/net/fib_rules.h | 3
-rw-r--r-- include/net/genetlink.h | 55
-rw-r--r-- include/net/ieee802154_netdev.h | 60
-rw-r--r-- include/net/inet_connection_sock.h | 1
-rw-r--r-- include/net/inet_hashtables.h | 21
-rw-r--r-- include/net/inet_sock.h | 5
-rw-r--r-- include/net/inet_timewait_sock.h | 4
-rw-r--r-- include/net/ip.h | 10
-rw-r--r-- include/net/ip_tunnels.h | 44
-rw-r--r-- include/net/ipv6.h | 5
-rw-r--r-- include/net/iucv/iucv.h | 4
-rw-r--r-- include/net/mac80211.h | 61
-rw-r--r-- include/net/macsec.h | 56
-rw-r--r-- include/net/mana/gdma.h | 12
-rw-r--r-- include/net/mana/mana.h | 47
-rw-r--r-- include/net/netdev_rx_queue.h | 4
-rw-r--r-- include/net/netfilter/nf_flow_table.h | 21
-rw-r--r-- include/net/netfilter/nf_tables.h | 63
-rw-r--r-- include/net/netlink.h | 47
-rw-r--r-- include/net/netns/core.h | 1
-rw-r--r-- include/net/netns/ipv4.h | 50
-rw-r--r-- include/net/netns/smc.h | 2
-rw-r--r-- include/net/nl802154.h | 22
-rw-r--r-- include/net/page_pool/helpers.h | 85
-rw-r--r-- include/net/page_pool/types.h | 49
-rw-r--r-- include/net/pkt_cls.h | 6
-rw-r--r-- include/net/pkt_sched.h | 18
-rw-r--r-- include/net/sch_generic.h | 44
-rw-r--r-- include/net/scm.h | 9
-rw-r--r-- include/net/smc.h | 16
-rw-r--r-- include/net/sock.h | 76
-rw-r--r-- include/net/tc_act/tc_ipt.h | 17
-rw-r--r-- include/net/tc_act/tc_mirred.h | 1
-rw-r--r-- include/net/tc_wrapper.h | 4
-rw-r--r-- include/net/tcp.h | 22
-rw-r--r-- include/net/tcp_ao.h | 6
-rw-r--r-- include/net/tcp_states.h | 2
-rw-r--r-- include/net/tls.h | 3
-rw-r--r-- include/net/vxlan.h | 33
-rw-r--r-- include/net/xdp.h | 20
-rw-r--r-- include/net/xdp_sock.h | 113
-rw-r--r-- include/net/xdp_sock_drv.h | 51
-rw-r--r-- include/net/xfrm.h | 9
-rw-r--r-- include/net/xsk_buff_pool.h | 10
-rw-r--r-- include/soc/fsl/qe/qmc.h | 27
-rw-r--r-- include/soc/microchip/mpfs.h | 2
-rw-r--r-- include/soc/qcom/qcom-spmi-pmic.h | 1
-rw-r--r-- include/soc/tegra/mc.h | 1
-rw-r--r-- include/sound/ac97_codec.h | 2
-rw-r--r-- include/sound/cs35l56.h | 7
-rw-r--r-- include/sound/cs4271.h | 1
-rw-r--r-- include/sound/hda_codec.h | 5
-rw-r--r-- include/sound/hdaudio.h | 15
-rw-r--r-- include/sound/hdaudio_ext.h | 3
-rw-r--r-- include/sound/pcm.h | 7
-rw-r--r-- include/sound/pcm_params.h | 2
-rw-r--r-- include/sound/rt5682s.h | 8
-rw-r--r-- include/sound/simple_card_utils.h | 3
-rw-r--r-- include/sound/soc.h | 59
-rw-r--r-- include/sound/sof.h | 15
-rw-r--r-- include/sound/sof/dai-imx.h | 7
-rw-r--r-- include/sound/sof/dai.h | 2
-rw-r--r-- include/sound/sof/ipc4/header.h | 35
-rw-r--r-- include/sound/sof/topology.h | 61
-rw-r--r-- include/sound/tas2781.h | 12
-rw-r--r-- include/sound/wm0010.h | 6
-rw-r--r-- include/sound/wm1250-ev1.h | 24
-rw-r--r-- include/sound/wm2200.h | 2
-rw-r--r-- include/sound/wm5100.h | 4
-rw-r--r-- include/sound/wm8996.h | 3
-rw-r--r-- include/trace/events/afs.h | 800
-rw-r--r-- include/trace/events/btrfs.h | 78
-rw-r--r-- include/trace/events/ext4.h | 11
-rw-r--r-- include/trace/events/f2fs.h | 127
-rw-r--r-- include/trace/events/ksm.h | 33
-rw-r--r-- include/trace/events/kvm.h | 8
-rw-r--r-- include/trace/events/mmflags.h | 1
-rw-r--r-- include/trace/events/netfs.h | 155
-rw-r--r-- include/trace/events/rpcgss.h | 4
-rw-r--r-- include/trace/events/rpcrdma.h | 238
-rw-r--r-- include/trace/events/sched.h | 15
-rw-r--r-- include/trace/events/sunrpc.h | 1
-rw-r--r-- include/trace/events/timer.h | 40
-rw-r--r-- include/uapi/asm-generic/unistd.h | 15
-rw-r--r-- include/uapi/drm/drm.h | 72
-rw-r--r-- include/uapi/drm/drm_fourcc.h | 10
-rw-r--r-- include/uapi/drm/drm_mode.h | 45
-rw-r--r-- include/uapi/drm/habanalabs_accel.h | 28
-rw-r--r-- include/uapi/drm/i915_drm.h | 12
-rw-r--r-- include/uapi/drm/ivpu_accel.h | 28
-rw-r--r-- include/uapi/drm/msm_drm.h | 3
-rw-r--r-- include/uapi/drm/nouveau_drm.h | 14
-rw-r--r-- include/uapi/drm/pvr_drm.h | 1295
-rw-r--r-- include/uapi/drm/qaic_accel.h | 5
-rw-r--r-- include/uapi/drm/v3d_drm.h | 245
-rw-r--r-- include/uapi/drm/virtgpu_drm.h | 2
-rw-r--r-- include/uapi/drm/xe_drm.h | 1327
-rw-r--r-- include/uapi/linux/android/binder.h | 30
-rw-r--r-- include/uapi/linux/batadv_packet.h | 45
-rw-r--r-- include/uapi/linux/bpf.h | 41
-rw-r--r-- include/uapi/linux/bpfilter.h | 21
-rw-r--r-- include/uapi/linux/btrfs.h | 1
-rw-r--r-- include/uapi/linux/cxl_mem.h | 1
-rw-r--r-- include/uapi/linux/devlink.h | 2
-rw-r--r-- include/uapi/linux/dpll.h | 1
-rw-r--r-- include/uapi/linux/ethtool.h | 41
-rw-r--r-- include/uapi/linux/ethtool_netlink.h | 1
-rw-r--r-- include/uapi/linux/fs.h | 1
-rw-r--r-- include/uapi/linux/if_bridge.h | 1
-rw-r--r-- include/uapi/linux/if_link.h | 529
-rw-r--r-- include/uapi/linux/if_xdp.h | 47
-rw-r--r-- include/uapi/linux/iio/types.h | 2
-rw-r--r-- include/uapi/linux/input-event-codes.h | 1
-rw-r--r-- include/uapi/linux/io_uring.h | 19
-rw-r--r-- include/uapi/linux/iommufd.h | 79
-rw-r--r-- include/uapi/linux/kexec.h | 1
-rw-r--r-- include/uapi/linux/kvm.h | 140
-rw-r--r-- include/uapi/linux/lsm.h | 90
-rw-r--r-- include/uapi/linux/mei.h | 4
-rw-r--r-- include/uapi/linux/mount.h | 70
-rw-r--r-- include/uapi/linux/mptcp.h | 1
-rw-r--r-- include/uapi/linux/mptcp_pm.h | 2
-rw-r--r-- include/uapi/linux/netdev.h | 80
-rw-r--r-- include/uapi/linux/nl80211.h | 185
-rw-r--r-- include/uapi/linux/nsm.h | 31
-rw-r--r-- include/uapi/linux/pcitest.h | 3
-rw-r--r-- include/uapi/linux/perf_event.h | 13
-rw-r--r-- include/uapi/linux/pkt_cls.h | 51
-rw-r--r-- include/uapi/linux/pkt_sched.h | 109
-rw-r--r-- include/uapi/linux/raid/md_p.h | 8
-rw-r--r-- include/uapi/linux/raid/md_u.h | 11
-rw-r--r-- include/uapi/linux/resource.h | 2
-rw-r--r-- include/uapi/linux/serial.h | 20
-rw-r--r-- include/uapi/linux/smc.h | 2
-rw-r--r-- include/uapi/linux/smc_diag.h | 2
-rw-r--r-- include/uapi/linux/stat.h | 1
-rw-r--r-- include/uapi/linux/sync_file.h | 22
-rw-r--r-- include/uapi/linux/tc_act/tc_ipt.h | 20
-rw-r--r-- include/uapi/linux/tc_act/tc_mirred.h | 1
-rw-r--r-- include/uapi/linux/thp7312.h | 19
-rw-r--r-- include/uapi/linux/usb/functionfs.h | 6
-rw-r--r-- include/uapi/linux/userfaultfd.h | 29
-rw-r--r-- include/uapi/linux/v4l2-controls.h | 6
-rw-r--r-- include/uapi/linux/v4l2-subdev.h | 15
-rw-r--r-- include/uapi/linux/vfio.h | 1
-rw-r--r-- include/uapi/linux/videodev2.h | 11
-rw-r--r-- include/uapi/linux/virtio_pci.h | 68
-rw-r--r-- include/uapi/linux/virtio_pmem.h | 7
-rw-r--r-- include/uapi/rdma/bnxt_re-abi.h | 41
-rw-r--r-- include/uapi/rdma/efa-abi.h | 21
-rw-r--r-- include/uapi/rdma/hns-abi.h | 5
-rw-r--r-- include/uapi/rdma/mlx5-abi.h | 2
-rw-r--r-- include/uapi/rdma/mlx5_user_ioctl_verbs.h | 1
-rw-r--r-- include/uapi/regulator/regulator.h | 90
-rw-r--r-- include/uapi/scsi/scsi_bsg_mpi3mr.h | 2
-rw-r--r-- include/uapi/sound/asound.h | 9
-rw-r--r-- include/uapi/sound/scarlett2.h | 54
-rw-r--r-- include/uapi/sound/sof/tokens.h | 5
-rw-r--r-- include/uapi/xen/gntalloc.h | 5
-rw-r--r-- include/ufs/ufs.h | 14
-rw-r--r-- include/ufs/ufshcd.h | 12
-rw-r--r-- include/ufs/unipro.h | 4
-rw-r--r-- include/vdso/gettime.h | 23
-rw-r--r-- include/video/sticore.h | 6
-rw-r--r-- include/xen/interface/io/displif.h | 2
-rw-r--r-- include/xen/interface/io/ring.h | 2
-rw-r--r-- include/xen/interface/io/sndif.h | 2
624 files changed, 19754 insertions, 6693 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 1216d72c65..8b45b82cd5 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -14,7 +14,7 @@
struct acpi_handle_list {
u32 count;
- acpi_handle* handles;
+ acpi_handle *handles;
};
/* acpi_utils.h */
@@ -25,16 +25,15 @@ acpi_status
acpi_evaluate_integer(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list *arguments, unsigned long long *data);
-acpi_status
-acpi_evaluate_reference(acpi_handle handle,
- acpi_string pathname,
- struct acpi_object_list *arguments,
- struct acpi_handle_list *list);
+bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list);
bool acpi_handle_list_equal(struct acpi_handle_list *list1,
struct acpi_handle_list *list2);
void acpi_handle_list_replace(struct acpi_handle_list *dst,
struct acpi_handle_list *src);
void acpi_handle_list_free(struct acpi_handle_list *list);
+bool acpi_device_dep(acpi_handle target, acpi_handle match);
acpi_status
acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
struct acpi_buffer *status_buf);
@@ -366,6 +365,98 @@ struct acpi_device_data {
struct acpi_gpio_mapping;
+#define ACPI_DEVICE_SWNODE_ROOT 0
+
+/*
+ * The maximum expected number of CSI-2 data lanes.
+ *
+ * This number is not expected to ever have to be equal to or greater than the
+ * number of bits in an unsigned long variable, but if it needs to be increased
+ * above that limit, code will need to be adjusted accordingly.
+ */
+#define ACPI_DEVICE_CSI2_DATA_LANES 8
+
+#define ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH 8
+
+enum acpi_device_swnode_dev_props {
+ ACPI_DEVICE_SWNODE_DEV_ROTATION,
+ ACPI_DEVICE_SWNODE_DEV_CLOCK_FREQUENCY,
+ ACPI_DEVICE_SWNODE_DEV_LED_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_TIMEOUT_US,
+ ACPI_DEVICE_SWNODE_DEV_NUM_OF,
+ ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_port_props {
+ ACPI_DEVICE_SWNODE_PORT_REG,
+ ACPI_DEVICE_SWNODE_PORT_NUM_OF,
+ ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_ep_props {
+ ACPI_DEVICE_SWNODE_EP_REMOTE_EP,
+ ACPI_DEVICE_SWNODE_EP_BUS_TYPE,
+ ACPI_DEVICE_SWNODE_EP_REG,
+ ACPI_DEVICE_SWNODE_EP_CLOCK_LANES,
+ ACPI_DEVICE_SWNODE_EP_DATA_LANES,
+ ACPI_DEVICE_SWNODE_EP_LANE_POLARITIES,
+ /* TX only */
+ ACPI_DEVICE_SWNODE_EP_LINK_FREQUENCIES,
+ ACPI_DEVICE_SWNODE_EP_NUM_OF,
+ ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES
+};
+
+/*
+ * Each device has a root software node plus two times as many nodes as the
+ * number of CSI-2 ports.
+ */
+#define ACPI_DEVICE_SWNODE_PORT(port) (2 * (port) + 1)
+#define ACPI_DEVICE_SWNODE_EP(endpoint) \
+ (ACPI_DEVICE_SWNODE_PORT(endpoint) + 1)
+
+/**
+ * struct acpi_device_software_node_port - MIPI DisCo for Imaging CSI-2 port
+ * @port_name: Port name.
+ * @data_lanes: "data-lanes" property values.
+ * @lane_polarities: "lane-polarities" property values.
+ * @link_frequencies: "link-frequencies" property values.
+ * @port_nr: Port number.
+ * @crs_csi2_local: _CRS CSI2 record present (i.e. this is a transmitter).
+ * @port_props: Port properties.
+ * @ep_props: Endpoint properties.
+ * @remote_ep: Reference to the remote endpoint.
+ */
+struct acpi_device_software_node_port {
+ char port_name[ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH + 1];
+ u32 data_lanes[ACPI_DEVICE_CSI2_DATA_LANES];
+ u32 lane_polarities[ACPI_DEVICE_CSI2_DATA_LANES + 1 /* clock lane */];
+ u64 link_frequencies[ACPI_DEVICE_CSI2_DATA_LANES];
+ unsigned int port_nr;
+ bool crs_csi2_local;
+
+ struct property_entry port_props[ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES];
+ struct property_entry ep_props[ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES];
+
+ struct software_node_ref_args remote_ep[1];
+};
+
+/**
+ * struct acpi_device_software_nodes - Software nodes for an ACPI device
+ * @dev_props: Device properties.
+ * @nodes: Software nodes for root as well as ports and endpoints.
+ * @nodeptrs: Array of software node pointers, for (un)registering them.
+ * @ports: Information related to each port and endpoint within a port.
+ * @num_ports: The number of ports.
+ */
+struct acpi_device_software_nodes {
+ struct property_entry dev_props[ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES];
+ struct software_node *nodes;
+ const struct software_node **nodeptrs;
+ struct acpi_device_software_node_port *ports;
+ unsigned int num_ports;
+};
+
/* Device */
struct acpi_device {
u32 pld_crc;
@@ -384,6 +475,7 @@ struct acpi_device {
struct acpi_device_data data;
struct acpi_scan_handler *handler;
struct acpi_hotplug_context *hp;
+ struct acpi_device_software_nodes *swnodes;
const struct acpi_gpio_mapping *driver_gpios;
void *driver_data;
struct device dev;
@@ -627,6 +719,8 @@ struct acpi_pci_root {
/* helper */
+struct iommu_ops;
+
bool acpi_dma_supported(const struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
@@ -655,6 +749,7 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
bool acpi_quirk_skip_acpi_ac_and_battery(void);
int acpi_install_cmos_rtc_space_handler(acpi_handle handle);
void acpi_remove_cmos_rtc_space_handler(acpi_handle handle);
+int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
#else
static inline bool acpi_device_override_status(struct acpi_device *adev,
unsigned long long *status)
@@ -672,23 +767,22 @@ static inline int acpi_install_cmos_rtc_space_handler(acpi_handle handle)
static inline void acpi_remove_cmos_rtc_space_handler(acpi_handle handle)
{
}
+static inline int
+acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+ *skip = false;
+ return 0;
+}
#endif
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev);
-int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
bool acpi_quirk_skip_gpio_event_handlers(void);
#else
static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
{
return false;
}
-static inline int
-acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
-{
- *skip = false;
- return 0;
-}
static inline bool acpi_quirk_skip_gpio_event_handlers(void)
{
return false;
@@ -764,10 +858,73 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
-bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2);
-bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer);
+static inline bool acpi_dev_hid_match(struct acpi_device *adev, const char *hid2)
+{
+ const char *hid1 = acpi_device_hid(adev);
+
+ return hid1 && hid2 && !strcmp(hid1, hid2);
+}
+
+static inline bool acpi_str_uid_match(struct acpi_device *adev, const char *uid2)
+{
+ const char *uid1 = acpi_device_uid(adev);
+
+ return uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2)
+{
+ u64 uid1;
+
+ return !acpi_dev_uid_to_integer(adev, &uid1) && uid1 == uid2;
+}
+
+#define TYPE_ENTRY(type, x) \
+ const type: x, \
+ type: x
+
+#define ACPI_STR_TYPES(match) \
+ TYPE_ENTRY(unsigned char *, match), \
+ TYPE_ENTRY(signed char *, match), \
+ TYPE_ENTRY(char *, match), \
+ TYPE_ENTRY(void *, match)
+
+/**
+ * acpi_dev_uid_match - Match device by supplied UID
+ * @adev: ACPI device to match.
+ * @uid2: Unique ID of the device.
+ *
+ * Matches UID in @adev with given @uid2.
+ *
+ * Returns: %true if matches, %false otherwise.
+ */
+#define acpi_dev_uid_match(adev, uid2) \
+ _Generic(uid2, \
+ /* Treat @uid2 as a string for acpi string types */ \
+ ACPI_STR_TYPES(acpi_str_uid_match), \
+ /* Treat as an integer otherwise */ \
+ default: acpi_int_uid_match)(adev, uid2)
+
+/**
+ * acpi_dev_hid_uid_match - Match device by supplied HID and UID
+ * @adev: ACPI device to match.
+ * @hid2: Hardware ID of the device.
+ * @uid2: Unique ID of the device, pass NULL to not check _UID.
+ *
+ * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
+ * will be treated as a match. If the user wants to validate @uid2, it should be
+ * done before calling this function.
+ *
+ * Returns: %true if matches or @uid2 is NULL, %false otherwise.
+ */
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) \
+ (acpi_dev_hid_match(adev, hid2) && \
+ /* Distinguish integer 0 from NULL @uid2 */ \
+ (_Generic(uid2, ACPI_STR_TYPES(!(uid2)), default: 0) || \
+ acpi_dev_uid_match(adev, uid2)))
+
void acpi_dev_clear_dependencies(struct acpi_device *supplier);
bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
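Because acpi_dev_uid_match() dispatches on the C type of its second argument via _Generic, the same macro accepts both string and integer _UID values. A minimal usage sketch (the function name and UID values below are hypothetical):

/* Hypothetical caller: match an ACPI device by string or by integer _UID. */
static bool sensor_uid_matches(struct acpi_device *adev)
{
	/* String argument: _Generic selects acpi_str_uid_match(). */
	if (acpi_dev_uid_match(adev, "SENSOR1"))
		return true;

	/* Integer argument: _Generic selects acpi_int_uid_match(). */
	return acpi_dev_uid_match(adev, 3);
}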
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 3751ae6943..9775384d61 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1046,6 +1046,8 @@ struct acpi_madt_generic_interrupt {
/* ACPI_MADT_ENABLED (1) Processor is usable if set */
#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */
#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */
+#define ACPI_MADT_GICC_ONLINE_CAPABLE (1<<3) /* 03: Processor is online capable */
+#define ACPI_MADT_GICC_NON_COHERENT (1<<4) /* 04: GIC redistributor is not coherent */
/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
@@ -1090,21 +1092,27 @@ struct acpi_madt_generic_msi_frame {
struct acpi_madt_generic_redistributor {
struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
+ u8 flags;
+ u8 reserved; /* reserved - must be zero */
u64 base_address;
u32 length;
};
+#define ACPI_MADT_GICR_NON_COHERENT (1)
+
/* 15: Generic Translator (ACPI 6.0) */
struct acpi_madt_generic_translator {
struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
+ u8 flags;
+ u8 reserved; /* reserved - must be zero */
u32 translation_id;
u64 base_address;
u32 reserved2;
};
+#define ACPI_MADT_ITS_NON_COHERENT (1)
+
/* 16: Multiprocessor wakeup (ACPI 6.4) */
struct acpi_madt_multiproc_wakeup {
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 6126c977ec..3a0995f8bc 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -144,6 +144,8 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern bool cppc_perf_ctrs_in_pcc(void);
+extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
+extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
extern bool acpi_cpc_valid(void);
extern bool cppc_allow_fast_switch(void);
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
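The two new helpers convert between abstract CPPC performance levels and frequencies in kHz. A hedged sketch of a caller (the function name and error handling are illustrative only):

/* Illustrative only: translate a CPPC performance level to kHz. */
static unsigned int example_cppc_perf_to_freq(int cpu, unsigned int perf)
{
	struct cppc_perf_caps caps;

	if (cppc_get_perf_caps(cpu, &caps))
		return 0;	/* no CPPC data for this CPU */

	/* The reverse mapping is cppc_khz_to_perf(&caps, freq). */
	return cppc_perf_to_khz(&caps, perf);
}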
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 4230392b5b..3d538d4178 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -75,6 +75,15 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
return __acpi_video_get_backlight_type(false, NULL);
}
+/*
+ * This function MUST only be called by GPU drivers to check if the driver
+ * should register a backlight class device. This function not only checks
+ * if a GPU native backlight device should be registered, it *also* tells
+ * the ACPI video-detect code that native GPU backlight control is available.
+ * Therefore, calling this from any place other than the GPU driver is wrong!
+ * To check if GPU native backlight control is used in other places, instead use:
+ * if (acpi_video_get_backlight_type() == acpi_backlight_native) { ... }
+ */
static inline bool acpi_video_backlight_use_native(void)
{
return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native;
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index def242528b..d436bee4d1 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -11,6 +11,7 @@ mandatory-y += bitops.h
mandatory-y += bug.h
mandatory-y += bugs.h
mandatory-y += cacheflush.h
+mandatory-y += cfi.h
mandatory-y += checksum.h
mandatory-y += compat.h
mandatory-y += current.h
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 961f4d88f9..1985c22d90 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -296,5 +296,13 @@ do { \
#define io_stop_wc() do { } while (0)
#endif
+/*
+ * Architectures that guarantee an implicit smp_mb() in switch_mm()
+ * can override smp_mb__after_switch_mm.
+ */
+#ifndef smp_mb__after_switch_mm
+# define smp_mb__after_switch_mm() smp_mb()
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
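An architecture whose switch_mm() already executes a full barrier can make the new hook a no-op by providing its own definition before this generic fallback is seen; a sketch (the architecture name is hypothetical):

/* arch/foo/include/asm/barrier.h -- sketch only */
/*
 * switch_mm() on this architecture already implies smp_mb(), so the
 * generic fallback would add a redundant barrier.
 */
#define smp_mb__after_switch_mm()	do { } while (0)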
diff --git a/include/asm-generic/cfi.h b/include/asm-generic/cfi.h
new file mode 100644
index 0000000000..41fac3537b
--- /dev/null
+++ b/include/asm-generic/cfi.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CFI_H
+#define __ASM_GENERIC_CFI_H
+
+#endif /* __ASM_GENERIC_CFI_H */
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index 43e18db89c..ad928cce26 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -2,6 +2,8 @@
#ifndef __ASM_GENERIC_CHECKSUM_H
#define __ASM_GENERIC_CHECKSUM_H
+#include <linux/bitops.h>
+
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
@@ -31,9 +33,7 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
static inline __sum16 csum_fold(__wsum csum)
{
u32 sum = (__force u32)csum;
- sum = (sum & 0xffff) + (sum >> 16);
- sum = (sum & 0xffff) + (sum >> 16);
- return (__force __sum16)~sum;
+ return (__force __sum16)((~sum - ror32(sum, 16)) >> 16);
}
#endif
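The rewritten csum_fold() performs both 16-bit folds with a single subtraction: writing sum = a*2^16 + b, ror32(sum, 16) swaps the two halves, and the borrow in ~sum - ror32(sum, 16) propagates the end-around carry, leaving the inverted fold in the top 16 bits. A standalone userspace check of one value (not kernel code):

#include <assert.h>
#include <stdint.h>

static uint32_t ror32(uint32_t w, unsigned int s)
{
	return (w >> s) | (w << (32 - s));
}

int main(void)
{
	uint32_t sum = 0x0001ffff;
	/* Old double fold: 0x0001ffff -> 0x00010000 -> 0x00000001. */
	uint32_t f = (sum & 0xffff) + (sum >> 16);

	f = (f & 0xffff) + (f >> 16);
	assert((uint16_t)~f == 0xfffe);
	/* New single-subtraction form yields the same result. */
	assert((uint16_t)((~sum - ror32(sum, 16)) >> 16) == 0xfffe);
	return 0;
}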
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bae0fe4d49..5dd3a61d67 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -370,7 +370,8 @@
BRANCH_PROFILE() \
TRACE_PRINTKS() \
BPF_RAW_TP() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ KUNIT_TABLE()
/*
* Data section helpers
@@ -700,7 +701,7 @@
EARLYCON_TABLE() \
LSM_TABLE() \
EARLY_LSM_TABLE() \
- KUNIT_TABLE()
+ KUNIT_INIT_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
@@ -926,6 +927,12 @@
. = ALIGN(8); \
BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end)
+/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
+#define KUNIT_INIT_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_POST_LABEL(.kunit_init_test_suites, \
+ __kunit_init_suites, _start, _end)
+
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index c7bdbece27..5d61f576cf 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -212,13 +212,9 @@ struct shash_desc {
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
* @clone_tfm: Copy transform into new object, may allocate memory.
- * @digestsize: see struct ahash_alg
- * @statesize: see struct ahash_alg
* @descsize: Size of the operational state for the message digest. This state
* size is the memory size that needs to be allocated for
* shash_desc.__ctx
- * @stat: Statistics for hash algorithm.
- * @base: internally used
* @halg: see struct hash_alg_common
* @HASH_ALG_COMMON: see struct hash_alg_common
*/
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 08b803a4fc..78ecaf5db0 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -121,6 +121,7 @@ struct af_alg_async_req {
*
* @tsgl_list: Link to TX SGL
* @iv: IV for cipher operation
+ * @state: Existing state for continuing operation
* @aead_assoclen: Length of AAD for AEAD cipher operations
* @completion: Work queue for synchronous operation
* @used: TX bytes sent to kernel. This variable is used to
@@ -142,6 +143,7 @@ struct af_alg_ctx {
struct list_head tsgl_list;
void *iv;
+ void *state;
size_t aead_assoclen;
struct crypto_wait wait;
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index ea18af4834..c8857d7bdb 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -15,6 +15,17 @@
#include <linux/string.h>
#include <linux/types.h>
+/* Set this bit if the lskcipher operation is a continuation. */
+#define CRYPTO_LSKCIPHER_FLAG_CONT 0x00000001
+/* Set this bit if the lskcipher operation is final. */
+#define CRYPTO_LSKCIPHER_FLAG_FINAL 0x00000002
+/* The bit CRYPTO_TFM_REQ_MAY_SLEEP can also be set if needed. */
+
+/* Set this bit if the skcipher operation is a continuation. */
+#define CRYPTO_SKCIPHER_REQ_CONT 0x00000001
+/* Set this bit if the skcipher operation is not final. */
+#define CRYPTO_SKCIPHER_REQ_NOTFINAL 0x00000002
+
struct scatterlist;
/**
@@ -91,6 +102,7 @@ struct crypto_istat_cipher {
* IV of exactly that size to perform the encrypt or decrypt operation.
* @chunksize: Equal to the block size except for stream ciphers such as
* CTR where it is set to the underlying block size.
+ * @statesize: Size of the internal state for the algorithm.
* @stat: Statistics for cipher algorithm
* @base: Definition of a generic crypto algorithm.
*/
@@ -99,6 +111,7 @@ struct crypto_istat_cipher {
unsigned int max_keysize; \
unsigned int ivsize; \
unsigned int chunksize; \
+ unsigned int statesize; \
\
SKCIPHER_ALG_COMMON_STAT \
\
@@ -108,16 +121,6 @@ struct skcipher_alg_common SKCIPHER_ALG_COMMON;
/**
* struct skcipher_alg - symmetric key cipher definition
- * @min_keysize: Minimum key size supported by the transformation. This is the
- * smallest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MIN_KEY_SIZE" include/crypto/
- * @max_keysize: Maximum key size supported by the transformation. This is the
- * largest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MAX_KEY_SIZE" include/crypto/
* @setkey: Set key for the transformation. This function is used to either
* program a supplied key into the hardware or store the key in the
* transformation context for programming it later. Note that this
@@ -141,6 +144,17 @@ struct skcipher_alg_common SKCIPHER_ALG_COMMON;
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
+ * @export: Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful in case
+ * you want to save partial result of the transformation after
+ * processing certain amount of data and reload this partial result
+ * multiple times later on for multiple re-use. No data processing
+ * happens at this point.
+ * @import: Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
@@ -152,15 +166,9 @@ struct skcipher_alg_common SKCIPHER_ALG_COMMON;
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
- * @ivsize: IV size applicable for transformation. The consumer must provide an
- * IV of exactly that size to perform the encrypt or decrypt operation.
- * @chunksize: Equal to the block size except for stream ciphers such as
- * CTR where it is set to the underlying block size.
* @walksize: Equal to the chunk size except in cases where the algorithm is
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
- * @stat: Statistics for cipher algorithm
- * @base: Definition of a generic crypto algorithm.
* @co: see struct skcipher_alg_common
*
* All fields except @ivsize are mandatory and must be filled.
@@ -170,6 +178,8 @@ struct skcipher_alg {
unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
+ int (*export)(struct skcipher_request *req, void *out);
+ int (*import)(struct skcipher_request *req, const void *in);
int (*init)(struct crypto_skcipher *tfm);
void (*exit)(struct crypto_skcipher *tfm);
@@ -200,6 +210,9 @@ struct skcipher_alg {
* may be left over if length is not a multiple of blocks
* and there is more to come (final == false). The number of
* left-over bytes should be returned in case of success.
+ * The siv field shall be as long as ivsize + statesize with
+ * the IV placed at the front. The state will be used by the
+ * algorithm internally.
* @decrypt: Decrypt a number of bytes. This is a reverse counterpart to
* @encrypt and the conditions are exactly the same.
* @init: Initialize the cryptographic transformation object. This function
@@ -215,9 +228,9 @@ struct lskcipher_alg {
int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src,
- u8 *dst, unsigned len, u8 *iv, bool final);
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src,
- u8 *dst, unsigned len, u8 *iv, bool final);
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
int (*init)(struct crypto_lskcipher *tfm);
void (*exit)(struct crypto_lskcipher *tfm);
@@ -496,6 +509,40 @@ static inline unsigned int crypto_lskcipher_chunksize(
return crypto_lskcipher_alg(tfm)->co.chunksize;
}
+/**
+ * crypto_skcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_skcipher_statesize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_common(tfm)->statesize;
+}
+
+/**
+ * crypto_lskcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_lskcipher_statesize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.statesize;
+}
+
static inline unsigned int crypto_sync_skcipher_blocksize(
struct crypto_sync_skcipher *tfm)
{
@@ -684,14 +731,48 @@ int crypto_skcipher_encrypt(struct skcipher_request *req);
int crypto_skcipher_decrypt(struct skcipher_request *req);
/**
+ * crypto_skcipher_export() - export partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @out: output buffer of sufficient size that can hold the state
+ *
+ * Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful in case
+ * you want to save partial result of the transformation after
+ * processing certain amount of data and reload this partial result
+ * multiple times later on for multiple re-use. No data processing
+ * happens at this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_export(struct skcipher_request *req, void *out);
+
+/**
+ * crypto_skcipher_import() - import partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @in: buffer holding the state
+ *
+ * Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_import(struct skcipher_request *req, const void *in);
+
+/**
* crypto_lskcipher_encrypt() - encrypt plaintext
* @tfm: lskcipher handle
* @src: source buffer
* @dst: destination buffer
* @len: number of bytes to process
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_lskcipher_ivsize
- *
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed by a buffer with the length specified by
+ * crypto_lskcipher_statesize.
* Encrypt plaintext data using the lskcipher handle.
*
* Return: >=0 if the cipher operation was successful, if positive
@@ -699,7 +780,7 @@ int crypto_skcipher_decrypt(struct skcipher_request *req);
* < 0 if an error occurred
*/
int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
- u8 *dst, unsigned len, u8 *iv);
+ u8 *dst, unsigned len, u8 *siv);
/**
* crypto_lskcipher_decrypt() - decrypt ciphertext
@@ -707,8 +788,10 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
* @src: source buffer
* @dst: destination buffer
* @len: number of bytes to process
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_lskcipher_ivsize
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed by a buffer with the length specified by
+ * crypto_lskcipher_statesize.
*
* Decrypt ciphertext data using the lskcipher handle.
*
@@ -717,7 +800,7 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
* < 0 if an error occurred
*/
int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
- u8 *dst, unsigned len, u8 *iv);
+ u8 *dst, unsigned len, u8 *siv);
/**
* DOC: Symmetric Key Cipher Request Handle
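Together with the new request flags, the export/import pair lets a long stream be encrypted in separately submitted chunks. A hedged sketch, assuming the continuation flags are carried in req->base.flags and that tfm, request, and scatterlist setup plus error handling are done elsewhere:

/* Sketch: encrypt two chunks as one logical stream. */
u8 state[64];	/* must be >= crypto_skcipher_statesize(tfm) */

skcipher_request_set_crypt(req, src1, dst1, len1, iv);
req->base.flags |= CRYPTO_SKCIPHER_REQ_NOTFINAL;	/* more data follows */
crypto_skcipher_encrypt(req);
crypto_skcipher_export(req, state);	/* save the partial state */

/* Later: resume exactly where the first chunk stopped. */
crypto_skcipher_import(req, state);
skcipher_request_set_crypt(req, src2, dst2, len2, iv);
req->base.flags |= CRYPTO_SKCIPHER_REQ_CONT;		/* a continuation... */
req->base.flags &= ~CRYPTO_SKCIPHER_REQ_NOTFINAL;	/* ...and the last chunk */
crypto_skcipher_encrypt(req);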
diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h
new file mode 100644
index 0000000000..4453906105
--- /dev/null
+++ b/include/drm/bridge/aux-bridge.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#ifndef DRM_AUX_BRIDGE_H
+#define DRM_AUX_BRIDGE_H
+
+#include <drm/drm_connector.h>
+
+struct auxiliary_device;
+
+#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE)
+int drm_aux_bridge_register(struct device *parent);
+#else
+static inline int drm_aux_bridge_register(struct device *parent)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np);
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev);
+struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np);
+void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status);
+#else
+static inline struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent,
+ struct device_node *np)
+{
+ return NULL;
+}
+
+static inline int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
+{
+ return 0;
+}
+
+static inline struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np)
+{
+ return NULL;
+}
+
+static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+{
+}
+#endif
+
+#endif
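A hedged sketch of the intended producer side: a USB-C/DP controller driver registers an HPD bridge once and then forwards hot-plug events to the DRM side (variable names are illustrative; error handling elided):

/* Sketch: forward Type-C DP hot-plug events through an aux HPD bridge. */
struct device *bridge_dev;

bridge_dev = drm_dp_hpd_bridge_register(parent, parent->of_node);
/* error handling elided */

/* Later, from the controller's event handler: */
drm_aux_hpd_bridge_notify(bridge_dev, connector_status_connected);
drm_aux_hpd_bridge_notify(bridge_dev, connector_status_disconnected);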
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index e69cece404..3731828825 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -148,6 +148,7 @@
#define DP_RECEIVE_PORT_0_CAP_0 0x008
# define DP_LOCAL_EDID_PRESENT (1 << 1)
# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
+# define DP_HBLANK_EXPANSION_CAPABLE (1 << 3)
#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
@@ -543,6 +544,10 @@
/* DFP Capability Extension */
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
+#define DP_PANEL_REPLAY_CAP 0x0b0 /* DP 2.0 */
+# define DP_PANEL_REPLAY_SUPPORT (1 << 0)
+# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1)
+
/* Link Configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
@@ -646,6 +651,9 @@
# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38
# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40
# define DP_LINK_QUAL_PATTERN_SQUARE 0x48
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DISABLED 0x49
+# define DP_LINK_QUAL_PATTERN_SQUARE_DEEMPHASIS_DISABLED 0x4a
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED 0x4b
#define DP_TRAINING_LANE0_1_SET2 0x10f
#define DP_TRAINING_LANE2_3_SET2 0x110
@@ -699,6 +707,7 @@
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
# define DP_DECOMPRESSION_EN (1 << 0)
+# define DP_DSC_PASSTHROUGH_EN (1 << 1)
#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
@@ -716,6 +725,13 @@
#define DP_BRANCH_DEVICE_CTRL 0x1a1
# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0)
+#define PANEL_REPLAY_CONFIG 0x1b0 /* DP 2.0 */
+# define DP_PANEL_REPLAY_ENABLE (1 << 0)
+# define DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN (1 << 3)
+# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN (1 << 4)
+# define DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN (1 << 5)
+# define DP_PANEL_REPLAY_SU_ENABLE (1 << 6)
+
#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
@@ -1105,6 +1121,18 @@
#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */
#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */
+#define DP_PANEL_REPLAY_ERROR_STATUS 0x2020 /* DP 2.1 */
+# define DP_PANEL_REPLAY_LINK_CRC_ERROR (1 << 0)
+# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR (1 << 1)
+# define DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2)
+
+#define DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS 0x2022 /* DP 2.1 */
+# define DP_SINK_DEVICE_PANEL_REPLAY_STATUS_MASK (7 << 0)
+# define DP_SINK_FRAME_LOCKED_SHIFT 3
+# define DP_SINK_FRAME_LOCKED_MASK (3 << 3)
+# define DP_SINK_FRAME_LOCKED_STATUS_VALID_SHIFT 5
+# define DP_SINK_FRAME_LOCKED_STATUS_VALID_MASK (1 << 5)
+
/* Extended Receiver Capability: See DP_DPCD_REV for definitions */
#define DP_DP13_DPCD_REV 0x2200
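A sketch of probing the new panel replay DPCD registers with the long-standing drm_dp_dpcd_readb()/drm_dp_dpcd_writeb() helpers (error handling elided; whether a driver enables selective update is policy and not shown):

/* Sketch: check sink support before writing PANEL_REPLAY_CONFIG. */
u8 pr_cap;

if (drm_dp_dpcd_readb(aux, DP_PANEL_REPLAY_CAP, &pr_cap) == 1 &&
    (pr_cap & DP_PANEL_REPLAY_SUPPORT)) {
	u8 cfg = DP_PANEL_REPLAY_ENABLE;

	if (pr_cap & DP_PANEL_REPLAY_SU_SUPPORT)
		cfg |= DP_PANEL_REPLAY_SU_ENABLE;

	drm_dp_dpcd_writeb(aux, PANEL_REPLAY_CONFIG, cfg);
}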
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 3d74b2cec7..863b2e7add 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -164,6 +164,7 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
bool is_edp);
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
@@ -251,6 +252,19 @@ drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE])
return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP);
}
+/**
+ * drm_dp_is_uhbr_rate - Determine if a link rate is UHBR
+ * @link_rate: link rate in 10kbits/s units
+ *
+ * Determine if the provided link rate is a UHBR rate.
+ *
+ * Returns: %true if @link_rate is a UHBR rate.
+ */
+static inline bool drm_dp_is_uhbr_rate(int link_rate)
+{
+ return link_rate >= 1000000;
+}
+
/*
* DisplayPort AUX channel
*/
@@ -632,6 +646,13 @@ enum drm_dp_quirk {
* the DP_MAX_LINK_RATE register reporting a lower max multiplier.
*/
DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS,
+ /**
+ * @DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC:
+ *
+ * The device applies HBLANK expansion for some modes, but this
+ * requires enabling DSC.
+ */
+ DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC,
};
/**
@@ -781,4 +802,15 @@ bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZ
const u8 port_cap[4], u8 color_spc);
int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc);
+#define DRM_DP_BW_OVERHEAD_MST BIT(0)
+#define DRM_DP_BW_OVERHEAD_UHBR BIT(1)
+#define DRM_DP_BW_OVERHEAD_SSC_REF_CLK BIT(2)
+#define DRM_DP_BW_OVERHEAD_FEC BIT(3)
+#define DRM_DP_BW_OVERHEAD_DSC BIT(4)
+
+int drm_dp_bw_overhead(int lane_count, int hactive,
+ int dsc_slice_count,
+ int bpp_x16, unsigned long flags);
+int drm_dp_bw_channel_coding_efficiency(bool is_uhbr);
+
#endif /* _DRM_DP_HELPER_H_ */
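The overhead helpers pair with drm_dp_is_uhbr_rate() when sizing a link. A sketch of the flag selection only; it assumes (not guaranteed by this header alone) that drm_dp_bw_overhead() returns a factor scaled by 1000000:

	static int example_link_overhead(int link_rate, int lane_count,
					 int hactive, int dsc_slice_count,
					 int bpp_x16, bool ssc, bool fec)
	{
		unsigned long flags = 0;

		if (drm_dp_is_uhbr_rate(link_rate))
			flags |= DRM_DP_BW_OVERHEAD_UHBR;
		if (ssc)
			flags |= DRM_DP_BW_OVERHEAD_SSC_REF_CLK;
		if (fec)
			flags |= DRM_DP_BW_OVERHEAD_FEC;
		if (dsc_slice_count)
			flags |= DRM_DP_BW_OVERHEAD_DSC;

		/* assumed to return an overhead factor in 1e6 units */
		return drm_dp_bw_overhead(lane_count, hactive,
					  dsc_slice_count, bpp_x16, flags);
	}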
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index 655862b3d2..9b19d8bd52 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_fixed.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
@@ -617,7 +618,7 @@ struct drm_dp_mst_topology_state {
* @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
* out itself.
*/
- int pbn_div;
+ fixed20_12 pbn_div;
};
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
@@ -839,8 +840,8 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
-int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count);
+fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int link_lane_count);
int drm_dp_calc_pbn_mode(int clock, int bpp);
@@ -892,6 +893,9 @@ drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_atomic_payload *
drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
struct drm_dp_mst_port *port);
+bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_mst_port *parent);
int __must_check
drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
@@ -913,6 +917,10 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
struct drm_dp_query_stream_enc_status_ack_reply *status);
+int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_port **failing_port);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
struct drm_dp_mst_topology_mgr *mgr);
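Since @pbn_div is now 20.12 fixed point, callers that previously divided by the plain int need the drm_fixed.h conversions. A simplified sketch; real MST code keeps the division in fixed point, dfixed_trunc() here merely recovers the integer part:

	static int example_required_slots(struct drm_dp_mst_topology_mgr *mgr,
					  int link_rate, int lane_count,
					  int clock, int bpp)
	{
		fixed20_12 pbn_div = drm_dp_get_vc_payload_bw(mgr, link_rate,
							      lane_count);
		int pbn = drm_dp_calc_pbn_mode(clock, bpp);

		/* truncate the 20.12 divisor to its integer part */
		return DIV_ROUND_UP(pbn, dfixed_trunc(pbn_div));
	}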
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 006b5c977a..9aa0a05aa0 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -49,9 +49,8 @@ struct drm_private_state;
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
-int
-drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
- struct drm_connector_state *conn_state);
+int drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
+ struct drm_atomic_state *state);
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
const struct drm_crtc_state *crtc_state,
int min_scale,
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index ba248ca886..50131383ed 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -33,24 +33,6 @@
#include <linux/wait.h>
struct drm_file;
-struct drm_hw_lock;
-
-/*
- * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for
- * include ordering reasons.
- *
- * DO NOT USE.
- */
-struct drm_lock_data {
- struct drm_hw_lock *hw_lock;
- struct drm_file *file_priv;
- wait_queue_head_t lock_queue;
- unsigned long lock_time;
- spinlock_t spinlock;
- uint32_t kernel_waiters;
- uint32_t user_waiters;
- int idle_has_lock;
-};
/**
* struct drm_master - drm master structure
@@ -145,10 +127,6 @@ struct drm_master {
* Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
*/
struct idr lessee_idr;
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- struct drm_lock_data lock;
-#endif
};
struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 99aea53601..b7aed3ead7 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -983,6 +983,4 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
-void drm_bridge_debugfs_init(struct drm_device *dev);
-
#endif
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 6b5eec10c3..ed81741036 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -37,20 +37,17 @@ struct drm_plane;
*
* Extract a degamma/gamma LUT value provided by user (in the form of
* &drm_color_lut entries) and round it to the precision supported by the
- * hardware.
+ * hardware, following OpenGL int<->float conversion rules
+ * (see eg. OpenGL 4.6 specification - 2.3.5 Fixed-Point Data Conversions).
*/
static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision)
{
- u32 val = user_input;
- u32 max = 0xffff >> (16 - bit_precision);
-
- /* Round only if we're not using full precision. */
- if (bit_precision < 16) {
- val += 1UL << (16 - bit_precision - 1);
- val >>= 16 - bit_precision;
- }
-
- return clamp_val(val, 0, max);
+ if (bit_precision > 16)
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(user_input, (1 << bit_precision) - 1),
+ (1 << 16) - 1);
+ else
+ return DIV_ROUND_CLOSEST(user_input * ((1 << bit_precision) - 1),
+ (1 << 16) - 1);
}
u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n);
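The rewritten helper rounds to nearest over the full 16-bit input range. Two illustrative values under the new arithmetic:

	u32 v = drm_color_lut_extract(0x8000, 10);
	/* 0x8000 * 1023 / 65535 = 511.5..., rounds up to 0x200 */

	u32 id = drm_color_lut_extract(0xffff, 16);
	/* 16-bit precision is an identity mapping: returns 0xffff */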
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index c490977ee2..63767cf243 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -6,7 +6,6 @@
#include <linux/mutex.h>
#include <linux/idr.h>
-#include <drm/drm_legacy.h>
#include <drm/drm_mode_config.h>
struct drm_driver;
@@ -153,8 +152,8 @@ struct drm_device {
*
* Lock for others (not &drm_minor.master and &drm_file.is_master)
*
- * WARNING:
- * Only drivers annotated with DRIVER_LEGACY should be using this.
+ * TODO: This lock used to be the BKL of the DRM subsystem. Move the
+ * lock into i915, which is the only remaining user.
*/
struct mutex struct_mutex;
@@ -317,72 +316,6 @@ struct drm_device {
* Root directory for debugfs files.
*/
struct dentry *debugfs_root;
-
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- /* List of devices per driver for stealth attach cleanup */
- struct list_head legacy_dev_list;
-
-#ifdef __alpha__
- /** @hose: PCI hose, only used on ALPHA platforms. */
- struct pci_controller *hose;
-#endif
-
- /* AGP data */
- struct drm_agp_head *agp;
-
- /* Context handle management - linked list of context handles */
- struct list_head ctxlist;
-
- /* Context handle management - mutex for &ctxlist */
- struct mutex ctxlist_mutex;
-
- /* Context handle management */
- struct idr ctx_idr;
-
- /* Memory management - linked list of regions */
- struct list_head maplist;
-
- /* Memory management - user token hash table for maps */
- struct drm_open_hash map_hash;
-
- /* Context handle management - list of vmas (for debugging) */
- struct list_head vmalist;
-
- /* Optional pointer for DMA support */
- struct drm_device_dma *dma;
-
- /* Context swapping flag */
- __volatile__ long context_flag;
-
- /* Last current context */
- int last_context;
-
- /* Lock for &buf_use and a few other things. */
- spinlock_t buf_lock;
-
- /* Usage counter for buffers in use -- cannot alloc */
- int buf_use;
-
- /* Buffer allocation in progress */
- atomic_t buf_alloc;
-
- struct {
- int context;
- struct drm_hw_lock *lock;
- } sigdata;
-
- struct drm_local_map *agp_buffer_map;
- unsigned int agp_buffer_token;
-
- /* Scatter gather memory */
- struct drm_sg_mem *sg;
-
- /* IRQs */
- bool irq_enabled;
- int irq;
-#endif
};
#endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index ea36aa79dc..8878260d75 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -442,25 +442,6 @@ struct drm_driver {
* some examples.
*/
const struct file_operations *fops;
-
-#ifdef CONFIG_DRM_LEGACY
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-
- int (*firstopen) (struct drm_device *);
- void (*preclose) (struct drm_device *, struct drm_file *file_priv);
- int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
- int (*dma_quiescent) (struct drm_device *);
- int (*context_dtor) (struct drm_device *dev, int context);
- irqreturn_t (*irq_handler)(int irq, void *arg);
- void (*irq_preinstall)(struct drm_device *dev);
- int (*irq_postinstall)(struct drm_device *dev);
- void (*irq_uninstall)(struct drm_device *dev);
- u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
- int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
- void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
- int dev_priv_size;
-#endif
};
void *__devm_drm_dev_alloc(struct device *parent,
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 882d263870..518d1b8106 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -269,64 +269,6 @@ struct detailed_timing {
#define DRM_EDID_DSC_MAX_SLICES 0xf
#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f
-/* ELD Header Block */
-#define DRM_ELD_HEADER_BLOCK_SIZE 4
-
-#define DRM_ELD_VER 0
-# define DRM_ELD_VER_SHIFT 3
-# define DRM_ELD_VER_MASK (0x1f << 3)
-# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
-# define DRM_ELD_VER_CANNED (0x1f << 3)
-
-#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
-
-/* ELD Baseline Block for ELD_Ver == 2 */
-#define DRM_ELD_CEA_EDID_VER_MNL 4
-# define DRM_ELD_CEA_EDID_VER_SHIFT 5
-# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
-# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
-# define DRM_ELD_MNL_SHIFT 0
-# define DRM_ELD_MNL_MASK (0x1f << 0)
-
-#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
-# define DRM_ELD_SAD_COUNT_SHIFT 4
-# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
-# define DRM_ELD_CONN_TYPE_SHIFT 2
-# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
-# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
-# define DRM_ELD_CONN_TYPE_DP (1 << 2)
-# define DRM_ELD_SUPPORTS_AI (1 << 1)
-# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
-
-#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
-# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
-
-#define DRM_ELD_SPEAKER 7
-# define DRM_ELD_SPEAKER_MASK 0x7f
-# define DRM_ELD_SPEAKER_RLRC (1 << 6)
-# define DRM_ELD_SPEAKER_FLRC (1 << 5)
-# define DRM_ELD_SPEAKER_RC (1 << 4)
-# define DRM_ELD_SPEAKER_RLR (1 << 3)
-# define DRM_ELD_SPEAKER_FC (1 << 2)
-# define DRM_ELD_SPEAKER_LFE (1 << 1)
-# define DRM_ELD_SPEAKER_FLR (1 << 0)
-
-#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
-# define DRM_ELD_PORT_ID_LEN 8
-
-#define DRM_ELD_MANUFACTURER_NAME0 16
-#define DRM_ELD_MANUFACTURER_NAME1 17
-
-#define DRM_ELD_PRODUCT_CODE0 18
-#define DRM_ELD_PRODUCT_CODE1 19
-
-#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
-
-#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
-
struct edid {
u8 header[8];
/* Vendor & product info */
@@ -387,11 +329,6 @@ int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
-#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
-int __drm_set_edid_firmware_path(const char *path);
-int __drm_get_edid_firmware_path(char *buf, size_t bufsize);
-#endif
-
bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2);
int
@@ -410,96 +347,6 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
enum hdmi_quantization_range rgb_quant_range);
/**
- * drm_eld_mnl - Get ELD monitor name length in bytes.
- * @eld: pointer to an eld memory structure with mnl set
- */
-static inline int drm_eld_mnl(const uint8_t *eld)
-{
- return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
-}
-
-/**
- * drm_eld_sad - Get ELD SAD structures.
- * @eld: pointer to an eld memory structure with sad_count set
- */
-static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
-{
- unsigned int ver, mnl;
-
- ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
- if (ver != 2 && ver != 31)
- return NULL;
-
- mnl = drm_eld_mnl(eld);
- if (mnl > 16)
- return NULL;
-
- return eld + DRM_ELD_CEA_SAD(mnl, 0);
-}
-
-/**
- * drm_eld_sad_count - Get ELD SAD count.
- * @eld: pointer to an eld memory structure with sad_count set
- */
-static inline int drm_eld_sad_count(const uint8_t *eld)
-{
- return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
- DRM_ELD_SAD_COUNT_SHIFT;
-}
-
-/**
- * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
- * @eld: pointer to an eld memory structure with mnl and sad_count set
- *
- * This is a helper for determining the payload size of the baseline block, in
- * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
- */
-static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
-{
- return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
- drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
-}
-
-/**
- * drm_eld_size - Get ELD size in bytes
- * @eld: pointer to a complete eld memory structure
- *
- * The returned value does not include the vendor block. It's vendor specific,
- * and comprises of the remaining bytes in the ELD memory buffer after
- * drm_eld_size() bytes of header and baseline block.
- *
- * The returned value is guaranteed to be a multiple of 4.
- */
-static inline int drm_eld_size(const uint8_t *eld)
-{
- return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
-}
-
-/**
- * drm_eld_get_spk_alloc - Get speaker allocation
- * @eld: pointer to an ELD memory structure
- *
- * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER
- * field definitions to identify speakers.
- */
-static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld)
-{
- return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK;
-}
-
-/**
- * drm_eld_get_conn_type - Get device type hdmi/dp connected
- * @eld: pointer to an ELD memory structure
- *
- * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to
- * identify the display type connected.
- */
-static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
-{
- return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
-}
-
-/**
* drm_edid_decode_mfg_id - Decode the manufacturer ID
* @mfg_id: The manufacturer ID
* @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
diff --git a/include/drm/drm_eld.h b/include/drm/drm_eld.h
new file mode 100644
index 0000000000..0a88d10b28
--- /dev/null
+++ b/include/drm/drm_eld.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __DRM_ELD_H__
+#define __DRM_ELD_H__
+
+#include <linux/types.h>
+
+struct cea_sad;
+
+/* ELD Header Block */
+#define DRM_ELD_HEADER_BLOCK_SIZE 4
+
+#define DRM_ELD_VER 0
+# define DRM_ELD_VER_SHIFT 3
+# define DRM_ELD_VER_MASK (0x1f << 3)
+# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
+# define DRM_ELD_VER_CANNED (0x1f << 3)
+
+#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
+
+/* ELD Baseline Block for ELD_Ver == 2 */
+#define DRM_ELD_CEA_EDID_VER_MNL 4
+# define DRM_ELD_CEA_EDID_VER_SHIFT 5
+# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
+# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
+# define DRM_ELD_MNL_SHIFT 0
+# define DRM_ELD_MNL_MASK (0x1f << 0)
+
+#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
+# define DRM_ELD_SAD_COUNT_SHIFT 4
+# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
+# define DRM_ELD_CONN_TYPE_SHIFT 2
+# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
+# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
+# define DRM_ELD_CONN_TYPE_DP (1 << 2)
+# define DRM_ELD_SUPPORTS_AI (1 << 1)
+# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
+
+#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
+# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
+
+#define DRM_ELD_SPEAKER 7
+# define DRM_ELD_SPEAKER_MASK 0x7f
+# define DRM_ELD_SPEAKER_RLRC (1 << 6)
+# define DRM_ELD_SPEAKER_FLRC (1 << 5)
+# define DRM_ELD_SPEAKER_RC (1 << 4)
+# define DRM_ELD_SPEAKER_RLR (1 << 3)
+# define DRM_ELD_SPEAKER_FC (1 << 2)
+# define DRM_ELD_SPEAKER_LFE (1 << 1)
+# define DRM_ELD_SPEAKER_FLR (1 << 0)
+
+#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
+# define DRM_ELD_PORT_ID_LEN 8
+
+#define DRM_ELD_MANUFACTURER_NAME0 16
+#define DRM_ELD_MANUFACTURER_NAME1 17
+
+#define DRM_ELD_PRODUCT_CODE0 18
+#define DRM_ELD_PRODUCT_CODE1 19
+
+#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
+
+#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
+
+/**
+ * drm_eld_mnl - Get ELD monitor name length in bytes.
+ * @eld: pointer to an eld memory structure with mnl set
+ */
+static inline int drm_eld_mnl(const u8 *eld)
+{
+ return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
+}
+
+int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad);
+int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad);
+
+/**
+ * drm_eld_sad - Get ELD SAD structures.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline const u8 *drm_eld_sad(const u8 *eld)
+{
+ unsigned int ver, mnl;
+
+ ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
+ if (ver != 2 && ver != 31)
+ return NULL;
+
+ mnl = drm_eld_mnl(eld);
+ if (mnl > 16)
+ return NULL;
+
+ return eld + DRM_ELD_CEA_SAD(mnl, 0);
+}
+
+/**
+ * drm_eld_sad_count - Get ELD SAD count.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline int drm_eld_sad_count(const u8 *eld)
+{
+ return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
+ DRM_ELD_SAD_COUNT_SHIFT;
+}
+
+/**
+ * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
+ * @eld: pointer to an eld memory structure with mnl and sad_count set
+ *
+ * This is a helper for determining the payload size of the baseline block, in
+ * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
+ */
+static inline int drm_eld_calc_baseline_block_size(const u8 *eld)
+{
+ return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
+ drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
+}
+
+/**
+ * drm_eld_size - Get ELD size in bytes
+ * @eld: pointer to a complete eld memory structure
+ *
+ * The returned value does not include the vendor block, which is vendor
+ * specific and consists of the remaining bytes in the ELD memory buffer
+ * after drm_eld_size() bytes of header and baseline block.
+ *
+ * The returned value is guaranteed to be a multiple of 4.
+ */
+static inline int drm_eld_size(const u8 *eld)
+{
+ return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
+}
+
+/**
+ * drm_eld_get_spk_alloc - Get speaker allocation
+ * @eld: pointer to an ELD memory structure
+ *
+ * The returned value is the speaker allocation mask. The caller has to use
+ * the %DRM_ELD_SPEAKER field definitions to identify speakers.
+ */
+static inline u8 drm_eld_get_spk_alloc(const u8 *eld)
+{
+ return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK;
+}
+
+/**
+ * drm_eld_get_conn_type - Get device type hdmi/dp connected
+ * @eld: pointer to an ELD memory structure
+ *
+ * The caller needs to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to
+ * identify the display type connected.
+ */
+static inline u8 drm_eld_get_conn_type(const u8 *eld)
+{
+ return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
+}
+
+#endif /* __DRM_ELD_H__ */
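A sketch of how an audio consumer might walk an ELD buffer with these helpers (the function is illustrative; pr_info() stands in for driver logging):

	static void example_dump_eld(const u8 *eld)
	{
		int i, sad_count = drm_eld_sad_count(eld);
		const u8 *sad = drm_eld_sad(eld);

		pr_info("ELD: %d bytes, %s sink, speakers 0x%02x\n",
			drm_eld_size(eld),
			drm_eld_get_conn_type(eld) == DRM_ELD_CONN_TYPE_DP ?
			"DP" : "HDMI",
			drm_eld_get_spk_alloc(eld));

		/* each Short Audio Descriptor is three bytes */
		for (i = 0; sad && i < sad_count; i++, sad += 3)
			pr_info("SAD %d: %02x %02x %02x\n",
				i, sad[0], sad[1], sad[2]);
	}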
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 3a09682af6..977a9381c8 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -60,7 +60,7 @@ struct drm_encoder_funcs {
* @late_register:
*
* This optional hook can be used to register additional userspace
- * interfaces attached to the encoder like debugfs interfaces.
+ * interfaces attached to the encoder.
* It is called late in the driver load sequence from drm_dev_register().
* Everything added from this callback should be unregistered in
* the early_unregister callback.
@@ -81,6 +81,13 @@ struct drm_encoder_funcs {
* before data structures are torndown.
*/
void (*early_unregister)(struct drm_encoder *encoder);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows encoders to create encoder-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root);
};
/**
@@ -184,6 +191,13 @@ struct drm_encoder {
const struct drm_encoder_funcs *funcs;
const struct drm_encoder_helper_funcs *helper_private;
+
+ /**
+ * @debugfs_entry:
+ *
+ * Debugfs directory for this encoder.
+ */
+ struct dentry *debugfs_entry;
};
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
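Drivers opt in by filling the new callback in their &drm_encoder_funcs. A hypothetical sketch; the foo_* names and the fops are illustrative, debugfs_create_file() is the standard kernel API:

	static const struct file_operations foo_encoder_state_fops; /* illustrative */

	static void foo_encoder_debugfs_init(struct drm_encoder *encoder,
					     struct dentry *root)
	{
		/* expose an encoder-specific file under the new per-encoder dir */
		debugfs_create_file("state", 0444, root, encoder,
				    &foo_encoder_state_fops);
	}

	static const struct drm_encoder_funcs foo_encoder_funcs = {
		.destroy = drm_encoder_cleanup,
		.debugfs_init = foo_encoder_debugfs_init,
	};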
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
index b5bf0b6da7..f1a66c0487 100644
--- a/include/drm/drm_exec.h
+++ b/include/drm/drm_exec.h
@@ -135,7 +135,7 @@ static inline bool drm_exec_is_contended(struct drm_exec *exec)
return !!exec->contended;
}
-void drm_exec_init(struct drm_exec *exec, uint32_t flags);
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr);
void drm_exec_fini(struct drm_exec *exec);
bool drm_exec_cleanup(struct drm_exec *exec);
int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
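drm_exec_init() gained a count hint for pre-sizing the internal object array; passing 0 keeps the previous grow-on-demand behaviour. A sketch of the updated calling sequence, assuming the usual DRM_EXEC_INTERRUPTIBLE_WAIT flag:

	static int example_lock_one(struct drm_gem_object *obj)
	{
		struct drm_exec exec;
		int ret;

		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
		drm_exec_until_all_locked(&exec) {
			ret = drm_exec_lock_obj(&exec, obj);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				break;
		}
		/* ... use the locked object ... */
		drm_exec_fini(&exec);
		return ret;
	}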
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 8f35dcea82..ab230d3af1 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -386,11 +386,6 @@ struct drm_file {
* Per-file buffer caches used by the PRIME buffer sharing code.
*/
struct drm_prime_file_private prime;
-
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- unsigned long lock_count; /* DRI1 legacy lock count */
-#endif
};
/**
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 21c3d512d2..1eef3283a1 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -31,11 +31,10 @@
/**
* DOC: flip utils
*
- * Util to queue up work to run from work-queue context after flip/vblank.
+ * Utility to queue up work to run from work-queue context after flip/vblank.
* Typically this can be used to defer unref of framebuffers, cursor
- * bo's, etc until after vblank. The APIs are all thread-safe.
- * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
- * in atomic context.
+ * bos, etc. until after vblank. The APIs are all thread-safe. Moreover,
+ * drm_flip_work_commit() can be called in atomic context.
*/
struct drm_flip_work;
@@ -52,16 +51,6 @@ struct drm_flip_work;
typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
/**
- * struct drm_flip_task - flip work task
- * @node: list entry element
- * @data: data to pass to &drm_flip_work.func
- */
-struct drm_flip_task {
- struct list_head node;
- void *data;
-};
-
-/**
* struct drm_flip_work - flip work queue
* @name: debug name
* @func: callback fxn called for each committed item
@@ -79,9 +68,6 @@ struct drm_flip_work {
spinlock_t lock;
};
-struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
-void drm_flip_work_queue_task(struct drm_flip_work *work,
- struct drm_flip_task *task);
void drm_flip_work_queue(struct drm_flip_work *work, void *val);
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq);
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 291deb0947..f13b34e0b7 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -15,6 +15,57 @@ struct drm_rect;
struct iosys_map;
+/**
+ * struct drm_format_conv_state - Stores format-conversion state
+ *
+ * DRM helpers for format conversion store temporary state in
+ * struct drm_xfrm_buf. The buffer's resources can be reused
+ * among multiple conversion operations.
+ *
+ * All fields are considered private.
+ */
+struct drm_format_conv_state {
+ struct {
+ void *mem;
+ size_t size;
+ bool preallocated;
+ } tmp;
+};
+
+#define __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, _preallocated) { \
+ .tmp = { \
+ .mem = (_mem), \
+ .size = (_size), \
+ .preallocated = (_preallocated), \
+ } \
+ }
+
+/**
+ * DRM_FORMAT_CONV_STATE_INIT - Initializer for struct drm_format_conv_state
+ *
+ * Initializes an instance of struct drm_format_conv_state to default values.
+ */
+#define DRM_FORMAT_CONV_STATE_INIT \
+ __DRM_FORMAT_CONV_STATE_INIT(NULL, 0, false)
+
+/**
+ * DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED - Initializer for struct drm_format_conv_state
+ * @_mem: The preallocated memory area
+ * @_size: The number of bytes in _mem
+ *
+ * Initializes an instance of struct drm_format_conv_state to preallocated
+ * storage. The caller is responsible for releasing the provided memory range.
+ */
+#define DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(_mem, _size) \
+ __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, true)
+
+void drm_format_conv_state_init(struct drm_format_conv_state *state);
+void drm_format_conv_state_copy(struct drm_format_conv_state *state,
+ const struct drm_format_conv_state *old_state);
+void *drm_format_conv_state_reserve(struct drm_format_conv_state *state,
+ size_t new_size, gfp_t flags);
+void drm_format_conv_state_release(struct drm_format_conv_state *state);
+
unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format,
const struct drm_rect *clip);
@@ -23,45 +74,49 @@ void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct drm_rect *clip);
void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool cached);
+ const struct drm_rect *clip, bool cached,
+ struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool swab);
+ const struct drm_rect *clip, struct drm_format_conv_state *state,
+ bool swab);
void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *rect);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
size_t drm_fb_build_fourcc_list(struct drm_device *dev,
const u32 *native_fourccs, size_t native_nfourccs,
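Every conversion helper now threads a struct drm_format_conv_state through, so the temporary buffer can be reused across calls. A one-shot sketch with stack-initialized state:

	static void example_blit_rgb565(struct iosys_map *dst,
					const unsigned int *dst_pitch,
					const struct iosys_map *src,
					const struct drm_framebuffer *fb,
					const struct drm_rect *clip)
	{
		struct drm_format_conv_state state = DRM_FORMAT_CONV_STATE_INIT;

		drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip,
					  &state, false);
		/* free whatever temporary storage the conversion allocated */
		drm_format_conv_state_release(&state);
	}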
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index 80ece7b6dd..668077009f 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -189,18 +189,6 @@ struct drm_framebuffer {
*/
int flags;
/**
- * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
- * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
- * universal plane.
- */
- int hot_x;
- /**
- * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
- * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
- * universal plane.
- */
- int hot_y;
- /**
* @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
*/
struct list_head filp_head;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 16364487fd..2ebec3984c 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -553,6 +553,19 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
int drm_gem_evict(struct drm_gem_object *obj);
+/**
+ * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
+ *
+ * This helper should only be used for fdinfo shared memory stats to determine
+ * if a GEM object is shared.
+ *
+ * @obj: obj in question
+ */
+static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj)
+{
+ return (obj->handle_count > 1) || obj->dma_buf;
+}
+
#ifdef CONFIG_LOCKDEP
/**
* drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
@@ -580,7 +593,7 @@ int drm_gem_evict(struct drm_gem_object *obj);
* drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
* @obj: the &drm_gem_object
*
- * This initializes the &drm_gem_object's &drm_gpuva list.
+ * This initializes the &drm_gem_object's &drm_gpuvm_bo list.
*
* Calling this function is only necessary for drivers intending to support the
* &drm_driver_feature DRIVER_GEM_GPUVA.
@@ -593,28 +606,28 @@ static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
}
/**
- * drm_gem_for_each_gpuva() - iternator to walk over a list of gpuvas
- * @entry__: &drm_gpuva structure to assign to in each iteration step
- * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
*
- * This iterator walks over all &drm_gpuva structures associated with the
- * &drm_gpuva_manager.
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
+ * &drm_gem_object.
*/
-#define drm_gem_for_each_gpuva(entry__, obj__) \
- list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry)
+#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
+ list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)
/**
- * drm_gem_for_each_gpuva_safe() - iternator to safely walk over a list of
- * gpuvas
- * @entry__: &drm_gpuva structure to assign to in each iteration step
- * @next__: &next &drm_gpuva to store the next step
- * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
+ * &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @next__: &next &drm_gpuvm_bo to store the next step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
*
- * This iterator walks over all &drm_gpuva structures associated with the
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
* &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
* it is safe against removal of elements.
*/
-#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \
- list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry)
+#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \
+ list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem)
#endif /* __DRM_GEM_H__ */
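A sketch of the intended fdinfo use of the new predicate, bucketing a BO's size into shared versus private totals:

	static void example_account(struct drm_gem_object *obj,
				    u64 *shared, u64 *private)
	{
		/* shared: more than one handle, or exported via dma-buf */
		if (drm_gem_object_is_shared_for_memory_stats(obj))
			*shared += obj->size;
		else
			*private += obj->size;
	}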
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
index 40b8b03951..3e01c619a2 100644
--- a/include/drm/drm_gem_atomic_helper.h
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -5,6 +5,7 @@
#include <linux/iosys-map.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
@@ -49,6 +50,15 @@ struct drm_shadow_plane_state {
/** @base: plane state */
struct drm_plane_state base;
+ /**
+ * @fmtcnv_state: Format-conversion state
+ *
+ * Per-plane state for format conversion.
+ * Flags for copying shadow buffers into backend storage. Also holds
+ * temporary storage for format conversion.
+ */
+ struct drm_format_conv_state fmtcnv_state;
+
/* Transitional state - do not export or duplicate */
/**
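Shadow-plane drivers reach the embedded conversion state from their atomic update path. A sketch pairing it with one of the updated format helpers above:

	static void example_shadow_update(struct drm_plane_state *new_state,
					  struct iosys_map *dst,
					  const struct drm_rect *clip)
	{
		struct drm_shadow_plane_state *shadow =
			to_drm_shadow_plane_state(new_state);

		/* the per-plane fmtcnv_state carries the scratch buffer */
		drm_fb_xrgb8888_to_rgb332(dst, NULL, shadow->data,
					  new_state->fb, clip,
					  &shadow->fmtcnv_state);
	}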
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index c7a0594bda..9060f9fae6 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -25,13 +25,17 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>
+#include <drm/drm_device.h>
#include <drm/drm_gem.h>
+#include <drm/drm_exec.h>
struct drm_gpuvm;
+struct drm_gpuvm_bo;
struct drm_gpuvm_ops;
/**
@@ -73,6 +77,12 @@ struct drm_gpuva {
struct drm_gpuvm *vm;
/**
+ * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
+ * &drm_gem_object
+ */
+ struct drm_gpuvm_bo *vm_bo;
+
+ /**
* @flags: the &drm_gpuva_flags for this mapping
*/
enum drm_gpuva_flags flags;
@@ -82,7 +92,7 @@ struct drm_gpuva {
*/
struct {
/**
- * @addr: the start address
+ * @va.addr: the start address
*/
u64 addr;
@@ -97,17 +107,17 @@ struct drm_gpuva {
*/
struct {
/**
- * @offset: the offset within the &drm_gem_object
+ * @gem.offset: the offset within the &drm_gem_object
*/
u64 offset;
/**
- * @obj: the mapped &drm_gem_object
+ * @gem.obj: the mapped &drm_gem_object
*/
struct drm_gem_object *obj;
/**
- * @entry: the &list_head to attach this object to a &drm_gem_object
+ * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo
*/
struct list_head entry;
} gem;
@@ -117,12 +127,12 @@ struct drm_gpuva {
*/
struct {
/**
- * @rb: the rb-tree node
+ * @rb.node: the rb-tree node
*/
struct rb_node node;
/**
- * @entry: The &list_head to additionally connect &drm_gpuvas
+ * @rb.entry: The &list_head to additionally connect &drm_gpuvas
* in the same order they appear in the interval tree. This is
* useful to keep iterating &drm_gpuvas from a start node found
* through the rb-tree while doing modifications on the rb-tree
@@ -131,7 +141,7 @@ struct drm_gpuva {
struct list_head entry;
/**
- * @__subtree_last: needed by the interval tree, holding last-in-subtree
+ * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
*/
u64 __subtree_last;
} rb;
@@ -140,7 +150,7 @@ struct drm_gpuva {
int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);
-void drm_gpuva_link(struct drm_gpuva *va);
+void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);
struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
@@ -177,6 +187,8 @@ static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
* drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
* is invalidated
* @va: the &drm_gpuva to check
+ *
+ * Returns: %true if the GPU VA is invalidated, %false otherwise
*/
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
@@ -184,6 +196,22 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
}
/**
+ * enum drm_gpuvm_flags - flags for struct drm_gpuvm
+ */
+enum drm_gpuvm_flags {
+ /**
+ * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
+ * GPUVM's &dma_resv lock
+ */
+ DRM_GPUVM_RESV_PROTECTED = BIT(0),
+
+ /**
+ * @DRM_GPUVM_USERBITS: user defined bits
+ */
+ DRM_GPUVM_USERBITS = BIT(1),
+};
+
+/**
* struct drm_gpuvm - DRM GPU VA Manager
*
* The DRM GPU VA Manager keeps track of a GPU's virtual address space by using
@@ -202,6 +230,16 @@ struct drm_gpuvm {
const char *name;
/**
+ * @flags: the &drm_gpuvm_flags of this GPUVM
+ */
+ enum drm_gpuvm_flags flags;
+
+ /**
+ * @drm: the &drm_device this VM lives in
+ */
+ struct drm_device *drm;
+
+ /**
* @mm_start: start of the VA space
*/
u64 mm_start;
@@ -216,17 +254,22 @@ struct drm_gpuvm {
*/
struct {
/**
- * @tree: the rb-tree to track GPU VA mappings
+ * @rb.tree: the rb-tree to track GPU VA mappings
*/
struct rb_root_cached tree;
/**
- * @list: the &list_head to track GPU VA mappings
+ * @rb.list: the &list_head to track GPU VA mappings
*/
struct list_head list;
} rb;
/**
+ * @kref: reference count of this object
+ */
+ struct kref kref;
+
+ /**
* @kernel_alloc_node:
*
* &drm_gpuva representing the address space cutout reserved for
@@ -238,16 +281,149 @@ struct drm_gpuvm {
* @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
*/
const struct drm_gpuvm_ops *ops;
+
+ /**
+ * @r_obj: Resv GEM object; representing the GPUVM's common &dma_resv.
+ */
+ struct drm_gem_object *r_obj;
+
+ /**
+ * @extobj: structure holding the extobj list
+ */
+ struct {
+ /**
+ * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
+ * external object
+ */
+ struct list_head list;
+
+ /**
+ * @extobj.local_list: pointer to the local list temporarily
+ * storing entries from the external object list
+ */
+ struct list_head *local_list;
+
+ /**
+ * @extobj.lock: spinlock to protect the extobj list
+ */
+ spinlock_t lock;
+ } extobj;
+
+ /**
+ * @evict: structure holding the evict list and evict list lock
+ */
+ struct {
+ /**
+ * @evict.list: &list_head storing &drm_gpuvm_bos currently
+ * being evicted
+ */
+ struct list_head list;
+
+ /**
+ * @evict.local_list: pointer to the local list temporarily
+ * storing entries from the evicted object list
+ */
+ struct list_head *local_list;
+
+ /**
+ * @evict.lock: spinlock to protect the evict list
+ */
+ spinlock_t lock;
+ } evict;
};
void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+ enum drm_gpuvm_flags flags,
+ struct drm_device *drm,
+ struct drm_gem_object *r_obj,
u64 start_offset, u64 range,
u64 reserve_offset, u64 reserve_range,
const struct drm_gpuvm_ops *ops);
-void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm);
+/**
+ * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to acquire the reference of
+ *
+ * This function acquires an additional reference to @gpuvm. It is illegal to
+ * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm pointer
+ */
+static inline struct drm_gpuvm *
+drm_gpuvm_get(struct drm_gpuvm *gpuvm)
+{
+ kref_get(&gpuvm->kref);
+
+ return gpuvm;
+}
+
+void drm_gpuvm_put(struct drm_gpuvm *gpuvm);
+
+bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
+struct drm_gem_object *
+drm_gpuvm_resv_object_alloc(struct drm_device *drm);
+
+/**
+ * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
+ * set
+ * @gpuvm: the &drm_gpuvm
+ *
+ * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
+ */
+static inline bool
+drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
+{
+ return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
+}
+
+/**
+ * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
+ * @gpuvm__: the &drm_gpuvm
+ *
+ * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
+ */
+#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)
+
+/**
+ * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
+ * &dma_resv
+ * @gpuvm__: the &drm_gpuvm
+ *
+ * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
+ * &dma_resv
+ */
+#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)
+
+#define drm_gpuvm_resv_held(gpuvm__) \
+ dma_resv_held(drm_gpuvm_resv(gpuvm__))
+
+#define drm_gpuvm_resv_assert_held(gpuvm__) \
+ dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))
+
+/**
+ * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
+ * external object
+ * @gpuvm: the &drm_gpuvm to check
+ * @obj: the &drm_gem_object to check
+ *
+ * Returns: true if the &drm_gem_object &dma_resv differs from the
+ * &drm_gpuvm's &dma_resv, false otherwise
+ */
+static inline bool
+drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ return obj && obj->resv != drm_gpuvm_resv(gpuvm);
+}
+
static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
@@ -327,6 +503,291 @@ __drm_gpuva_next(struct drm_gpuva *va)
list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
/**
+ * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
+ *
+ * This structure should be created on the stack as &drm_exec should be.
+ *
+ * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
+ */
+struct drm_gpuvm_exec {
+ /**
+ * @exec: the &drm_exec structure
+ */
+ struct drm_exec exec;
+
+ /**
+ * @flags: the flags for the struct drm_exec
+ */
+ uint32_t flags;
+
+ /**
+ * @vm: the &drm_gpuvm whose DMA reservations are to be locked
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @num_fences: the number of fences to reserve for the &dma_resv of the
+ * locked &drm_gem_objects
+ */
+ unsigned int num_fences;
+
+ /**
+ * @extra: Callback and corresponding private data for the driver to
+ * lock arbitrary additional &drm_gem_objects.
+ */
+ struct {
+ /**
+ * @extra.fn: The driver callback to lock additional
+ * &drm_gem_objects.
+ */
+ int (*fn)(struct drm_gpuvm_exec *vm_exec);
+
+ /**
+ * @extra.priv: driver private data for the @fn callback
+ */
+ void *priv;
+ } extra;
+};
+
+int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences);
+
+int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences);
+
+int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ u64 addr, u64 range,
+ unsigned int num_fences);
+
+int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);
+
+int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
+ struct drm_gem_object **objs,
+ unsigned int num_objs);
+
+int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
+ u64 addr, u64 range);
+
+/**
+ * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * Releases all dma-resv locks of all &drm_gem_objects previously acquired
+ * through drm_gpuvm_exec_lock() or its variants.
+ */
+static inline void
+drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
+{
+ drm_exec_fini(&vm_exec->exec);
+}
+
+int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
+void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage);
+
+/**
+ * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ * @fence: fence to add
+ * @private_usage: private dma-resv usage
+ * @extobj_usage: extobj dma-resv usage
+ *
+ * See drm_gpuvm_resv_add_fence().
+ */
+static inline void
+drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage)
+{
+ drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
+ private_usage, extobj_usage);
+}
+
+/**
+ * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * See drm_gpuvm_validate().
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static inline int
+drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
+{
+ return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
+}
+
+/**
+ * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
+ * &drm_gem_object combination
+ *
+ * This structure is an abstraction representing a &drm_gpuvm and
+ * &drm_gem_object combination. It serves as an indirection to accelerate
+ * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
+ * &drm_gem_object.
+ *
+ * Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to
+ * accelerate validation.
+ *
+ * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once
+ * a GEM object is first mapped in a GPU-VM and release the instance once the
+ * last mapping of the GEM object in this GPU-VM is unmapped.
+ */
+struct drm_gpuvm_bo {
+ /**
+ * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
+ * counted pointer.
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @obj: The &drm_gem_object being mapped in @vm. This is a reference
+ * counted pointer.
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @evicted: Indicates whether the &drm_gem_object is evicted; field
+ * protected by the &drm_gem_object's dma-resv lock.
+ */
+ bool evicted;
+
+ /**
+ * @kref: The reference count for this &drm_gpuvm_bo.
+ */
+ struct kref kref;
+
+ /**
+ * @list: Structure containing all &list_heads.
+ */
+ struct {
+ /**
+ * @list.gpuva: The list of linked &drm_gpuvas.
+ *
+ * It is safe to access entries from this list as long as the
+ * GEM's gpuva lock is held. See also struct drm_gem_object.
+ */
+ struct list_head gpuva;
+
+ /**
+ * @list.entry: Structure containing all &list_heads serving as
+ * entry.
+ */
+ struct {
+ /**
+ * @list.entry.gem: List entry to attach to the
+ * &drm_gem_objects gpuva list.
+ */
+ struct list_head gem;
+
+ /**
+ * @list.entry.extobj: List entry to attach to the
+ * &drm_gpuvm's extobj list.
+ */
+ struct list_head extobj;
+
+ /**
+ * @list.entry.evict: List entry to attach to the
+ * &drm_gpuvm's evict list.
+ */
+ struct list_head evict;
+ } entry;
+ } list;
+};
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
+
+/**
+ * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
+ * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
+ *
+ * This function acquires an additional reference to @vm_bo. It is illegal to
+ * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm_bo pointer
+ */
+static inline struct drm_gpuvm_bo *
+drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
+{
+ kref_get(&vm_bo->kref);
+ return vm_bo;
+}
+
+bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+
+void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);
+
+/**
+ * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
+ * to/from the &drm_gpuvms evicted list
+ * @obj: the &drm_gem_object
+ * @evict: indicates whether @obj is evicted
+ *
+ * See drm_gpuvm_bo_evict().
+ */
+static inline void
+drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj)
+ drm_gpuvm_bo_evict(vm_bo, evict);
+}
+
+void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
+
+/**
+ * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gpuvm_bo.
+ *
+ * The caller must hold the GEM's gpuva lock.
+ */
+#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
+ list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)
+
+/**
+ * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
+ * &drm_gpuva
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @next__: &next &drm_gpuva to store the next step
+ * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ *
+ * The caller must hold the GEM's gpuva lock.
+ */
+#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
+ list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
+
+/**
* enum drm_gpuva_op_type - GPU VA operation type
*
* Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
@@ -366,12 +827,12 @@ struct drm_gpuva_op_map {
*/
struct {
/**
- * @addr: the base address of the new mapping
+ * @va.addr: the base address of the new mapping
*/
u64 addr;
/**
- * @range: the range of the new mapping
+ * @va.range: the range of the new mapping
*/
u64 range;
} va;
@@ -381,12 +842,12 @@ struct drm_gpuva_op_map {
*/
struct {
/**
- * @offset: the offset within the &drm_gem_object
+ * @gem.offset: the offset within the &drm_gem_object
*/
u64 offset;
/**
- * @obj: the &drm_gem_object to map
+ * @gem.obj: the &drm_gem_object to map
*/
struct drm_gem_object *obj;
} gem;
@@ -557,6 +1018,16 @@ struct drm_gpuva_ops {
list_for_each_entry_from_reverse(op, &(ops)->list, entry)
/**
+ * drm_gpuva_for_each_op_reverse - iterator to walk over &drm_gpuva_ops in reverse
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations in reverse.
+ */
+#define drm_gpuva_for_each_op_reverse(op, ops) \
+ list_for_each_entry_reverse(op, &(ops)->list, entry)
+
+/**
* drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
* @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
*/
@@ -595,8 +1066,7 @@ drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
struct drm_gpuva_ops *
-drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
- struct drm_gem_object *obj);
+drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
struct drm_gpuva_ops *ops);
@@ -617,6 +1087,14 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
*/
struct drm_gpuvm_ops {
/**
+ * @vm_free: called when the last reference of a struct drm_gpuvm is
+ * dropped
+ *
+ * This callback is mandatory.
+ */
+ void (*vm_free)(struct drm_gpuvm *gpuvm);
+
+ /**
* @op_alloc: called when the &drm_gpuvm allocates
* a struct drm_gpuva_op
*
@@ -641,6 +1119,42 @@ struct drm_gpuvm_ops {
void (*op_free)(struct drm_gpuva_op *op);
/**
+ * @vm_bo_alloc: called when the &drm_gpuvm allocates
+ * a struct drm_gpuvm_bo
+ *
+ * Some drivers may want to embed struct drm_gpuvm_bo into driver
+ * specific structures. By implementing this callback drivers can
+ * allocate memory accordingly.
+ *
+ * This callback is optional.
+ */
+ struct drm_gpuvm_bo *(*vm_bo_alloc)(void);
+
+ /**
+ * @vm_bo_free: called when the &drm_gpuvm frees a
+ * struct drm_gpuvm_bo
+ *
+ * Some drivers may want to embed struct drm_gpuvm_bo into driver
+ * specific structures. By implementing this callback drivers can
+ * free the previously allocated memory accordingly.
+ *
+ * This callback is optional.
+ */
+ void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);
+
+ /**
+ * @vm_bo_validate: called from drm_gpuvm_validate()
+ *
+ * Drivers receive this callback for every evicted &drm_gem_object being
+ * mapped in the corresponding &drm_gpuvm.
+ *
+ * Typically, drivers would call their driver specific variant of
+ * ttm_bo_validate() from within this callback.
+ */
+ int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
+ struct drm_exec *exec);
+
+ /**
* @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
* mapping once all previous steps were completed
*
@@ -702,4 +1216,32 @@ void drm_gpuva_remap(struct drm_gpuva *prev,
void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
+/**
+ * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
+ * the unmap stage of a remap op.
+ * @op: Remap op.
+ * @start_addr: Output pointer for the start of the required unmap.
+ * @range: Output pointer for the length of the required unmap.
+ *
+ * The given start address and range will be set such that they represent the
+ * range of the address space that was previously covered by the mapping being
+ * re-mapped, but is now empty.
+ */
+static inline void
+drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
+ u64 *start_addr, u64 *range)
+{
+ const u64 va_start = op->prev ?
+ op->prev->va.addr + op->prev->va.range :
+ op->unmap->va->va.addr;
+ const u64 va_end = op->next ?
+ op->next->va.addr :
+ op->unmap->va->va.addr + op->unmap->va->va.range;
+
+ if (start_addr)
+ *start_addr = va_start;
+ if (range)
+ *range = va_end - va_start;
+}
+
#endif /* __DRM_GPUVM_H__ */
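A sketch of the lock/validate/fence flow the new drm_gpuvm_exec API encourages; the dma-resv usages are placeholders, drivers pick what matches their submission model:

	static int example_submit(struct drm_gpuvm *gpuvm, struct dma_fence *fence)
	{
		struct drm_gpuvm_exec vm_exec = {
			.vm = gpuvm,
			.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
			.num_fences = 1,
		};
		int ret;

		/* locks the VM resv and all (ext)object resvs */
		ret = drm_gpuvm_exec_lock(&vm_exec);
		if (ret)
			return ret;

		/* revalidate everything on the evict list */
		ret = drm_gpuvm_exec_validate(&vm_exec);
		if (ret)
			goto out_unlock;

		/* ... run the job that signals @fence ... */

		drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
					      DMA_RESV_USAGE_BOOKKEEP,
					      DMA_RESV_USAGE_BOOKKEEP);
	out_unlock:
		drm_gpuvm_exec_unlock(&vm_exec);
		return ret;
	}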
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index 6ed61c371f..171760b6c4 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -110,17 +110,6 @@ enum drm_ioctl_flags {
*/
DRM_ROOT_ONLY = BIT(2),
/**
- * @DRM_UNLOCKED:
- *
- * Whether &drm_ioctl_desc.func should be called with the DRM BKL held
- * or not. Enforced as the default for all modern drivers, hence there
- * should never be a need to set this flag.
- *
- * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the
- * only legacy IOCTL which needs this.
- */
- DRM_UNLOCKED = BIT(4),
- /**
* @DRM_RENDER_ALLOW:
*
* This is used for all ioctl needed for rendering only, for drivers
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
deleted file mode 100644
index 0fc85418aa..0000000000
--- a/include/drm/drm_legacy.h
+++ /dev/null
@@ -1,331 +0,0 @@
-#ifndef __DRM_DRM_LEGACY_H__
-#define __DRM_DRM_LEGACY_H__
-/*
- * Legacy driver interfaces for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- * Copyright © 2014 Intel Corporation
- * Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/agp_backend.h>
-
-#include <drm/drm.h>
-#include <drm/drm_auth.h>
-
-struct drm_device;
-struct drm_driver;
-struct file;
-struct pci_driver;
-
-/*
- * Legacy Support for paleontologic DRM drivers
- *
- * If you add a new driver and it uses any of these functions or structures,
- * you're doing it terribly wrong.
- */
-
-/*
- * Hash-table Support
- */
-
-struct drm_hash_item {
- struct hlist_node head;
- unsigned long key;
-};
-
-struct drm_open_hash {
- struct hlist_head *table;
- u8 order;
-};
-
-/**
- * DMA buffer.
- */
-struct drm_buf {
- int idx; /**< Index into master buflist */
- int total; /**< Buffer size */
- int order; /**< log-base-2(total) */
- int used; /**< Amount of buffer in use (for DMA) */
- unsigned long offset; /**< Byte offset (used internally) */
- void *address; /**< Address of buffer */
- unsigned long bus_address; /**< Bus address of buffer */
- struct drm_buf *next; /**< Kernel-only: used for free list */
- __volatile__ int waiting; /**< On kernel DMA queue */
- __volatile__ int pending; /**< On hardware DMA queue */
- struct drm_file *file_priv; /**< Private of holding file descr */
- int context; /**< Kernel queue for this buffer */
- int while_locked; /**< Dispatch this buffer while locked */
- enum {
- DRM_LIST_NONE = 0,
- DRM_LIST_FREE = 1,
- DRM_LIST_WAIT = 2,
- DRM_LIST_PEND = 3,
- DRM_LIST_PRIO = 4,
- DRM_LIST_RECLAIM = 5
- } list; /**< Which list we're on */
-
- int dev_priv_size; /**< Size of buffer private storage */
- void *dev_private; /**< Per-buffer private storage */
-};
-
-typedef struct drm_dma_handle {
- dma_addr_t busaddr;
- void *vaddr;
- size_t size;
-} drm_dma_handle_t;
-
-/**
- * Buffer entry. There is one of these for each buffer size order.
- */
-struct drm_buf_entry {
- int buf_size; /**< size */
- int buf_count; /**< number of buffers */
- struct drm_buf *buflist; /**< buffer list */
- int seg_count;
- int page_order;
- struct drm_dma_handle **seglist;
-
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
-};
-
-/**
- * DMA data.
- */
-struct drm_device_dma {
-
- struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
- int buf_count; /**< total number of buffers */
- struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
- int seg_count;
- int page_count; /**< number of pages */
- unsigned long *pagelist; /**< page list */
- unsigned long byte_count;
- enum {
- _DRM_DMA_USE_AGP = 0x01,
- _DRM_DMA_USE_SG = 0x02,
- _DRM_DMA_USE_FB = 0x04,
- _DRM_DMA_USE_PCI_RO = 0x08
- } flags;
-
-};
-
-/**
- * Scatter-gather memory.
- */
-struct drm_sg_mem {
- unsigned long handle;
- void *virtual;
- int pages;
- struct page **pagelist;
- dma_addr_t *busaddr;
-};
-
-/**
- * Kernel side of a mapping
- */
-struct drm_local_map {
- dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/
- unsigned long size; /**< Requested physical size (bytes) */
- enum drm_map_type type; /**< Type of memory to map */
- enum drm_map_flags flags; /**< Flags */
- void *handle; /**< User-space: "Handle" to pass to mmap() */
- /**< Kernel-space: kernel-virtual address */
- int mtrr; /**< MTRR slot used */
-};
-
-typedef struct drm_local_map drm_local_map_t;
-
-/**
- * Mappings list
- */
-struct drm_map_list {
- struct list_head head; /**< list head */
- struct drm_hash_item hash;
- struct drm_local_map *map; /**< mapping */
- uint64_t user_token;
- struct drm_master *master;
-};
-
-int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map **map_p);
-struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
-void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
-int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
-struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
-int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
-
-int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
-int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
-
-/**
- * Test that the hardware lock is held by the caller, returning -EINVAL otherwise.
- *
- * \param dev DRM device.
- * \param _file_priv DRM file private of the caller.
- */
-#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
-do { \
- if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
- _file_priv->master->lock.file_priv != _file_priv) { \
- DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
- __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
- _file_priv->master->lock.file_priv, _file_priv); \
- return -EINVAL; \
- } \
-} while (0)
-
-void drm_legacy_idlelock_take(struct drm_lock_data *lock);
-void drm_legacy_idlelock_release(struct drm_lock_data *lock);
-
-/* drm_irq.c */
-int drm_legacy_irq_uninstall(struct drm_device *dev);
-
-/* drm_pci.c */
-
-#ifdef CONFIG_PCI
-
-int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver);
-void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver);
-
-#else
-
-static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
- size_t size, size_t align)
-{
- return NULL;
-}
-
-static inline void drm_pci_free(struct drm_device *dev,
- struct drm_dma_handle *dmah)
-{
-}
-
-static inline int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- return -EINVAL;
-}
-
-static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
-}
-
-#endif
-
-/*
- * AGP Support
- */
-
-struct drm_agp_head {
- struct agp_kern_info agp_info;
- struct list_head memory;
- unsigned long mode;
- struct agp_bridge_data *bridge;
- int enabled;
- int acquired;
- unsigned long base;
- int agp_mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
-};
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
-struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev);
-int drm_legacy_agp_acquire(struct drm_device *dev);
-int drm_legacy_agp_release(struct drm_device *dev);
-int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-#else
-static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
-{
- return NULL;
-}
-
-static inline int drm_legacy_agp_acquire(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_release(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_enable(struct drm_device *dev,
- struct drm_agp_mode mode)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_info(struct drm_device *dev,
- struct drm_agp_info *info)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_alloc(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_free(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_unbind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_bind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-#endif
-
-/* drm_memory.c */
-void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
-
-#endif /* __DRM_DRM_LEGACY_H__ */
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 816f196b3d..e8e0f8d39f 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -12,6 +12,7 @@
#include <drm/drm_device.h>
#include <drm/drm_simple_kms_helper.h>
+struct drm_format_conv_state;
struct drm_rect;
struct gpio_desc;
struct iosys_map;
@@ -192,7 +193,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
size_t len);
int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap);
+ struct drm_rect *clip, bool swap,
+ struct drm_format_conv_state *fmtcnv_state);
/**
* mipi_dbi_command - MIPI DCS command with optional parameter(s)
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 912f1e4156..08d7a7f018 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -60,7 +60,7 @@ struct drm_mode_object {
void (*free_cb)(struct kref *kref);
};
-#define DRM_OBJECT_MAX_PROPERTY 24
+#define DRM_OBJECT_MAX_PROPERTY 64
/**
* struct drm_object_properties - property tracking for &drm_mode_object
*/
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 159213786e..9ed4246954 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -134,7 +134,7 @@ struct drm_crtc_helper_funcs {
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardward constraints. Any further
+ * against configuration-invariant hardware constraints. Any further
* limits which depend upon the configuration can only be checked in
* @mode_fixup or @atomic_check.
*
@@ -550,7 +550,7 @@ struct drm_encoder_helper_funcs {
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardward constraints. Any further
+ * against configuration-invariant hardware constraints. Any further
* limits which depend upon the configuration can only be checked in
* @mode_fixup or @atomic_check.
*
@@ -1155,6 +1155,11 @@ struct drm_connector_helper_funcs {
* This operation is optional.
*
* This callback is used by the drm_kms_helper_poll_enable() helpers.
+ *
+ * This operation does not need to perform any hpd state tracking as
+ * the DRM core handles that maintenance and ensures the calls to enable
+ * and disable hpd are balanced.
+ *
*/
void (*enable_hpd)(struct drm_connector *connector);
@@ -1166,6 +1171,11 @@ struct drm_connector_helper_funcs {
* This operation is optional.
*
* This callback is used by the drm_kms_helper_poll_disable() helpers.
+ *
+ * This operation does not need to perform any hpd state tracking as
+ * the DRM core handles that maintenance and ensures the calls to enable
+ * and disable hpd are balanced.
+ *
*/
void (*disable_hpd)(struct drm_connector *connector);
};
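Since the core now guarantees balanced enable/disable calls, a driver's hpd hooks can be a plain interrupt toggle with no refcounting of their own. A hypothetical sketch (my_connector, to_my_connector() and the hpd_irq field are illustrative, not from this patch):

	static void my_enable_hpd(struct drm_connector *connector)
	{
		struct my_connector *mc = to_my_connector(connector);

		enable_irq(mc->hpd_irq);
	}

	static void my_disable_hpd(struct drm_connector *connector)
	{
		struct my_connector *mc = to_my_connector(connector);

		disable_irq(mc->hpd_irq);
	}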
@@ -1465,7 +1475,7 @@ struct drm_mode_config_helper_funcs {
* swapped into the various state pointers. The passed in state
* therefore contains copies of the old/previous state. This hook should
* commit the new state into hardware. Note that the helpers have
- * already waited for preceeding atomic commits and fences, but drivers
+ * already waited for preceding atomic commits and fences, but drivers
* can add more waiting calls at the start of their implementation, e.g.
* to wait for driver-internal request for implicit syncing, before
* starting to commit the update to the hardware.
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index fef775200a..641fe29805 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -116,6 +116,10 @@ struct drm_plane_state {
/** @src_h: height of visible portion of plane (in 16.16) */
uint32_t src_h, src_w;
+ /** @hotspot_x: x offset to mouse cursor hotspot */
+ /** @hotspot_y: y offset to mouse cursor hotspot */
+ int32_t hotspot_x, hotspot_y;
+
/**
* @alpha:
* Opacity of the plane with 0 as completely transparent and 0xffff as
@@ -247,6 +251,13 @@ struct drm_plane_state {
/** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
+
+ /**
+ * @color_mgmt_changed: Color management properties have changed. Used
+ * by the atomic helpers and drivers to steer the atomic commit control
+ * flow.
+ */
+ bool color_mgmt_changed : 1;
};
static inline struct drm_rect
@@ -758,6 +769,16 @@ struct drm_plane {
* scaling.
*/
struct drm_property *scaling_filter_property;
+
+ /**
+ * @hotspot_x_property: property to set mouse hotspot x offset.
+ */
+ struct drm_property *hotspot_x_property;
+
+ /**
+ * @hotspot_y_property: property to set mouse hotspot y offset.
+ */
+ struct drm_property *hotspot_y_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 3a574e8cd2..75f9c48305 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -26,7 +26,6 @@
#include <linux/types.h>
-struct drm_atomic_state;
struct drm_crtc;
struct drm_framebuffer;
struct drm_modeset_acquire_ctx;
@@ -42,7 +41,6 @@ int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *cr
int drm_plane_helper_disable_primary(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx);
void drm_plane_helper_destroy(struct drm_plane *plane);
-int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state);
/**
* DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index a93a387f8a..dd4883df87 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -453,7 +453,7 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
/* Helper for struct drm_device based logging. */
#define __drm_printk(drm, level, type, fmt, ...) \
- dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)
+ dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__)
#define drm_info(drm, fmt, ...) \
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 65bc9710a4..082f29156b 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -279,6 +279,12 @@ struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
const void *data);
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
uint32_t id);
+int drm_property_replace_blob_from_id(struct drm_device *dev,
+ struct drm_property_blob **blob,
+ uint64_t blob_id,
+ ssize_t expected_size,
+ ssize_t expected_elem_size,
+ bool *replaced);
int drm_property_replace_global_blob(struct drm_device *dev,
struct drm_property_blob **replace,
size_t length,
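For context (not part of the patch): the new helper looks up a blob by the ID supplied with a property write and swaps it into *blob, reporting via *replaced whether anything changed. A hedged sketch of an atomic set_property path, assuming a negative expected size skips that check (as the signed ssize_t parameters suggest) and using an illustrative CTM blob:

	bool replaced = false;
	int ret;

	ret = drm_property_replace_blob_from_id(dev, &state->ctm, val,
						sizeof(struct drm_color_ctm),
						-1, &replaced);
	if (ret)
		return ret;
	state->color_mgmt_changed |= replaced;

This pairs naturally with the color_mgmt_changed bit added to plane state later in this diff.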
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d2fb81e341..5acc64954a 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -63,10 +63,10 @@ struct drm_file;
* to an array, and as such should start at 0.
*/
enum drm_sched_priority {
- DRM_SCHED_PRIORITY_MIN,
- DRM_SCHED_PRIORITY_NORMAL,
- DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
+ DRM_SCHED_PRIORITY_HIGH,
+ DRM_SCHED_PRIORITY_NORMAL,
+ DRM_SCHED_PRIORITY_LOW,
DRM_SCHED_PRIORITY_COUNT
};
@@ -320,6 +320,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
+ * @credits: the number of credits this job contributes to the scheduler
 * @work: Helper to reschedule job kill to a different context.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
@@ -339,6 +340,8 @@ struct drm_sched_job {
struct drm_gpu_scheduler *sched;
struct drm_sched_fence *s_fence;
+ u32 credits;
+
/*
* work is used only after finish_cb has been used and will not be
* accessed anymore.
@@ -462,29 +465,42 @@ struct drm_sched_backend_ops {
* and it's time to clean it up.
*/
void (*free_job)(struct drm_sched_job *sched_job);
+
+ /**
+ * @update_job_credits: Called when the scheduler is considering this
+ * job for execution.
+ *
+ * This callback returns the number of credits the job would take if
+ * pushed to the hardware. Drivers may use this to dynamically update
+ * the job's credit count. For instance, deduct the number of credits
+ * for already signalled native fences.
+ *
+ * This callback is optional.
+ */
+ u32 (*update_job_credits)(struct drm_sched_job *sched_job);
};
/**
* struct drm_gpu_scheduler - scheduler instance-specific data
*
* @ops: backend operations provided by the driver.
- * @hw_submission_limit: the max size of the hardware queue.
+ * @credit_limit: the credit limit of this scheduler
+ * @credit_count: the current credit count of this scheduler
* @timeout: the time after which a job is removed from the scheduler.
* @name: name of the ring for which this scheduler is being used.
* @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
* as there's usually one run-queue per priority, but could be less.
* @sched_rq: An allocated array of run-queues of size @num_rqs;
- * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
- * is ready to be scheduled.
* @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
* waits on this wait queue until all the scheduled jobs are
* finished.
- * @hw_rq_count: the number of jobs currently in the hardware queue.
* @job_id_count: used to assign unique id to the each job.
+ * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
* @timeout_wq: workqueue used to queue @work_tdr
+ * @work_run_job: work which calls run_job op of each scheduler.
+ * @work_free_job: work which calls free_job op of each scheduler.
* @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
* timeout interval is over.
- * @thread: the kthread on which the scheduler runs.
* @pending_list: the list of jobs which are currently in the job queue.
* @job_list_lock: lock to protect the pending_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
@@ -493,24 +509,27 @@ struct drm_sched_backend_ops {
* @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
+ * @pause_submit: pause queuing of @work_run_job on @submit_wq
+ * @own_submit_wq: scheduler owns allocation of @submit_wq
* @dev: system &struct device
*
* One scheduler is implemented for each hardware ring.
*/
struct drm_gpu_scheduler {
const struct drm_sched_backend_ops *ops;
- uint32_t hw_submission_limit;
+ u32 credit_limit;
+ atomic_t credit_count;
long timeout;
const char *name;
u32 num_rqs;
struct drm_sched_rq **sched_rq;
- wait_queue_head_t wake_up_worker;
wait_queue_head_t job_scheduled;
- atomic_t hw_rq_count;
atomic64_t job_id_count;
+ struct workqueue_struct *submit_wq;
struct workqueue_struct *timeout_wq;
+ struct work_struct work_run_job;
+ struct work_struct work_free_job;
struct delayed_work work_tdr;
- struct task_struct *thread;
struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
@@ -518,19 +537,22 @@ struct drm_gpu_scheduler {
atomic_t _score;
bool ready;
bool free_guilty;
+ bool pause_submit;
+ bool own_submit_wq;
struct device *dev;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
- u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit,
+ struct workqueue_struct *submit_wq,
+ u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
long timeout, struct workqueue_struct *timeout_wq,
atomic_t *score, const char *name, struct device *dev);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- void *owner);
+ u32 credits, void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
@@ -550,8 +572,12 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
unsigned int num_sched_list);
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
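Taken together, the scheduler now counts credits rather than raw hardware-queue slots and runs on work items instead of a kthread. A hedged sketch of the updated driver-side calls (my_sched_ops, gpu, entity and job are illustrative; passing a NULL submit_wq makes the scheduler allocate its own ordered workqueue, per the new own_submit_wq flag):

	ret = drm_sched_init(&gpu->sched, &my_sched_ops,
			     NULL,			/* submit_wq: scheduler owns one */
			     DRM_SCHED_PRIORITY_COUNT,	/* num_rqs */
			     64,			/* credit_limit */
			     5,				/* hang_limit */
			     msecs_to_jiffies(500),	/* timeout */
			     NULL,			/* timeout_wq */
			     NULL, "my-ring", gpu->dev);

	/* Each job now declares how many credits it consumes: */
	ret = drm_sched_job_init(&job->base, &entity, 1, owner);

And the optional update_job_credits hook lets that cost shrink as the job's native fences signal, e.g.:

	static u32 my_update_job_credits(struct drm_sched_job *sched_job)
	{
		struct my_job *job = to_my_job(sched_job);	/* hypothetical */

		return job->remaining_ring_dwords;		/* illustrative cost model */
	}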
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 1c9ea6ab3e..fcf1849aa4 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -738,7 +738,8 @@
INTEL_DG2_G12_IDS(info)
#define INTEL_ATS_M150_IDS(info) \
- INTEL_VGA_DEVICE(0x56C0, info)
+ INTEL_VGA_DEVICE(0x56C0, info), \
+ INTEL_VGA_DEVICE(0x56C2, info)
#define INTEL_ATS_M75_IDS(info) \
INTEL_VGA_DEVICE(0x56C1, info)
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 30a347e5aa..4490d43c63 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -74,7 +74,7 @@ struct ttm_pool {
bool use_dma32;
struct {
- struct ttm_pool_type orders[MAX_ORDER + 1];
+ struct ttm_pool_type orders[NR_PAGE_ORDERS];
} caching[TTM_NUM_CACHING_TYPES];
};
diff --git a/include/drm/xe_pciids.h b/include/drm/xe_pciids.h
new file mode 100644
index 0000000000..de1a344737
--- /dev/null
+++ b/include/drm/xe_pciids.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_PCIIDS_H_
+#define _XE_PCIIDS_H_
+
+/*
+ * Lists below can be turned into initializers for a struct pci_device_id
+ * by defining INTEL_VGA_DEVICE:
+ *
+ * #define INTEL_VGA_DEVICE(id, info) { \
+ * 0x8086, id, \
+ * ~0, ~0, \
+ * 0x030000, 0xff0000, \
+ * (unsigned long) info }
+ *
+ * And then calling like:
+ *
+ * XE_TGL_GT1_IDS(INTEL_VGA_DEVICE, ## __VA_ARGS__)
+ *
+ * To turn them into something else, just provide a different macro passed as
+ * first argument.
+ */
+
+/* TGL */
+#define XE_TGL_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9A60, ## __VA_ARGS__), \
+ MACRO__(0x9A68, ## __VA_ARGS__), \
+ MACRO__(0x9A70, ## __VA_ARGS__)
+
+#define XE_TGL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9A40, ## __VA_ARGS__), \
+ MACRO__(0x9A49, ## __VA_ARGS__), \
+ MACRO__(0x9A59, ## __VA_ARGS__), \
+ MACRO__(0x9A78, ## __VA_ARGS__), \
+ MACRO__(0x9AC0, ## __VA_ARGS__), \
+ MACRO__(0x9AC9, ## __VA_ARGS__), \
+ MACRO__(0x9AD9, ## __VA_ARGS__), \
+ MACRO__(0x9AF8, ## __VA_ARGS__)
+
+#define XE_TGL_IDS(MACRO__, ...) \
+ XE_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+/* RKL */
+#define XE_RKL_IDS(MACRO__, ...) \
+ MACRO__(0x4C80, ## __VA_ARGS__), \
+ MACRO__(0x4C8A, ## __VA_ARGS__), \
+ MACRO__(0x4C8B, ## __VA_ARGS__), \
+ MACRO__(0x4C8C, ## __VA_ARGS__), \
+ MACRO__(0x4C90, ## __VA_ARGS__), \
+ MACRO__(0x4C9A, ## __VA_ARGS__)
+
+/* DG1 */
+#define XE_DG1_IDS(MACRO__, ...) \
+ MACRO__(0x4905, ## __VA_ARGS__), \
+ MACRO__(0x4906, ## __VA_ARGS__), \
+ MACRO__(0x4907, ## __VA_ARGS__), \
+ MACRO__(0x4908, ## __VA_ARGS__), \
+ MACRO__(0x4909, ## __VA_ARGS__)
+
+/* ADL-S */
+#define XE_ADLS_IDS(MACRO__, ...) \
+ MACRO__(0x4680, ## __VA_ARGS__), \
+ MACRO__(0x4682, ## __VA_ARGS__), \
+ MACRO__(0x4688, ## __VA_ARGS__), \
+ MACRO__(0x468A, ## __VA_ARGS__), \
+ MACRO__(0x468B, ## __VA_ARGS__), \
+ MACRO__(0x4690, ## __VA_ARGS__), \
+ MACRO__(0x4692, ## __VA_ARGS__), \
+ MACRO__(0x4693, ## __VA_ARGS__)
+
+/* ADL-P */
+#define XE_ADLP_IDS(MACRO__, ...) \
+ MACRO__(0x46A0, ## __VA_ARGS__), \
+ MACRO__(0x46A1, ## __VA_ARGS__), \
+ MACRO__(0x46A2, ## __VA_ARGS__), \
+ MACRO__(0x46A3, ## __VA_ARGS__), \
+ MACRO__(0x46A6, ## __VA_ARGS__), \
+ MACRO__(0x46A8, ## __VA_ARGS__), \
+ MACRO__(0x46AA, ## __VA_ARGS__), \
+ MACRO__(0x462A, ## __VA_ARGS__), \
+ MACRO__(0x4626, ## __VA_ARGS__), \
+ MACRO__(0x4628, ## __VA_ARGS__), \
+ MACRO__(0x46B0, ## __VA_ARGS__), \
+ MACRO__(0x46B1, ## __VA_ARGS__), \
+ MACRO__(0x46B2, ## __VA_ARGS__), \
+ MACRO__(0x46B3, ## __VA_ARGS__), \
+ MACRO__(0x46C0, ## __VA_ARGS__), \
+ MACRO__(0x46C1, ## __VA_ARGS__), \
+ MACRO__(0x46C2, ## __VA_ARGS__), \
+ MACRO__(0x46C3, ## __VA_ARGS__)
+
+/* ADL-N */
+#define XE_ADLN_IDS(MACRO__, ...) \
+ MACRO__(0x46D0, ## __VA_ARGS__), \
+ MACRO__(0x46D1, ## __VA_ARGS__), \
+ MACRO__(0x46D2, ## __VA_ARGS__)
+
+/* RPL-S */
+#define XE_RPLS_IDS(MACRO__, ...) \
+ MACRO__(0xA780, ## __VA_ARGS__), \
+ MACRO__(0xA781, ## __VA_ARGS__), \
+ MACRO__(0xA782, ## __VA_ARGS__), \
+ MACRO__(0xA783, ## __VA_ARGS__), \
+ MACRO__(0xA788, ## __VA_ARGS__), \
+ MACRO__(0xA789, ## __VA_ARGS__), \
+ MACRO__(0xA78A, ## __VA_ARGS__), \
+ MACRO__(0xA78B, ## __VA_ARGS__)
+
+/* RPL-U */
+#define XE_RPLU_IDS(MACRO__, ...) \
+ MACRO__(0xA721, ## __VA_ARGS__), \
+ MACRO__(0xA7A1, ## __VA_ARGS__), \
+ MACRO__(0xA7A9, ## __VA_ARGS__), \
+ MACRO__(0xA7AC, ## __VA_ARGS__), \
+ MACRO__(0xA7AD, ## __VA_ARGS__)
+
+/* RPL-P */
+#define XE_RPLP_IDS(MACRO__, ...) \
+ XE_RPLU_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0xA720, ## __VA_ARGS__), \
+ MACRO__(0xA7A0, ## __VA_ARGS__), \
+ MACRO__(0xA7A8, ## __VA_ARGS__), \
+ MACRO__(0xA7AA, ## __VA_ARGS__), \
+ MACRO__(0xA7AB, ## __VA_ARGS__)
+
+/* DG2 */
+#define XE_DG2_G10_IDS(MACRO__, ...) \
+ MACRO__(0x5690, ## __VA_ARGS__), \
+ MACRO__(0x5691, ## __VA_ARGS__), \
+ MACRO__(0x5692, ## __VA_ARGS__), \
+ MACRO__(0x56A0, ## __VA_ARGS__), \
+ MACRO__(0x56A1, ## __VA_ARGS__), \
+ MACRO__(0x56A2, ## __VA_ARGS__)
+
+#define XE_DG2_G11_IDS(MACRO__, ...) \
+ MACRO__(0x5693, ## __VA_ARGS__), \
+ MACRO__(0x5694, ## __VA_ARGS__), \
+ MACRO__(0x5695, ## __VA_ARGS__), \
+ MACRO__(0x56A5, ## __VA_ARGS__), \
+ MACRO__(0x56A6, ## __VA_ARGS__), \
+ MACRO__(0x56B0, ## __VA_ARGS__), \
+ MACRO__(0x56B1, ## __VA_ARGS__), \
+ MACRO__(0x56BA, ## __VA_ARGS__), \
+ MACRO__(0x56BB, ## __VA_ARGS__), \
+ MACRO__(0x56BC, ## __VA_ARGS__), \
+ MACRO__(0x56BD, ## __VA_ARGS__)
+
+#define XE_DG2_G12_IDS(MACRO__, ...) \
+ MACRO__(0x5696, ## __VA_ARGS__), \
+ MACRO__(0x5697, ## __VA_ARGS__), \
+ MACRO__(0x56A3, ## __VA_ARGS__), \
+ MACRO__(0x56A4, ## __VA_ARGS__), \
+ MACRO__(0x56B2, ## __VA_ARGS__), \
+ MACRO__(0x56B3, ## __VA_ARGS__)
+
+#define XE_DG2_IDS(MACRO__, ...) \
+ XE_DG2_G10_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_DG2_G11_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_DG2_G12_IDS(MACRO__, ## __VA_ARGS__)
+
+#define XE_ATS_M150_IDS(MACRO__, ...) \
+ MACRO__(0x56C0, ## __VA_ARGS__), \
+ MACRO__(0x56C2, ## __VA_ARGS__)
+
+#define XE_ATS_M75_IDS(MACRO__, ...) \
+ MACRO__(0x56C1, ## __VA_ARGS__)
+
+#define XE_ATS_M_IDS(MACRO__, ...) \
+ XE_ATS_M150_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
+
+/* MTL / ARL */
+#define XE_MTL_IDS(MACRO__, ...) \
+ MACRO__(0x7D40, ## __VA_ARGS__), \
+ MACRO__(0x7D45, ## __VA_ARGS__), \
+ MACRO__(0x7D55, ## __VA_ARGS__), \
+ MACRO__(0x7D60, ## __VA_ARGS__), \
+ MACRO__(0x7D67, ## __VA_ARGS__), \
+ MACRO__(0x7DD5, ## __VA_ARGS__)
+
+#define XE_LNL_IDS(MACRO__, ...) \
+ MACRO__(0x6420, ## __VA_ARGS__), \
+ MACRO__(0x64A0, ## __VA_ARGS__), \
+ MACRO__(0x64B0, ## __VA_ARGS__)
+
+#endif
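Following the pattern described in the header's own comment, a driver can expand these lists into a PCI ID table; a minimal sketch (tgl_desc and dg2_desc stand in for hypothetical driver device descriptors):

	#define INTEL_VGA_DEVICE(id, info) {		\
		0x8086, id,				\
		~0, ~0,					\
		0x030000, 0xff0000,			\
		(unsigned long) info }

	static const struct pci_device_id my_xe_ids[] = {
		XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
		XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
		{ }	/* terminator */
	};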
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index f7248348a4..51e0f60594 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -255,6 +255,7 @@
#define QCOM_ID_SA8775P 534
#define QCOM_ID_QRU1000 539
#define QCOM_ID_QDU1000 545
+#define QCOM_ID_SM8650 557
#define QCOM_ID_SM4450 568
#define QCOM_ID_QDU1010 587
#define QCOM_ID_QRU1032 588
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index 387767f4e2..fd09819da2 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -279,5 +279,13 @@
#define CLKID_MIPI_DSI_PXCLK_DIV 268
#define CLKID_MIPI_DSI_PXCLK_SEL 269
#define CLKID_MIPI_DSI_PXCLK 270
+#define CLKID_CTS_ENCL 271
+#define CLKID_CTS_ENCL_SEL 272
+#define CLKID_MIPI_ISP_DIV 273
+#define CLKID_MIPI_ISP_SEL 274
+#define CLKID_MIPI_ISP 275
+#define CLKID_MIPI_ISP_GATE 276
+#define CLKID_MIPI_ISP_CSI_PHY0 277
+#define CLKID_MIPI_ISP_CSI_PHY1 278
#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/google,gs101.h b/include/dt-bindings/clock/google,gs101.h
new file mode 100644
index 0000000000..21adec2238
--- /dev/null
+++ b/include/dt-bindings/clock/google,gs101.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * Device Tree binding constants for Google gs101 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_GOOGLE_GS101_H
+#define _DT_BINDINGS_CLOCK_GOOGLE_GS101_H
+
+/* CMU_TOP PLL */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SPARE_PLL 5
+
+/* CMU_TOP MUX */
+#define CLK_MOUT_PLL_SHARED0 6
+#define CLK_MOUT_PLL_SHARED1 7
+#define CLK_MOUT_PLL_SHARED2 8
+#define CLK_MOUT_PLL_SHARED3 9
+#define CLK_MOUT_PLL_SPARE 10
+#define CLK_MOUT_CMU_BO_BUS 11
+#define CLK_MOUT_CMU_BUS0_BUS 12
+#define CLK_MOUT_CMU_BUS1_BUS 13
+#define CLK_MOUT_CMU_BUS2_BUS 14
+#define CLK_MOUT_CMU_CIS_CLK0 15
+#define CLK_MOUT_CMU_CIS_CLK1 16
+#define CLK_MOUT_CMU_CIS_CLK2 17
+#define CLK_MOUT_CMU_CIS_CLK3 18
+#define CLK_MOUT_CMU_CIS_CLK4 19
+#define CLK_MOUT_CMU_CIS_CLK5 20
+#define CLK_MOUT_CMU_CIS_CLK6 21
+#define CLK_MOUT_CMU_CIS_CLK7 22
+#define CLK_MOUT_CMU_CMU_BOOST 23
+#define CLK_MOUT_CMU_BOOST_OPTION1 24
+#define CLK_MOUT_CMU_CORE_BUS 25
+#define CLK_MOUT_CMU_CPUCL0_DBG 26
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 27
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 28
+#define CLK_MOUT_CMU_CPUCL2_SWITCH 29
+#define CLK_MOUT_CMU_CSIS_BUS 30
+#define CLK_MOUT_CMU_DISP_BUS 31
+#define CLK_MOUT_CMU_DNS_BUS 32
+#define CLK_MOUT_CMU_DPU_BUS 33
+#define CLK_MOUT_CMU_EH_BUS 34
+#define CLK_MOUT_CMU_G2D_G2D 35
+#define CLK_MOUT_CMU_G2D_MSCL 36
+#define CLK_MOUT_CMU_G3AA_G3AA 37
+#define CLK_MOUT_CMU_G3D_BUSD 38
+#define CLK_MOUT_CMU_G3D_GLB 39
+#define CLK_MOUT_CMU_G3D_SWITCH 40
+#define CLK_MOUT_CMU_GDC_GDC0 41
+#define CLK_MOUT_CMU_GDC_GDC1 42
+#define CLK_MOUT_CMU_GDC_SCSC 43
+#define CLK_MOUT_CMU_HPM 44
+#define CLK_MOUT_CMU_HSI0_BUS 45
+#define CLK_MOUT_CMU_HSI0_DPGTC 46
+#define CLK_MOUT_CMU_HSI0_USB31DRD 47
+#define CLK_MOUT_CMU_HSI0_USBDPDBG 48
+#define CLK_MOUT_CMU_HSI1_BUS 49
+#define CLK_MOUT_CMU_HSI1_PCIE 50
+#define CLK_MOUT_CMU_HSI2_BUS 51
+#define CLK_MOUT_CMU_HSI2_MMC_CARD 52
+#define CLK_MOUT_CMU_HSI2_PCIE 53
+#define CLK_MOUT_CMU_HSI2_UFS_EMBD 54
+#define CLK_MOUT_CMU_IPP_BUS 55
+#define CLK_MOUT_CMU_ITP_BUS 56
+#define CLK_MOUT_CMU_MCSC_ITSC 57
+#define CLK_MOUT_CMU_MCSC_MCSC 58
+#define CLK_MOUT_CMU_MFC_MFC 59
+#define CLK_MOUT_CMU_MIF_BUSP 60
+#define CLK_MOUT_CMU_MIF_SWITCH 61
+#define CLK_MOUT_CMU_MISC_BUS 62
+#define CLK_MOUT_CMU_MISC_SSS 63
+#define CLK_MOUT_CMU_PDP_BUS 64
+#define CLK_MOUT_CMU_PDP_VRA 65
+#define CLK_MOUT_CMU_PERIC0_BUS 66
+#define CLK_MOUT_CMU_PERIC0_IP 67
+#define CLK_MOUT_CMU_PERIC1_BUS 68
+#define CLK_MOUT_CMU_PERIC1_IP 69
+#define CLK_MOUT_CMU_TNR_BUS 70
+#define CLK_MOUT_CMU_TOP_BOOST_OPTION1 71
+#define CLK_MOUT_CMU_TOP_CMUREF 72
+#define CLK_MOUT_CMU_TPU_BUS 73
+#define CLK_MOUT_CMU_TPU_TPU 74
+#define CLK_MOUT_CMU_TPU_TPUCTL 75
+#define CLK_MOUT_CMU_TPU_UART 76
+#define CLK_MOUT_CMU_CMUREF 77
+
+/* CMU_TOP Dividers */
+#define CLK_DOUT_CMU_BO_BUS 78
+#define CLK_DOUT_CMU_BUS0_BUS 79
+#define CLK_DOUT_CMU_BUS1_BUS 80
+#define CLK_DOUT_CMU_BUS2_BUS 81
+#define CLK_DOUT_CMU_CIS_CLK0 82
+#define CLK_DOUT_CMU_CIS_CLK1 83
+#define CLK_DOUT_CMU_CIS_CLK2 84
+#define CLK_DOUT_CMU_CIS_CLK3 85
+#define CLK_DOUT_CMU_CIS_CLK4 86
+#define CLK_DOUT_CMU_CIS_CLK5 87
+#define CLK_DOUT_CMU_CIS_CLK6 88
+#define CLK_DOUT_CMU_CIS_CLK7 89
+#define CLK_DOUT_CMU_CORE_BUS 90
+#define CLK_DOUT_CMU_CPUCL0_DBG 91
+#define CLK_DOUT_CMU_CPUCL0_SWITCH 92
+#define CLK_DOUT_CMU_CPUCL1_SWITCH 93
+#define CLK_DOUT_CMU_CPUCL2_SWITCH 94
+#define CLK_DOUT_CMU_CSIS_BUS 95
+#define CLK_DOUT_CMU_DISP_BUS 96
+#define CLK_DOUT_CMU_DNS_BUS 97
+#define CLK_DOUT_CMU_DPU_BUS 98
+#define CLK_DOUT_CMU_EH_BUS 99
+#define CLK_DOUT_CMU_G2D_G2D 100
+#define CLK_DOUT_CMU_G2D_MSCL 101
+#define CLK_DOUT_CMU_G3AA_G3AA 102
+#define CLK_DOUT_CMU_G3D_BUSD 103
+#define CLK_DOUT_CMU_G3D_GLB 104
+#define CLK_DOUT_CMU_G3D_SWITCH 105
+#define CLK_DOUT_CMU_GDC_GDC0 106
+#define CLK_DOUT_CMU_GDC_GDC1 107
+#define CLK_DOUT_CMU_GDC_SCSC 108
+#define CLK_DOUT_CMU_CMU_HPM 109
+#define CLK_DOUT_CMU_HSI0_BUS 110
+#define CLK_DOUT_CMU_HSI0_DPGTC 111
+#define CLK_DOUT_CMU_HSI0_USB31DRD 112
+#define CLK_DOUT_CMU_HSI0_USBDPDBG 113
+#define CLK_DOUT_CMU_HSI1_BUS 114
+#define CLK_DOUT_CMU_HSI1_PCIE 115
+#define CLK_DOUT_CMU_HSI2_BUS 116
+#define CLK_DOUT_CMU_HSI2_MMC_CARD 117
+#define CLK_DOUT_CMU_HSI2_PCIE 118
+#define CLK_DOUT_CMU_HSI2_UFS_EMBD 119
+#define CLK_DOUT_CMU_IPP_BUS 120
+#define CLK_DOUT_CMU_ITP_BUS 121
+#define CLK_DOUT_CMU_MCSC_ITSC 122
+#define CLK_DOUT_CMU_MCSC_MCSC 123
+#define CLK_DOUT_CMU_MFC_MFC 124
+#define CLK_DOUT_CMU_MIF_BUSP 125
+#define CLK_DOUT_CMU_MISC_BUS 126
+#define CLK_DOUT_CMU_MISC_SSS 127
+#define CLK_DOUT_CMU_OTP 128
+#define CLK_DOUT_CMU_PDP_BUS 129
+#define CLK_DOUT_CMU_PDP_VRA 130
+#define CLK_DOUT_CMU_PERIC0_BUS 131
+#define CLK_DOUT_CMU_PERIC0_IP 132
+#define CLK_DOUT_CMU_PERIC1_BUS 133
+#define CLK_DOUT_CMU_PERIC1_IP 134
+#define CLK_DOUT_CMU_TNR_BUS 135
+#define CLK_DOUT_CMU_TPU_BUS 136
+#define CLK_DOUT_CMU_TPU_TPU 137
+#define CLK_DOUT_CMU_TPU_TPUCTL 138
+#define CLK_DOUT_CMU_TPU_UART 139
+#define CLK_DOUT_CMU_CMU_BOOST 140
+#define CLK_DOUT_CMU_CMU_CMUREF 141
+#define CLK_DOUT_CMU_SHARED0_DIV2 142
+#define CLK_DOUT_CMU_SHARED0_DIV3 143
+#define CLK_DOUT_CMU_SHARED0_DIV4 144
+#define CLK_DOUT_CMU_SHARED0_DIV5 145
+#define CLK_DOUT_CMU_SHARED1_DIV2 146
+#define CLK_DOUT_CMU_SHARED1_DIV3 147
+#define CLK_DOUT_CMU_SHARED1_DIV4 148
+#define CLK_DOUT_CMU_SHARED2_DIV2 149
+#define CLK_DOUT_CMU_SHARED3_DIV2 150
+
+/* CMU_TOP Gates */
+#define CLK_GOUT_CMU_BUS0_BOOST 151
+#define CLK_GOUT_CMU_BUS1_BOOST 152
+#define CLK_GOUT_CMU_BUS2_BOOST 153
+#define CLK_GOUT_CMU_CORE_BOOST 154
+#define CLK_GOUT_CMU_CPUCL0_BOOST 155
+#define CLK_GOUT_CMU_CPUCL1_BOOST 156
+#define CLK_GOUT_CMU_CPUCL2_BOOST 157
+#define CLK_GOUT_CMU_MIF_BOOST 158
+#define CLK_GOUT_CMU_MIF_SWITCH 159
+#define CLK_GOUT_CMU_BO_BUS 160
+#define CLK_GOUT_CMU_BUS0_BUS 161
+#define CLK_GOUT_CMU_BUS1_BUS 162
+#define CLK_GOUT_CMU_BUS2_BUS 163
+#define CLK_GOUT_CMU_CIS_CLK0 164
+#define CLK_GOUT_CMU_CIS_CLK1 165
+#define CLK_GOUT_CMU_CIS_CLK2 166
+#define CLK_GOUT_CMU_CIS_CLK3 167
+#define CLK_GOUT_CMU_CIS_CLK4 168
+#define CLK_GOUT_CMU_CIS_CLK5 169
+#define CLK_GOUT_CMU_CIS_CLK6 170
+#define CLK_GOUT_CMU_CIS_CLK7 171
+#define CLK_GOUT_CMU_CMU_BOOST 172
+#define CLK_GOUT_CMU_CORE_BUS 173
+#define CLK_GOUT_CMU_CPUCL0_DBG 174
+#define CLK_GOUT_CMU_CPUCL0_SWITCH 175
+#define CLK_GOUT_CMU_CPUCL1_SWITCH 176
+#define CLK_GOUT_CMU_CPUCL2_SWITCH 177
+#define CLK_GOUT_CMU_CSIS_BUS 178
+#define CLK_GOUT_CMU_DISP_BUS 179
+#define CLK_GOUT_CMU_DNS_BUS 180
+#define CLK_GOUT_CMU_DPU_BUS 181
+#define CLK_GOUT_CMU_EH_BUS 182
+#define CLK_GOUT_CMU_G2D_G2D 183
+#define CLK_GOUT_CMU_G2D_MSCL 184
+#define CLK_GOUT_CMU_G3AA_G3AA 185
+#define CLK_GOUT_CMU_G3D_BUSD 186
+#define CLK_GOUT_CMU_G3D_GLB 187
+#define CLK_GOUT_CMU_G3D_SWITCH 188
+#define CLK_GOUT_CMU_GDC_GDC0 189
+#define CLK_GOUT_CMU_GDC_GDC1 190
+#define CLK_GOUT_CMU_GDC_SCSC 191
+#define CLK_GOUT_CMU_HPM 192
+#define CLK_GOUT_CMU_HSI0_BUS 193
+#define CLK_GOUT_CMU_HSI0_DPGTC 194
+#define CLK_GOUT_CMU_HSI0_USB31DRD 195
+#define CLK_GOUT_CMU_HSI0_USBDPDBG 196
+#define CLK_GOUT_CMU_HSI1_BUS 197
+#define CLK_GOUT_CMU_HSI1_PCIE 198
+#define CLK_GOUT_CMU_HSI2_BUS 199
+#define CLK_GOUT_CMU_HSI2_MMC_CARD 200
+#define CLK_GOUT_CMU_HSI2_PCIE 201
+#define CLK_GOUT_CMU_HSI2_UFS_EMBD 202
+#define CLK_GOUT_CMU_IPP_BUS 203
+#define CLK_GOUT_CMU_ITP_BUS 204
+#define CLK_GOUT_CMU_MCSC_ITSC 205
+#define CLK_GOUT_CMU_MCSC_MCSC 206
+#define CLK_GOUT_CMU_MFC_MFC 207
+#define CLK_GOUT_CMU_MIF_BUSP 208
+#define CLK_GOUT_CMU_MISC_BUS 209
+#define CLK_GOUT_CMU_MISC_SSS 210
+#define CLK_GOUT_CMU_PDP_BUS 211
+#define CLK_GOUT_CMU_PDP_VRA 212
+#define CLK_GOUT_CMU_G3AA 213
+#define CLK_GOUT_CMU_PERIC0_BUS 214
+#define CLK_GOUT_CMU_PERIC0_IP 215
+#define CLK_GOUT_CMU_PERIC1_BUS 216
+#define CLK_GOUT_CMU_PERIC1_IP 217
+#define CLK_GOUT_CMU_TNR_BUS 218
+#define CLK_GOUT_CMU_TOP_CMUREF 219
+#define CLK_GOUT_CMU_TPU_BUS 220
+#define CLK_GOUT_CMU_TPU_TPU 221
+#define CLK_GOUT_CMU_TPU_TPUCTL 222
+#define CLK_GOUT_CMU_TPU_UART 223
+
+/* CMU_APM */
+#define CLK_MOUT_APM_FUNC 1
+#define CLK_MOUT_APM_FUNCSRC 2
+#define CLK_DOUT_APM_BOOST 3
+#define CLK_DOUT_APM_USI0_UART 4
+#define CLK_DOUT_APM_USI0_USI 5
+#define CLK_DOUT_APM_USI1_UART 6
+#define CLK_GOUT_APM_APM_CMU_APM_PCLK 7
+#define CLK_GOUT_BUS0_BOOST_OPTION1 8
+#define CLK_GOUT_CMU_BOOST_OPTION1 9
+#define CLK_GOUT_CORE_BOOST_OPTION1 10
+#define CLK_GOUT_APM_FUNC 11
+#define CLK_GOUT_APM_APBIF_GPIO_ALIVE_PCLK 12
+#define CLK_GOUT_APM_APBIF_GPIO_FAR_ALIVE_PCLK 13
+#define CLK_GOUT_APM_APBIF_PMU_ALIVE_PCLK 14
+#define CLK_GOUT_APM_APBIF_RTC_PCLK 15
+#define CLK_GOUT_APM_APBIF_TRTC_PCLK 16
+#define CLK_GOUT_APM_APM_USI0_UART_IPCLK 17
+#define CLK_GOUT_APM_APM_USI0_UART_PCLK 18
+#define CLK_GOUT_APM_APM_USI0_USI_IPCLK 19
+#define CLK_GOUT_APM_APM_USI0_USI_PCLK 20
+#define CLK_GOUT_APM_APM_USI1_UART_IPCLK 21
+#define CLK_GOUT_APM_APM_USI1_UART_PCLK 22
+#define CLK_GOUT_APM_D_TZPC_APM_PCLK 23
+#define CLK_GOUT_APM_GPC_APM_PCLK 24
+#define CLK_GOUT_APM_GREBEINTEGRATION_HCLK 25
+#define CLK_GOUT_APM_INTMEM_ACLK 26
+#define CLK_GOUT_APM_INTMEM_PCLK 27
+#define CLK_GOUT_APM_LHM_AXI_G_SWD_I_CLK 28
+#define CLK_GOUT_APM_LHM_AXI_P_AOCAPM_I_CLK 29
+#define CLK_GOUT_APM_LHM_AXI_P_APM_I_CLK 30
+#define CLK_GOUT_APM_LHS_AXI_D_APM_I_CLK 31
+#define CLK_GOUT_APM_LHS_AXI_G_DBGCORE_I_CLK 32
+#define CLK_GOUT_APM_LHS_AXI_G_SCAN2DRAM_I_CLK 33
+#define CLK_GOUT_APM_MAILBOX_APM_AOC_PCLK 34
+#define CLK_GOUT_APM_MAILBOX_APM_AP_PCLK 35
+#define CLK_GOUT_APM_MAILBOX_APM_GSA_PCLK 36
+#define CLK_GOUT_APM_MAILBOX_APM_SWD_PCLK 37
+#define CLK_GOUT_APM_MAILBOX_APM_TPU_PCLK 38
+#define CLK_GOUT_APM_MAILBOX_AP_AOC_PCLK 39
+#define CLK_GOUT_APM_MAILBOX_AP_DBGCORE_PCLK 40
+#define CLK_GOUT_APM_PMU_INTR_GEN_PCLK 41
+#define CLK_GOUT_APM_ROM_CRC32_HOST_ACLK 42
+#define CLK_GOUT_APM_ROM_CRC32_HOST_PCLK 43
+#define CLK_GOUT_APM_CLK_APM_BUS_CLK 44
+#define CLK_GOUT_APM_CLK_APM_USI0_UART_CLK 45
+#define CLK_GOUT_APM_CLK_APM_USI0_USI_CLK 46
+#define CLK_GOUT_APM_CLK_APM_USI1_UART_CLK 47
+#define CLK_GOUT_APM_SPEEDY_APM_PCLK 48
+#define CLK_GOUT_APM_SPEEDY_SUB_APM_PCLK 49
+#define CLK_GOUT_APM_SSMT_D_APM_ACLK 50
+#define CLK_GOUT_APM_SSMT_D_APM_PCLK 51
+#define CLK_GOUT_APM_SSMT_G_DBGCORE_ACLK 52
+#define CLK_GOUT_APM_SSMT_G_DBGCORE_PCLK 53
+#define CLK_GOUT_APM_SS_DBGCORE_SS_DBGCORE_HCLK 54
+#define CLK_GOUT_APM_SYSMMU_D_APM_CLK_S2 55
+#define CLK_GOUT_APM_SYSREG_APM_PCLK 56
+#define CLK_GOUT_APM_UASC_APM_ACLK 57
+#define CLK_GOUT_APM_UASC_APM_PCLK 58
+#define CLK_GOUT_APM_UASC_DBGCORE_ACLK 59
+#define CLK_GOUT_APM_UASC_DBGCORE_PCLK 60
+#define CLK_GOUT_APM_UASC_G_SWD_ACLK 61
+#define CLK_GOUT_APM_UASC_G_SWD_PCLK 62
+#define CLK_GOUT_APM_UASC_P_AOCAPM_ACLK 63
+#define CLK_GOUT_APM_UASC_P_AOCAPM_PCLK 64
+#define CLK_GOUT_APM_UASC_P_APM_ACLK 65
+#define CLK_GOUT_APM_UASC_P_APM_PCLK 66
+#define CLK_GOUT_APM_WDT_APM_PCLK 67
+#define CLK_GOUT_APM_XIU_DP_APM_ACLK 68
+#define CLK_APM_PLL_DIV2_APM 69
+#define CLK_APM_PLL_DIV4_APM 70
+#define CLK_APM_PLL_DIV16_APM 71
+
+/* CMU_MISC */
+#define CLK_MOUT_MISC_BUS_USER 1
+#define CLK_MOUT_MISC_SSS_USER 2
+#define CLK_MOUT_MISC_GIC 3
+#define CLK_DOUT_MISC_BUSP 4
+#define CLK_DOUT_MISC_GIC 5
+#define CLK_GOUT_MISC_MISC_CMU_MISC_PCLK 6
+#define CLK_GOUT_MISC_OTP_CON_BIRA_I_OSCCLK 7
+#define CLK_GOUT_MISC_OTP_CON_BISR_I_OSCCLK 8
+#define CLK_GOUT_MISC_OTP_CON_TOP_I_OSCCLK 9
+#define CLK_GOUT_MISC_CLK_MISC_OSCCLK_CLK 10
+#define CLK_GOUT_MISC_ADM_AHB_SSS_HCLKM 11
+#define CLK_GOUT_MISC_AD_APB_DIT_PCLKM 12
+#define CLK_GOUT_MISC_AD_APB_PUF_PCLKM 13
+#define CLK_GOUT_MISC_DIT_ICLKL2A 14
+#define CLK_GOUT_MISC_D_TZPC_MISC_PCLK 15
+#define CLK_GOUT_MISC_GIC_GICCLK 16
+#define CLK_GOUT_MISC_GPC_MISC_PCLK 17
+#define CLK_GOUT_MISC_LHM_AST_ICC_CPUGIC_I_CLK 18
+#define CLK_GOUT_MISC_LHM_AXI_D_SSS_I_CLK 19
+#define CLK_GOUT_MISC_LHM_AXI_P_GIC_I_CLK 20
+#define CLK_GOUT_MISC_LHM_AXI_P_MISC_I_CLK 21
+#define CLK_GOUT_MISC_LHS_ACEL_D_MISC_I_CLK 22
+#define CLK_GOUT_MISC_LHS_AST_IRI_GICCPU_I_CLK 23
+#define CLK_GOUT_MISC_LHS_AXI_D_SSS_I_CLK 24
+#define CLK_GOUT_MISC_MCT_PCLK 25
+#define CLK_GOUT_MISC_OTP_CON_BIRA_PCLK 26
+#define CLK_GOUT_MISC_OTP_CON_BISR_PCLK 27
+#define CLK_GOUT_MISC_OTP_CON_TOP_PCLK 28
+#define CLK_GOUT_MISC_PDMA_ACLK 29
+#define CLK_GOUT_MISC_PPMU_DMA_ACLK 30
+#define CLK_GOUT_MISC_PPMU_MISC_ACLK 31
+#define CLK_GOUT_MISC_PPMU_MISC_PCLK 32
+#define CLK_GOUT_MISC_PUF_I_CLK 33
+#define CLK_GOUT_MISC_QE_DIT_ACLK 34
+#define CLK_GOUT_MISC_QE_DIT_PCLK 35
+#define CLK_GOUT_MISC_QE_PDMA_ACLK 36
+#define CLK_GOUT_MISC_QE_PDMA_PCLK 37
+#define CLK_GOUT_MISC_QE_PPMU_DMA_ACLK 38
+#define CLK_GOUT_MISC_QE_PPMU_DMA_PCLK 39
+#define CLK_GOUT_MISC_QE_RTIC_ACLK 40
+#define CLK_GOUT_MISC_QE_RTIC_PCLK 41
+#define CLK_GOUT_MISC_QE_SPDMA_ACLK 42
+#define CLK_GOUT_MISC_QE_SPDMA_PCLK 43
+#define CLK_GOUT_MISC_QE_SSS_ACLK 44
+#define CLK_GOUT_MISC_QE_SSS_PCLK 45
+#define CLK_GOUT_MISC_CLK_MISC_BUSD_CLK 46
+#define CLK_GOUT_MISC_CLK_MISC_BUSP_CLK 47
+#define CLK_GOUT_MISC_CLK_MISC_GIC_CLK 48
+#define CLK_GOUT_MISC_CLK_MISC_SSS_CLK 49
+#define CLK_GOUT_MISC_RTIC_I_ACLK 50
+#define CLK_GOUT_MISC_RTIC_I_PCLK 51
+#define CLK_GOUT_MISC_SPDMA_ACLK 52
+#define CLK_GOUT_MISC_SSMT_DIT_ACLK 53
+#define CLK_GOUT_MISC_SSMT_DIT_PCLK 54
+#define CLK_GOUT_MISC_SSMT_PDMA_ACLK 55
+#define CLK_GOUT_MISC_SSMT_PDMA_PCLK 56
+#define CLK_GOUT_MISC_SSMT_PPMU_DMA_ACLK 57
+#define CLK_GOUT_MISC_SSMT_PPMU_DMA_PCLK 58
+#define CLK_GOUT_MISC_SSMT_RTIC_ACLK 59
+#define CLK_GOUT_MISC_SSMT_RTIC_PCLK 60
+#define CLK_GOUT_MISC_SSMT_SPDMA_ACLK 61
+#define CLK_GOUT_MISC_SSMT_SPDMA_PCLK 62
+#define CLK_GOUT_MISC_SSMT_SSS_ACLK 63
+#define CLK_GOUT_MISC_SSMT_SSS_PCLK 64
+#define CLK_GOUT_MISC_SSS_I_ACLK 65
+#define CLK_GOUT_MISC_SSS_I_PCLK 66
+#define CLK_GOUT_MISC_SYSMMU_MISC_CLK_S2 67
+#define CLK_GOUT_MISC_SYSMMU_SSS_CLK_S1 68
+#define CLK_GOUT_MISC_SYSREG_MISC_PCLK 69
+#define CLK_GOUT_MISC_TMU_SUB_PCLK 70
+#define CLK_GOUT_MISC_TMU_TOP_PCLK 71
+#define CLK_GOUT_MISC_WDT_CLUSTER0_PCLK 72
+#define CLK_GOUT_MISC_WDT_CLUSTER1_PCLK 73
+#define CLK_GOUT_MISC_XIU_D_MISC_ACLK 74
+
+#endif /* _DT_BINDINGS_CLOCK_GOOGLE_GS101_H */
diff --git a/include/dt-bindings/clock/mediatek,mt7988-clk.h b/include/dt-bindings/clock/mediatek,mt7988-clk.h
new file mode 100644
index 0000000000..63376e40f1
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt7988-clk.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Xiufeng Li <Xiufeng.Li@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7988_H
+#define _DT_BINDINGS_CLK_MT7988_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_NETSYSPLL 0
+#define CLK_APMIXED_MPLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_APLL2 3
+#define CLK_APMIXED_NET1PLL 4
+#define CLK_APMIXED_NET2PLL 5
+#define CLK_APMIXED_WEDMCUPLL 6
+#define CLK_APMIXED_SGMPLL 7
+#define CLK_APMIXED_ARM_B 8
+#define CLK_APMIXED_CCIPLL2_B 9
+#define CLK_APMIXED_USXGMIIPLL 10
+#define CLK_APMIXED_MSDCPLL 11
+
+/* TOPCKGEN */
+
+#define CLK_TOP_XTAL 0
+#define CLK_TOP_XTAL_D2 1
+#define CLK_TOP_RTC_32K 2
+#define CLK_TOP_RTC_32P7K 3
+#define CLK_TOP_MPLL_D2 4
+#define CLK_TOP_MPLL_D3_D2 5
+#define CLK_TOP_MPLL_D4 6
+#define CLK_TOP_MPLL_D8 7
+#define CLK_TOP_MPLL_D8_D2 8
+#define CLK_TOP_MMPLL_D2 9
+#define CLK_TOP_MMPLL_D3_D5 10
+#define CLK_TOP_MMPLL_D4 11
+#define CLK_TOP_MMPLL_D6_D2 12
+#define CLK_TOP_MMPLL_D8 13
+#define CLK_TOP_APLL2_D4 14
+#define CLK_TOP_NET1PLL_D4 15
+#define CLK_TOP_NET1PLL_D5 16
+#define CLK_TOP_NET1PLL_D5_D2 17
+#define CLK_TOP_NET1PLL_D5_D4 18
+#define CLK_TOP_NET1PLL_D8 19
+#define CLK_TOP_NET1PLL_D8_D2 20
+#define CLK_TOP_NET1PLL_D8_D4 21
+#define CLK_TOP_NET1PLL_D8_D8 22
+#define CLK_TOP_NET1PLL_D8_D16 23
+#define CLK_TOP_NET2PLL_D2 24
+#define CLK_TOP_NET2PLL_D4 25
+#define CLK_TOP_NET2PLL_D4_D4 26
+#define CLK_TOP_NET2PLL_D4_D8 27
+#define CLK_TOP_NET2PLL_D6 28
+#define CLK_TOP_NET2PLL_D8 29
+#define CLK_TOP_NETSYS_SEL 30
+#define CLK_TOP_NETSYS_500M_SEL 31
+#define CLK_TOP_NETSYS_2X_SEL 32
+#define CLK_TOP_NETSYS_GSW_SEL 33
+#define CLK_TOP_ETH_GMII_SEL 34
+#define CLK_TOP_NETSYS_MCU_SEL 35
+#define CLK_TOP_NETSYS_PAO_2X_SEL 36
+#define CLK_TOP_EIP197_SEL 37
+#define CLK_TOP_AXI_INFRA_SEL 38
+#define CLK_TOP_UART_SEL 39
+#define CLK_TOP_EMMC_250M_SEL 40
+#define CLK_TOP_EMMC_400M_SEL 41
+#define CLK_TOP_SPI_SEL 42
+#define CLK_TOP_SPIM_MST_SEL 43
+#define CLK_TOP_NFI1X_SEL 44
+#define CLK_TOP_SPINFI_SEL 45
+#define CLK_TOP_PWM_SEL 46
+#define CLK_TOP_I2C_SEL 47
+#define CLK_TOP_PCIE_MBIST_250M_SEL 48
+#define CLK_TOP_PEXTP_TL_SEL 49
+#define CLK_TOP_PEXTP_TL_P1_SEL 50
+#define CLK_TOP_PEXTP_TL_P2_SEL 51
+#define CLK_TOP_PEXTP_TL_P3_SEL 52
+#define CLK_TOP_USB_SYS_SEL 53
+#define CLK_TOP_USB_SYS_P1_SEL 54
+#define CLK_TOP_USB_XHCI_SEL 55
+#define CLK_TOP_USB_XHCI_P1_SEL 56
+#define CLK_TOP_USB_FRMCNT_SEL 57
+#define CLK_TOP_USB_FRMCNT_P1_SEL 58
+#define CLK_TOP_AUD_SEL 59
+#define CLK_TOP_A1SYS_SEL 60
+#define CLK_TOP_AUD_L_SEL 61
+#define CLK_TOP_A_TUNER_SEL 62
+#define CLK_TOP_SSPXTP_SEL 63
+#define CLK_TOP_USB_PHY_SEL 64
+#define CLK_TOP_USXGMII_SBUS_0_SEL 65
+#define CLK_TOP_USXGMII_SBUS_1_SEL 66
+#define CLK_TOP_SGM_0_SEL 67
+#define CLK_TOP_SGM_SBUS_0_SEL 68
+#define CLK_TOP_SGM_1_SEL 69
+#define CLK_TOP_SGM_SBUS_1_SEL 70
+#define CLK_TOP_XFI_PHY_0_XTAL_SEL 71
+#define CLK_TOP_XFI_PHY_1_XTAL_SEL 72
+#define CLK_TOP_SYSAXI_SEL 73
+#define CLK_TOP_SYSAPB_SEL 74
+#define CLK_TOP_ETH_REFCK_50M_SEL 75
+#define CLK_TOP_ETH_SYS_200M_SEL 76
+#define CLK_TOP_ETH_SYS_SEL 77
+#define CLK_TOP_ETH_XGMII_SEL 78
+#define CLK_TOP_BUS_TOPS_SEL 79
+#define CLK_TOP_NPU_TOPS_SEL 80
+#define CLK_TOP_DRAMC_SEL 81
+#define CLK_TOP_DRAMC_MD32_SEL 82
+#define CLK_TOP_INFRA_F26M_SEL 83
+#define CLK_TOP_PEXTP_P0_SEL 84
+#define CLK_TOP_PEXTP_P1_SEL 85
+#define CLK_TOP_PEXTP_P2_SEL 86
+#define CLK_TOP_PEXTP_P3_SEL 87
+#define CLK_TOP_DA_XTP_GLB_P0_SEL 88
+#define CLK_TOP_DA_XTP_GLB_P1_SEL 89
+#define CLK_TOP_DA_XTP_GLB_P2_SEL 90
+#define CLK_TOP_DA_XTP_GLB_P3_SEL 91
+#define CLK_TOP_CKM_SEL 92
+#define CLK_TOP_DA_SEL 93
+#define CLK_TOP_PEXTP_SEL 94
+#define CLK_TOP_TOPS_P2_26M_SEL 95
+#define CLK_TOP_MCUSYS_BACKUP_625M_SEL 96
+#define CLK_TOP_NETSYS_SYNC_250M_SEL 97
+#define CLK_TOP_MACSEC_SEL 98
+#define CLK_TOP_NETSYS_TOPS_400M_SEL 99
+#define CLK_TOP_NETSYS_PPEFB_250M_SEL 100
+#define CLK_TOP_NETSYS_WARP_SEL 101
+#define CLK_TOP_ETH_MII_SEL 102
+#define CLK_TOP_NPU_SEL 103
+#define CLK_TOP_AUD_I2S_M 104
+
+/* MCUSYS */
+
+#define CLK_MCU_BUS_DIV_SEL 0
+#define CLK_MCU_ARM_DIV_SEL 1
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_MUX_UART0_SEL 0
+#define CLK_INFRA_MUX_UART1_SEL 1
+#define CLK_INFRA_MUX_UART2_SEL 2
+#define CLK_INFRA_MUX_SPI0_SEL 3
+#define CLK_INFRA_MUX_SPI1_SEL 4
+#define CLK_INFRA_MUX_SPI2_SEL 5
+#define CLK_INFRA_PWM_SEL 6
+#define CLK_INFRA_PWM_CK1_SEL 7
+#define CLK_INFRA_PWM_CK2_SEL 8
+#define CLK_INFRA_PWM_CK3_SEL 9
+#define CLK_INFRA_PWM_CK4_SEL 10
+#define CLK_INFRA_PWM_CK5_SEL 11
+#define CLK_INFRA_PWM_CK6_SEL 12
+#define CLK_INFRA_PWM_CK7_SEL 13
+#define CLK_INFRA_PWM_CK8_SEL 14
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P0_SEL 15
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P1_SEL 16
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P2_SEL 17
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P3_SEL 18
+
+/* INFRACFG */
+
+#define CLK_INFRA_PCIE_PERI_26M_CK_P0 19
+#define CLK_INFRA_PCIE_PERI_26M_CK_P1 20
+#define CLK_INFRA_PCIE_PERI_26M_CK_P2 21
+#define CLK_INFRA_PCIE_PERI_26M_CK_P3 22
+#define CLK_INFRA_66M_GPT_BCK 23
+#define CLK_INFRA_66M_PWM_HCK 24
+#define CLK_INFRA_66M_PWM_BCK 25
+#define CLK_INFRA_66M_PWM_CK1 26
+#define CLK_INFRA_66M_PWM_CK2 27
+#define CLK_INFRA_66M_PWM_CK3 28
+#define CLK_INFRA_66M_PWM_CK4 29
+#define CLK_INFRA_66M_PWM_CK5 30
+#define CLK_INFRA_66M_PWM_CK6 31
+#define CLK_INFRA_66M_PWM_CK7 32
+#define CLK_INFRA_66M_PWM_CK8 33
+#define CLK_INFRA_133M_CQDMA_BCK 34
+#define CLK_INFRA_66M_AUD_SLV_BCK 35
+#define CLK_INFRA_AUD_26M 36
+#define CLK_INFRA_AUD_L 37
+#define CLK_INFRA_AUD_AUD 38
+#define CLK_INFRA_AUD_EG2 39
+#define CLK_INFRA_DRAMC_F26M 40
+#define CLK_INFRA_133M_DBG_ACKM 41
+#define CLK_INFRA_66M_AP_DMA_BCK 42
+#define CLK_INFRA_66M_SEJ_BCK 43
+#define CLK_INFRA_PRE_CK_SEJ_F13M 44
+#define CLK_INFRA_26M_THERM_SYSTEM 45
+#define CLK_INFRA_I2C_BCK 46
+#define CLK_INFRA_52M_UART0_CK 47
+#define CLK_INFRA_52M_UART1_CK 48
+#define CLK_INFRA_52M_UART2_CK 49
+#define CLK_INFRA_NFI 50
+#define CLK_INFRA_SPINFI 51
+#define CLK_INFRA_66M_NFI_HCK 52
+#define CLK_INFRA_104M_SPI0 53
+#define CLK_INFRA_104M_SPI1 54
+#define CLK_INFRA_104M_SPI2_BCK 55
+#define CLK_INFRA_66M_SPI0_HCK 56
+#define CLK_INFRA_66M_SPI1_HCK 57
+#define CLK_INFRA_66M_SPI2_HCK 58
+#define CLK_INFRA_66M_FLASHIF_AXI 59
+#define CLK_INFRA_RTC 60
+#define CLK_INFRA_26M_ADC_BCK 61
+#define CLK_INFRA_RC_ADC 62
+#define CLK_INFRA_MSDC400 63
+#define CLK_INFRA_MSDC2_HCK 64
+#define CLK_INFRA_133M_MSDC_0_HCK 65
+#define CLK_INFRA_66M_MSDC_0_HCK 66
+#define CLK_INFRA_133M_CPUM_BCK 67
+#define CLK_INFRA_BIST2FPC 68
+#define CLK_INFRA_I2C_X16W_MCK_CK_P1 69
+#define CLK_INFRA_I2C_X16W_PCK_CK_P1 70
+#define CLK_INFRA_133M_USB_HCK 71
+#define CLK_INFRA_133M_USB_HCK_CK_P1 72
+#define CLK_INFRA_66M_USB_HCK 73
+#define CLK_INFRA_66M_USB_HCK_CK_P1 74
+#define CLK_INFRA_USB_SYS 75
+#define CLK_INFRA_USB_SYS_CK_P1 76
+#define CLK_INFRA_USB_REF 77
+#define CLK_INFRA_USB_CK_P1 78
+#define CLK_INFRA_USB_FRMCNT 79
+#define CLK_INFRA_USB_FRMCNT_CK_P1 80
+#define CLK_INFRA_USB_PIPE 81
+#define CLK_INFRA_USB_PIPE_CK_P1 82
+#define CLK_INFRA_USB_UTMI 83
+#define CLK_INFRA_USB_UTMI_CK_P1 84
+#define CLK_INFRA_USB_XHCI 85
+#define CLK_INFRA_USB_XHCI_CK_P1 86
+#define CLK_INFRA_PCIE_GFMUX_TL_P0 87
+#define CLK_INFRA_PCIE_GFMUX_TL_P1 88
+#define CLK_INFRA_PCIE_GFMUX_TL_P2 89
+#define CLK_INFRA_PCIE_GFMUX_TL_P3 90
+#define CLK_INFRA_PCIE_PIPE_P0 91
+#define CLK_INFRA_PCIE_PIPE_P1 92
+#define CLK_INFRA_PCIE_PIPE_P2 93
+#define CLK_INFRA_PCIE_PIPE_P3 94
+#define CLK_INFRA_133M_PCIE_CK_P0 95
+#define CLK_INFRA_133M_PCIE_CK_P1 96
+#define CLK_INFRA_133M_PCIE_CK_P2 97
+#define CLK_INFRA_133M_PCIE_CK_P3 98
+
+/* ETHDMA */
+
+#define CLK_ETHDMA_XGP1_EN 0
+#define CLK_ETHDMA_XGP2_EN 1
+#define CLK_ETHDMA_XGP3_EN 2
+#define CLK_ETHDMA_FE_EN 3
+#define CLK_ETHDMA_GP2_EN 4
+#define CLK_ETHDMA_GP1_EN 5
+#define CLK_ETHDMA_GP3_EN 6
+#define CLK_ETHDMA_ESW_EN 7
+#define CLK_ETHDMA_CRYPT0_EN 8
+#define CLK_ETHDMA_NR_CLK 9
+
+/* SGMIISYS_0 */
+
+#define CLK_SGM0_TX_EN 0
+#define CLK_SGM0_RX_EN 1
+#define CLK_SGMII0_NR_CLK 2
+
+/* SGMIISYS_1 */
+
+#define CLK_SGM1_TX_EN 0
+#define CLK_SGM1_RX_EN 1
+#define CLK_SGMII1_NR_CLK 2
+
+/* ETHWARP */
+
+#define CLK_ETHWARP_WOCPU2_EN 0
+#define CLK_ETHWARP_WOCPU1_EN 1
+#define CLK_ETHWARP_WOCPU0_EN 2
+#define CLK_ETHWARP_NR_CLK 3
+
+/* XFIPLL */
+#define CLK_XFIPLL_PLL 0
+#define CLK_XFIPLL_PLL_EN 1
+
+#endif /* _DT_BINDINGS_CLK_MT7988_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h
index 2d545ed0d3..9a9bc55b49 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8939.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h
@@ -193,6 +193,12 @@
#define GCC_VENUS0_CORE1_VCODEC0_CLK 184
#define GCC_OXILI_TIMER_CLK 185
#define SYSTEM_MM_NOC_BFDCD_CLK_SRC 186
+#define CSI2_CLK_SRC 187
+#define GCC_CAMSS_CSI2_AHB_CLK 188
+#define GCC_CAMSS_CSI2_CLK 189
+#define GCC_CAMSS_CSI2PHY_CLK 190
+#define GCC_CAMSS_CSI2PIX_CLK 191
+#define GCC_CAMSS_CSI2RDI_CLK 192
/* Indexes for GDSCs */
#define BIMC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h b/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
new file mode 100644
index 0000000000..731e404a2c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_ECPRI_CC_QDU1000_H
+#define _DT_BINDINGS_CLK_QCOM_ECPRI_CC_QDU1000_H
+
+/* ECPRI_CC clocks */
+#define ECPRI_CC_PLL0 0
+#define ECPRI_CC_PLL1 1
+#define ECPRI_CC_ECPRI_CG_CLK 2
+#define ECPRI_CC_ECPRI_CLK_SRC 3
+#define ECPRI_CC_ECPRI_DMA_CLK 4
+#define ECPRI_CC_ECPRI_DMA_CLK_SRC 5
+#define ECPRI_CC_ECPRI_DMA_NOC_CLK 6
+#define ECPRI_CC_ECPRI_FAST_CLK 7
+#define ECPRI_CC_ECPRI_FAST_CLK_SRC 8
+#define ECPRI_CC_ECPRI_FAST_DIV2_CLK 9
+#define ECPRI_CC_ECPRI_FAST_DIV2_CLK_SRC 10
+#define ECPRI_CC_ECPRI_FAST_DIV2_NOC_CLK 11
+#define ECPRI_CC_ECPRI_FR_CLK 12
+#define ECPRI_CC_ECPRI_ORAN_CLK_SRC 13
+#define ECPRI_CC_ECPRI_ORAN_DIV2_CLK 14
+#define ECPRI_CC_ETH_100G_C2C0_HM_FF_CLK_SRC 15
+#define ECPRI_CC_ETH_100G_C2C0_UDP_FIFO_CLK 16
+#define ECPRI_CC_ETH_100G_C2C1_UDP_FIFO_CLK 17
+#define ECPRI_CC_ETH_100G_C2C_0_HM_FF_0_CLK 18
+#define ECPRI_CC_ETH_100G_C2C_0_HM_FF_1_CLK 19
+#define ECPRI_CC_ETH_100G_C2C_HM_FF_0_DIV_CLK_SRC 20
+#define ECPRI_CC_ETH_100G_C2C_HM_FF_1_DIV_CLK_SRC 21
+#define ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK 22
+#define ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK_SRC 23
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_CLK 24
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_DIV_CLK_SRC 25
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_CLK 26
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_DIV_CLK_SRC 27
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_CLK_SRC 28
+#define ECPRI_CC_ETH_100G_DBG_C2C_UDP_FIFO_CLK 29
+#define ECPRI_CC_ETH_100G_FH0_HM_FF_CLK_SRC 30
+#define ECPRI_CC_ETH_100G_FH0_MACSEC_CLK_SRC 31
+#define ECPRI_CC_ETH_100G_FH1_HM_FF_CLK_SRC 32
+#define ECPRI_CC_ETH_100G_FH1_MACSEC_CLK_SRC 33
+#define ECPRI_CC_ETH_100G_FH2_HM_FF_CLK_SRC 34
+#define ECPRI_CC_ETH_100G_FH2_MACSEC_CLK_SRC 35
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_0_CLK 36
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_0_DIV_CLK_SRC 37
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_1_CLK 38
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_1_DIV_CLK_SRC 39
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_2_CLK 40
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_2_DIV_CLK_SRC 41
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_3_CLK 42
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_3_DIV_CLK_SRC 43
+#define ECPRI_CC_ETH_100G_FH_0_UDP_FIFO_CLK 44
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_0_CLK 45
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_0_DIV_CLK_SRC 46
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_1_CLK 47
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_1_DIV_CLK_SRC 48
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_2_CLK 49
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_2_DIV_CLK_SRC 50
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_3_CLK 51
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_3_DIV_CLK_SRC 52
+#define ECPRI_CC_ETH_100G_FH_1_UDP_FIFO_CLK 53
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_0_CLK 54
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_0_DIV_CLK_SRC 55
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_1_CLK 56
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_1_DIV_CLK_SRC 57
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_2_CLK 58
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_2_DIV_CLK_SRC 59
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_3_CLK 60
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_3_DIV_CLK_SRC 61
+#define ECPRI_CC_ETH_100G_FH_2_UDP_FIFO_CLK 62
+#define ECPRI_CC_ETH_100G_FH_MACSEC_0_CLK 63
+#define ECPRI_CC_ETH_100G_FH_MACSEC_1_CLK 64
+#define ECPRI_CC_ETH_100G_FH_MACSEC_2_CLK 65
+#define ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK 66
+#define ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK_SRC 67
+#define ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK 68
+#define ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK_SRC 69
+#define ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK 70
+#define ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK_SRC 71
+#define ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK 72
+#define ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK_SRC 73
+#define ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK 74
+#define ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK_SRC 75
+#define ECPRI_CC_ETH_DBG_NFAPI_AXI_CLK 76
+#define ECPRI_CC_ETH_DBG_NOC_AXI_CLK 77
+#define ECPRI_CC_ETH_PHY_0_OCK_SRAM_CLK 78
+#define ECPRI_CC_ETH_PHY_1_OCK_SRAM_CLK 79
+#define ECPRI_CC_ETH_PHY_2_OCK_SRAM_CLK 80
+#define ECPRI_CC_ETH_PHY_3_OCK_SRAM_CLK 81
+#define ECPRI_CC_ETH_PHY_4_OCK_SRAM_CLK 82
+#define ECPRI_CC_MSS_EMAC_CLK 83
+#define ECPRI_CC_MSS_EMAC_CLK_SRC 84
+#define ECPRI_CC_MSS_ORAN_CLK 85
+#define ECPRI_CC_PHY0_LANE0_RX_CLK 86
+#define ECPRI_CC_PHY0_LANE0_TX_CLK 87
+#define ECPRI_CC_PHY0_LANE1_RX_CLK 88
+#define ECPRI_CC_PHY0_LANE1_TX_CLK 89
+#define ECPRI_CC_PHY0_LANE2_RX_CLK 90
+#define ECPRI_CC_PHY0_LANE2_TX_CLK 91
+#define ECPRI_CC_PHY0_LANE3_RX_CLK 92
+#define ECPRI_CC_PHY0_LANE3_TX_CLK 93
+#define ECPRI_CC_PHY1_LANE0_RX_CLK 94
+#define ECPRI_CC_PHY1_LANE0_TX_CLK 95
+#define ECPRI_CC_PHY1_LANE1_RX_CLK 96
+#define ECPRI_CC_PHY1_LANE1_TX_CLK 97
+#define ECPRI_CC_PHY1_LANE2_RX_CLK 98
+#define ECPRI_CC_PHY1_LANE2_TX_CLK 99
+#define ECPRI_CC_PHY1_LANE3_RX_CLK 100
+#define ECPRI_CC_PHY1_LANE3_TX_CLK 101
+#define ECPRI_CC_PHY2_LANE0_RX_CLK 102
+#define ECPRI_CC_PHY2_LANE0_TX_CLK 103
+#define ECPRI_CC_PHY2_LANE1_RX_CLK 104
+#define ECPRI_CC_PHY2_LANE1_TX_CLK 105
+#define ECPRI_CC_PHY2_LANE2_RX_CLK 106
+#define ECPRI_CC_PHY2_LANE2_TX_CLK 107
+#define ECPRI_CC_PHY2_LANE3_RX_CLK 108
+#define ECPRI_CC_PHY2_LANE3_TX_CLK 109
+#define ECPRI_CC_PHY3_LANE0_RX_CLK 110
+#define ECPRI_CC_PHY3_LANE0_TX_CLK 111
+#define ECPRI_CC_PHY3_LANE1_RX_CLK 112
+#define ECPRI_CC_PHY3_LANE1_TX_CLK 113
+#define ECPRI_CC_PHY3_LANE2_RX_CLK 114
+#define ECPRI_CC_PHY3_LANE2_TX_CLK 115
+#define ECPRI_CC_PHY3_LANE3_RX_CLK 116
+#define ECPRI_CC_PHY3_LANE3_TX_CLK 117
+#define ECPRI_CC_PHY4_LANE0_RX_CLK 118
+#define ECPRI_CC_PHY4_LANE0_TX_CLK 119
+#define ECPRI_CC_PHY4_LANE1_RX_CLK 120
+#define ECPRI_CC_PHY4_LANE1_TX_CLK 121
+#define ECPRI_CC_PHY4_LANE2_RX_CLK 122
+#define ECPRI_CC_PHY4_LANE2_TX_CLK 123
+#define ECPRI_CC_PHY4_LANE3_RX_CLK 124
+#define ECPRI_CC_PHY4_LANE3_TX_CLK 125
+
+/* ECPRI_CC resets */
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ECPRI_SS_BCR 0
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_C2C_BCR 1
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH0_BCR 2
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH1_BCR 3
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH2_BCR 4
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_WRAPPER_TOP_BCR 5
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_MODEM_BCR 6
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_NOC_BCR 7
+
+#endif
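
A sketch of the QDU1000 ECPRI clock controller acting as both clock and reset provider; the unit address and register size are placeholders, and the consumer label is assumed:

    #include <dt-bindings/clock/qcom,qdu1000-ecpricc.h>

    ecpricc: clock-controller@280000 {      /* address is a placeholder */
            compatible = "qcom,qdu1000-ecpricc";
            reg = <0x00280000 0x100000>;    /* size is a placeholder */
            #clock-cells = <1>;
            #reset-cells = <1>;
    };

    &ecpri {        /* hypothetical consumer of one BCR reset above */
            resets = <&ecpricc ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ECPRI_SS_BCR>;
    };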
diff --git a/include/dt-bindings/clock/qcom,sc8280xp-camcc.h b/include/dt-bindings/clock/qcom,sc8280xp-camcc.h
new file mode 100644
index 0000000000..ea5ec73c8c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sc8280xp-camcc.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__
+#define __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__
+
+/* CAMCC clocks */
+#define CAMCC_PLL0 0
+#define CAMCC_PLL0_OUT_EVEN 1
+#define CAMCC_PLL0_OUT_ODD 2
+#define CAMCC_PLL1 3
+#define CAMCC_PLL1_OUT_EVEN 4
+#define CAMCC_PLL2 5
+#define CAMCC_PLL3 6
+#define CAMCC_PLL3_OUT_EVEN 7
+#define CAMCC_PLL4 8
+#define CAMCC_PLL4_OUT_EVEN 9
+#define CAMCC_PLL5 10
+#define CAMCC_PLL5_OUT_EVEN 11
+#define CAMCC_PLL6 12
+#define CAMCC_PLL6_OUT_EVEN 13
+#define CAMCC_PLL7 14
+#define CAMCC_PLL7_OUT_EVEN 15
+#define CAMCC_PLL7_OUT_ODD 16
+#define CAMCC_BPS_AHB_CLK 17
+#define CAMCC_BPS_AREG_CLK 18
+#define CAMCC_BPS_AXI_CLK 19
+#define CAMCC_BPS_CLK 20
+#define CAMCC_BPS_CLK_SRC 21
+#define CAMCC_CAMNOC_AXI_CLK 22
+#define CAMCC_CAMNOC_AXI_CLK_SRC 23
+#define CAMCC_CAMNOC_DCD_XO_CLK 24
+#define CAMCC_CCI_0_CLK 25
+#define CAMCC_CCI_0_CLK_SRC 26
+#define CAMCC_CCI_1_CLK 27
+#define CAMCC_CCI_1_CLK_SRC 28
+#define CAMCC_CCI_2_CLK 29
+#define CAMCC_CCI_2_CLK_SRC 30
+#define CAMCC_CCI_3_CLK 31
+#define CAMCC_CCI_3_CLK_SRC 32
+#define CAMCC_CORE_AHB_CLK 33
+#define CAMCC_CPAS_AHB_CLK 34
+#define CAMCC_CPHY_RX_CLK_SRC 35
+#define CAMCC_CSI0PHYTIMER_CLK 36
+#define CAMCC_CSI0PHYTIMER_CLK_SRC 37
+#define CAMCC_CSI1PHYTIMER_CLK 38
+#define CAMCC_CSI1PHYTIMER_CLK_SRC 39
+#define CAMCC_CSI2PHYTIMER_CLK 40
+#define CAMCC_CSI2PHYTIMER_CLK_SRC 41
+#define CAMCC_CSI3PHYTIMER_CLK 42
+#define CAMCC_CSI3PHYTIMER_CLK_SRC 43
+#define CAMCC_CSIPHY0_CLK 44
+#define CAMCC_CSIPHY1_CLK 45
+#define CAMCC_CSIPHY2_CLK 46
+#define CAMCC_CSIPHY3_CLK 47
+#define CAMCC_FAST_AHB_CLK_SRC 48
+#define CAMCC_GDSC_CLK 49
+#define CAMCC_ICP_AHB_CLK 50
+#define CAMCC_ICP_CLK 51
+#define CAMCC_ICP_CLK_SRC 52
+#define CAMCC_IFE_0_AXI_CLK 53
+#define CAMCC_IFE_0_CLK 54
+#define CAMCC_IFE_0_CLK_SRC 55
+#define CAMCC_IFE_0_CPHY_RX_CLK 56
+#define CAMCC_IFE_0_CSID_CLK 57
+#define CAMCC_IFE_0_CSID_CLK_SRC 58
+#define CAMCC_IFE_0_DSP_CLK 59
+#define CAMCC_IFE_1_AXI_CLK 60
+#define CAMCC_IFE_1_CLK 61
+#define CAMCC_IFE_1_CLK_SRC 62
+#define CAMCC_IFE_1_CPHY_RX_CLK 63
+#define CAMCC_IFE_1_CSID_CLK 64
+#define CAMCC_IFE_1_CSID_CLK_SRC 65
+#define CAMCC_IFE_1_DSP_CLK 66
+#define CAMCC_IFE_2_AXI_CLK 67
+#define CAMCC_IFE_2_CLK 68
+#define CAMCC_IFE_2_CLK_SRC 69
+#define CAMCC_IFE_2_CPHY_RX_CLK 70
+#define CAMCC_IFE_2_CSID_CLK 71
+#define CAMCC_IFE_2_CSID_CLK_SRC 72
+#define CAMCC_IFE_2_DSP_CLK 73
+#define CAMCC_IFE_3_AXI_CLK 74
+#define CAMCC_IFE_3_CLK 75
+#define CAMCC_IFE_3_CLK_SRC 76
+#define CAMCC_IFE_3_CPHY_RX_CLK 77
+#define CAMCC_IFE_3_CSID_CLK 78
+#define CAMCC_IFE_3_CSID_CLK_SRC 79
+#define CAMCC_IFE_3_DSP_CLK 80
+#define CAMCC_IFE_LITE_0_CLK 81
+#define CAMCC_IFE_LITE_0_CLK_SRC 82
+#define CAMCC_IFE_LITE_0_CPHY_RX_CLK 83
+#define CAMCC_IFE_LITE_0_CSID_CLK 84
+#define CAMCC_IFE_LITE_0_CSID_CLK_SRC 85
+#define CAMCC_IFE_LITE_1_CLK 86
+#define CAMCC_IFE_LITE_1_CLK_SRC 87
+#define CAMCC_IFE_LITE_1_CPHY_RX_CLK 88
+#define CAMCC_IFE_LITE_1_CSID_CLK 89
+#define CAMCC_IFE_LITE_1_CSID_CLK_SRC 90
+#define CAMCC_IFE_LITE_2_CLK 91
+#define CAMCC_IFE_LITE_2_CLK_SRC 92
+#define CAMCC_IFE_LITE_2_CPHY_RX_CLK 93
+#define CAMCC_IFE_LITE_2_CSID_CLK 94
+#define CAMCC_IFE_LITE_2_CSID_CLK_SRC 95
+#define CAMCC_IFE_LITE_3_CLK 96
+#define CAMCC_IFE_LITE_3_CLK_SRC 97
+#define CAMCC_IFE_LITE_3_CPHY_RX_CLK 98
+#define CAMCC_IFE_LITE_3_CSID_CLK 99
+#define CAMCC_IFE_LITE_3_CSID_CLK_SRC 100
+#define CAMCC_IPE_0_AHB_CLK 101
+#define CAMCC_IPE_0_AREG_CLK 102
+#define CAMCC_IPE_0_AXI_CLK 103
+#define CAMCC_IPE_0_CLK 104
+#define CAMCC_IPE_0_CLK_SRC 105
+#define CAMCC_IPE_1_AHB_CLK 106
+#define CAMCC_IPE_1_AREG_CLK 107
+#define CAMCC_IPE_1_AXI_CLK 108
+#define CAMCC_IPE_1_CLK 109
+#define CAMCC_JPEG_CLK 110
+#define CAMCC_JPEG_CLK_SRC 111
+#define CAMCC_LRME_CLK 112
+#define CAMCC_LRME_CLK_SRC 113
+#define CAMCC_MCLK0_CLK 114
+#define CAMCC_MCLK0_CLK_SRC 115
+#define CAMCC_MCLK1_CLK 116
+#define CAMCC_MCLK1_CLK_SRC 117
+#define CAMCC_MCLK2_CLK 118
+#define CAMCC_MCLK2_CLK_SRC 119
+#define CAMCC_MCLK3_CLK 120
+#define CAMCC_MCLK3_CLK_SRC 121
+#define CAMCC_MCLK4_CLK 122
+#define CAMCC_MCLK4_CLK_SRC 123
+#define CAMCC_MCLK5_CLK 124
+#define CAMCC_MCLK5_CLK_SRC 125
+#define CAMCC_MCLK6_CLK 126
+#define CAMCC_MCLK6_CLK_SRC 127
+#define CAMCC_MCLK7_CLK 128
+#define CAMCC_MCLK7_CLK_SRC 129
+#define CAMCC_SLEEP_CLK 130
+#define CAMCC_SLEEP_CLK_SRC 131
+#define CAMCC_SLOW_AHB_CLK_SRC 132
+#define CAMCC_XO_CLK_SRC 133
+
+/* CAMCC resets */
+#define CAMCC_BPS_BCR 0
+#define CAMCC_CAMNOC_BCR 1
+#define CAMCC_CCI_BCR 2
+#define CAMCC_CPAS_BCR 3
+#define CAMCC_CSI0PHY_BCR 4
+#define CAMCC_CSI1PHY_BCR 5
+#define CAMCC_CSI2PHY_BCR 6
+#define CAMCC_CSI3PHY_BCR 7
+#define CAMCC_ICP_BCR 8
+#define CAMCC_IFE_0_BCR 9
+#define CAMCC_IFE_1_BCR 10
+#define CAMCC_IFE_2_BCR 11
+#define CAMCC_IFE_3_BCR 12
+#define CAMCC_IFE_LITE_0_BCR 13
+#define CAMCC_IFE_LITE_1_BCR 14
+#define CAMCC_IFE_LITE_2_BCR 15
+#define CAMCC_IFE_LITE_3_BCR 16
+#define CAMCC_IPE_0_BCR 17
+#define CAMCC_IPE_1_BCR 18
+#define CAMCC_JPEG_BCR 19
+#define CAMCC_LRME_BCR 20
+
+/* CAMCC GDSCRs */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IFE_2_GDSC 3
+#define IFE_3_GDSC 4
+#define IPE_0_GDSC 5
+#define IPE_1_GDSC 6
+#define TITAN_TOP_GDSC 7
+
+#endif /* __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__ */
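
A sketch pairing the SC8280XP camera clock controller's clocks with one of its GDSC power domains; the node labels are assumptions:

    #include <dt-bindings/clock/qcom,sc8280xp-camcc.h>

    &camss {
            clocks = <&camcc CAMCC_CAMNOC_AXI_CLK>,
                     <&camcc CAMCC_CPAS_AHB_CLK>;
            power-domains = <&camcc TITAN_TOP_GDSC>;
    };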
diff --git a/include/dt-bindings/clock/qcom,sm8650-dispcc.h b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
new file mode 100644
index 0000000000..b0a668b395
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_ACCU_CLK 0
+#define DISP_CC_MDSS_AHB1_CLK 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK 8
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 15
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 24
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 25
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 26
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 27
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 28
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 29
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 30
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 31
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 33
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 34
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 35
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 36
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 37
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 50
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 52
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 54
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 55
+#define DISP_CC_MDSS_ESC0_CLK 56
+#define DISP_CC_MDSS_ESC0_CLK_SRC 57
+#define DISP_CC_MDSS_ESC1_CLK 58
+#define DISP_CC_MDSS_ESC1_CLK_SRC 59
+#define DISP_CC_MDSS_MDP1_CLK 60
+#define DISP_CC_MDSS_MDP_CLK 61
+#define DISP_CC_MDSS_MDP_CLK_SRC 62
+#define DISP_CC_MDSS_MDP_LUT1_CLK 63
+#define DISP_CC_MDSS_MDP_LUT_CLK 64
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK 66
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 67
+#define DISP_CC_MDSS_PCLK1_CLK 68
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 69
+#define DISP_CC_MDSS_RSCC_AHB_CLK 70
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 71
+#define DISP_CC_MDSS_VSYNC1_CLK 72
+#define DISP_CC_MDSS_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 74
+#define DISP_CC_PLL0 75
+#define DISP_CC_PLL1 76
+#define DISP_CC_SLEEP_CLK 77
+#define DISP_CC_SLEEP_CLK_SRC 78
+#define DISP_CC_XO_CLK 79
+#define DISP_CC_XO_CLK_SRC 80
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
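
A sketch of an MDSS consumer of the SM8650 display clock controller, gated behind the MDSS_GDSC power domain defined above; node labels are illustrative:

    #include <dt-bindings/clock/qcom,sm8650-dispcc.h>

    &mdss {
            clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
                     <&dispcc DISP_CC_MDSS_MDP_CLK>,
                     <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
            power-domains = <&dispcc MDSS_GDSC>;
    };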
diff --git a/include/dt-bindings/clock/qcom,sm8650-gcc.h b/include/dt-bindings/clock/qcom,sm8650-gcc.h
new file mode 100644
index 0000000000..0c543ba460
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-gcc.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8650_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 9
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 10
+#define GCC_CNOC_PCIE_SF_AXI_CLK 11
+#define GCC_DDRSS_GPU_AXI_CLK 12
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 13
+#define GCC_DISP_AHB_CLK 14
+#define GCC_DISP_HF_AXI_CLK 15
+#define GCC_DISP_XO_CLK 16
+#define GCC_GP1_CLK 17
+#define GCC_GP1_CLK_SRC 18
+#define GCC_GP2_CLK 19
+#define GCC_GP2_CLK_SRC 20
+#define GCC_GP3_CLK 21
+#define GCC_GP3_CLK_SRC 22
+#define GCC_GPLL0 23
+#define GCC_GPLL0_OUT_EVEN 24
+#define GCC_GPLL1 25
+#define GCC_GPLL3 26
+#define GCC_GPLL4 27
+#define GCC_GPLL6 28
+#define GCC_GPLL7 29
+#define GCC_GPLL9 30
+#define GCC_GPU_CFG_AHB_CLK 31
+#define GCC_GPU_GPLL0_CLK_SRC 32
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 33
+#define GCC_GPU_MEMNOC_GFX_CLK 34
+#define GCC_GPU_SNOC_DVM_GFX_CLK 35
+#define GCC_PCIE_0_AUX_CLK 36
+#define GCC_PCIE_0_AUX_CLK_SRC 37
+#define GCC_PCIE_0_CFG_AHB_CLK 38
+#define GCC_PCIE_0_MSTR_AXI_CLK 39
+#define GCC_PCIE_0_PHY_RCHNG_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 41
+#define GCC_PCIE_0_PIPE_CLK 42
+#define GCC_PCIE_0_PIPE_CLK_SRC 43
+#define GCC_PCIE_0_SLV_AXI_CLK 44
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 45
+#define GCC_PCIE_1_AUX_CLK 46
+#define GCC_PCIE_1_AUX_CLK_SRC 47
+#define GCC_PCIE_1_CFG_AHB_CLK 48
+#define GCC_PCIE_1_MSTR_AXI_CLK 49
+#define GCC_PCIE_1_PHY_AUX_CLK 50
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 51
+#define GCC_PCIE_1_PHY_RCHNG_CLK 52
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_PIPE_CLK_SRC 55
+#define GCC_PCIE_1_SLV_AXI_CLK 56
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM2_CLK_SRC 59
+#define GCC_PDM_AHB_CLK 60
+#define GCC_PDM_XO4_CLK 61
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 62
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 63
+#define GCC_QMIP_DISP_AHB_CLK 64
+#define GCC_QMIP_GPU_AHB_CLK 65
+#define GCC_QMIP_PCIE_AHB_CLK 66
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 67
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 68
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 69
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 70
+#define GCC_QUPV3_I2C_CORE_CLK 71
+#define GCC_QUPV3_I2C_S0_CLK 72
+#define GCC_QUPV3_I2C_S0_CLK_SRC 73
+#define GCC_QUPV3_I2C_S1_CLK 74
+#define GCC_QUPV3_I2C_S1_CLK_SRC 75
+#define GCC_QUPV3_I2C_S2_CLK 76
+#define GCC_QUPV3_I2C_S2_CLK_SRC 77
+#define GCC_QUPV3_I2C_S3_CLK 78
+#define GCC_QUPV3_I2C_S3_CLK_SRC 79
+#define GCC_QUPV3_I2C_S4_CLK 80
+#define GCC_QUPV3_I2C_S4_CLK_SRC 81
+#define GCC_QUPV3_I2C_S5_CLK 82
+#define GCC_QUPV3_I2C_S5_CLK_SRC 83
+#define GCC_QUPV3_I2C_S6_CLK 84
+#define GCC_QUPV3_I2C_S6_CLK_SRC 85
+#define GCC_QUPV3_I2C_S7_CLK 86
+#define GCC_QUPV3_I2C_S7_CLK_SRC 87
+#define GCC_QUPV3_I2C_S8_CLK 88
+#define GCC_QUPV3_I2C_S8_CLK_SRC 89
+#define GCC_QUPV3_I2C_S9_CLK 90
+#define GCC_QUPV3_I2C_S9_CLK_SRC 91
+#define GCC_QUPV3_I2C_S_AHB_CLK 92
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 93
+#define GCC_QUPV3_WRAP1_CORE_CLK 94
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 95
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S0_CLK 97
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S1_CLK 99
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S2_CLK 101
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_S3_CLK 103
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 104
+#define GCC_QUPV3_WRAP1_S4_CLK 105
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_S5_CLK 107
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 108
+#define GCC_QUPV3_WRAP1_S6_CLK 109
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 110
+#define GCC_QUPV3_WRAP1_S7_CLK 111
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 113
+#define GCC_QUPV3_WRAP2_CORE_CLK 114
+#define GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC 115
+#define GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK 116
+#define GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK 117
+#define GCC_QUPV3_WRAP2_S0_CLK 118
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 119
+#define GCC_QUPV3_WRAP2_S1_CLK 120
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 121
+#define GCC_QUPV3_WRAP2_S2_CLK 122
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 123
+#define GCC_QUPV3_WRAP2_S3_CLK 124
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 125
+#define GCC_QUPV3_WRAP2_S4_CLK 126
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 127
+#define GCC_QUPV3_WRAP2_S5_CLK 128
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 129
+#define GCC_QUPV3_WRAP2_S6_CLK 130
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 131
+#define GCC_QUPV3_WRAP2_S7_CLK 132
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 133
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 134
+#define GCC_QUPV3_WRAP3_CORE_CLK 135
+#define GCC_QUPV3_WRAP3_QSPI_REF_CLK 136
+#define GCC_QUPV3_WRAP3_QSPI_REF_CLK_SRC 137
+#define GCC_QUPV3_WRAP3_S0_CLK 138
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 139
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 141
+#define GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK 142
+#define GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK 143
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 144
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 145
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 146
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 147
+#define GCC_SDCC2_AHB_CLK 148
+#define GCC_SDCC2_APPS_CLK 149
+#define GCC_SDCC2_APPS_CLK_SRC 150
+#define GCC_SDCC4_AHB_CLK 151
+#define GCC_SDCC4_APPS_CLK 152
+#define GCC_SDCC4_APPS_CLK_SRC 153
+#define GCC_UFS_PHY_AHB_CLK 154
+#define GCC_UFS_PHY_AXI_CLK 155
+#define GCC_UFS_PHY_AXI_CLK_SRC 156
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 157
+#define GCC_UFS_PHY_ICE_CORE_CLK 158
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 160
+#define GCC_UFS_PHY_PHY_AUX_CLK 161
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 162
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 165
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 166
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 167
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 169
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 170
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 171
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 172
+#define GCC_USB30_PRIM_MASTER_CLK 173
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 174
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 175
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 176
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 177
+#define GCC_USB30_PRIM_SLEEP_CLK 178
+#define GCC_USB3_PRIM_PHY_AUX_CLK 179
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 180
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 181
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 182
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 183
+#define GCC_VIDEO_AHB_CLK 184
+#define GCC_VIDEO_AXI0_CLK 185
+#define GCC_VIDEO_AXI1_CLK 186
+#define GCC_VIDEO_XO_CLK 187
+#define GCC_GPLL0_AO 188
+#define GCC_GPLL0_OUT_EVEN_AO 189
+#define GCC_GPLL1_AO 190
+#define GCC_GPLL3_AO 191
+#define GCC_GPLL4_AO 192
+#define GCC_GPLL6_AO 193
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PDM_BCR 16
+#define GCC_QUPV3_WRAPPER_1_BCR 17
+#define GCC_QUPV3_WRAPPER_2_BCR 18
+#define GCC_QUPV3_WRAPPER_3_BCR 19
+#define GCC_QUPV3_WRAPPER_I2C_BCR 20
+#define GCC_QUSB2PHY_PRIM_BCR 21
+#define GCC_QUSB2PHY_SEC_BCR 22
+#define GCC_SDCC2_BCR 23
+#define GCC_SDCC4_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_BCR 27
+#define GCC_USB3_DP_PHY_SEC_BCR 28
+#define GCC_USB3_PHY_PRIM_BCR 29
+#define GCC_USB3_PHY_SEC_BCR 30
+#define GCC_USB3PHY_PHY_PRIM_BCR 31
+#define GCC_USB3PHY_PHY_SEC_BCR 32
+#define GCC_VIDEO_AXI0_CLK_ARES 33
+#define GCC_VIDEO_AXI1_CLK_ARES 34
+#define GCC_VIDEO_BCR 35
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define UFS_PHY_GDSC 4
+#define UFS_MEM_PHY_GDSC 5
+#define USB30_PRIM_GDSC 6
+#define USB3_PHY_GDSC 7
+
+#endif
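
The SM8650 GCC header covers clocks, resets, and GDSC power domains from a single provider. A typical SDCC2 consumer sketch follows; the unit address and clock-names follow the usual qcom,sdhci-msm convention but are assumptions here:

    #include <dt-bindings/clock/qcom,sm8650-gcc.h>

    sdhc_2: mmc@8804000 {
            clocks = <&gcc GCC_SDCC2_AHB_CLK>,
                     <&gcc GCC_SDCC2_APPS_CLK>;
            clock-names = "iface", "core";
            resets = <&gcc GCC_SDCC2_BCR>;
    };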
diff --git a/include/dt-bindings/clock/qcom,sm8650-gpucc.h b/include/dt-bindings/clock/qcom,sm8650-gpucc.h
new file mode 100644
index 0000000000..d0dc457cfe
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-gpucc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8650_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_ACCU_SHIFT_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_DEMET_CLK 7
+#define GPU_CC_DPM_CLK 8
+#define GPU_CC_FF_CLK_SRC 9
+#define GPU_CC_FREQ_MEASURE_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_ACCU_SHIFT_CLK 12
+#define GPU_CC_GX_FF_CLK 13
+#define GPU_CC_GX_GFX3D_CLK 14
+#define GPU_CC_GX_GFX3D_RDVM_CLK 15
+#define GPU_CC_GX_GMU_CLK 16
+#define GPU_CC_GX_VSENSE_CLK 17
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 18
+#define GPU_CC_HUB_AON_CLK 19
+#define GPU_CC_HUB_CLK_SRC 20
+#define GPU_CC_HUB_CX_INT_CLK 21
+#define GPU_CC_HUB_DIV_CLK_SRC 22
+#define GPU_CC_MEMNOC_GFX_CLK 23
+#define GPU_CC_PLL0 24
+#define GPU_CC_PLL1 25
+#define GPU_CC_SLEEP_CLK 26
+
+/* GDSCs */
+#define GPU_GX_GDSC 0
+#define GPU_CX_GDSC 1
+
+#endif
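
A sketch of a GMU consumer of the SM8650 GPU clock controller; the CX/GX power-domain split mirrors other Adreno parts, but the node shape and names are illustrative:

    #include <dt-bindings/clock/qcom,sm8650-gpucc.h>

    &gmu {
            clocks = <&gpucc GPU_CC_AHB_CLK>,
                     <&gpucc GPU_CC_CX_GMU_CLK>;
            power-domains = <&gpucc GPU_CX_GDSC>,
                            <&gpucc GPU_GX_GDSC>;
            power-domain-names = "cx", "gx";
    };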
diff --git a/include/dt-bindings/clock/qcom,sm8650-tcsr.h b/include/dt-bindings/clock/qcom,sm8650-tcsr.h
new file mode 100644
index 0000000000..b2c72d492f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-tcsr.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8650_H
+
+/* TCSR CC clocks */
+#define TCSR_PCIE_0_CLKREF_EN 0
+#define TCSR_PCIE_1_CLKREF_EN 1
+#define TCSR_UFS_CLKREF_EN 2
+#define TCSR_UFS_PAD_CLKREF_EN 3
+#define TCSR_USB2_CLKREF_EN 4
+#define TCSR_USB3_CLKREF_EN 5
+
+#endif
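
These TCSR entries are simple reference-clock gates. A hedged consumer sketch; the pcie0 label and the clock-name are assumptions:

    #include <dt-bindings/clock/qcom,sm8650-tcsr.h>

    &pcie0 {
            clocks = <&tcsr TCSR_PCIE_0_CLKREF_EN>;
            clock-names = "ref";
    };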
diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h
index e24ee840cf..c557b78dc5 100644
--- a/include/dt-bindings/clock/qcom,videocc-sm8150.h
+++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h
@@ -16,6 +16,10 @@
/* VIDEO_CC Resets */
#define VIDEO_CC_MVSC_CORE_CLK_BCR 0
+#define VIDEO_CC_INTERFACE_BCR 1
+#define VIDEO_CC_MVS0_BCR 2
+#define VIDEO_CC_MVS1_BCR 3
+#define VIDEO_CC_MVSC_BCR 4
/* VIDEO_CC GDSCRs */
#define VENUS_GDSC 0
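
With the BCR indexes added above, the SM8150 video clock controller can also be referenced as a reset provider. A sketch, with the reset-names given as assumptions:

    #include <dt-bindings/clock/qcom,videocc-sm8150.h>

    &venus {
            resets = <&videocc VIDEO_CC_MVSC_BCR>,
                     <&videocc VIDEO_CC_INTERFACE_BCR>;
            reset-names = "core", "bus";
    };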
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
new file mode 100644
index 0000000000..24ba9e2a5c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_X1E80100_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_X1E80100_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_USB_NORTH_AXI_CLK 0
+#define GCC_AGGRE_NOC_USB_SOUTH_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_MP_AXI_CLK 4
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 5
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 6
+#define GCC_AGGRE_USB3_TERT_AXI_CLK 7
+#define GCC_AGGRE_USB4_0_AXI_CLK 8
+#define GCC_AGGRE_USB4_1_AXI_CLK 9
+#define GCC_AGGRE_USB4_2_AXI_CLK 10
+#define GCC_AGGRE_USB_NOC_AXI_CLK 11
+#define GCC_AV1E_AHB_CLK 12
+#define GCC_AV1E_AXI_CLK 13
+#define GCC_AV1E_XO_CLK 14
+#define GCC_BOOT_ROM_AHB_CLK 15
+#define GCC_CAMERA_AHB_CLK 16
+#define GCC_CAMERA_HF_AXI_CLK 17
+#define GCC_CAMERA_SF_AXI_CLK 18
+#define GCC_CAMERA_XO_CLK 19
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 20
+#define GCC_CFG_NOC_PCIE_ANOC_NORTH_AHB_CLK 21
+#define GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK 22
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 23
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 24
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 25
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 26
+#define GCC_CFG_NOC_USB3_TERT_AXI_CLK 27
+#define GCC_CFG_NOC_USB_ANOC_AHB_CLK 28
+#define GCC_CFG_NOC_USB_ANOC_NORTH_AHB_CLK 29
+#define GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK 30
+#define GCC_CNOC_PCIE1_TUNNEL_CLK 31
+#define GCC_CNOC_PCIE2_TUNNEL_CLK 32
+#define GCC_CNOC_PCIE_NORTH_SF_AXI_CLK 33
+#define GCC_CNOC_PCIE_SOUTH_SF_AXI_CLK 34
+#define GCC_CNOC_PCIE_TUNNEL_CLK 35
+#define GCC_DDRSS_GPU_AXI_CLK 36
+#define GCC_DISP_AHB_CLK 37
+#define GCC_DISP_HF_AXI_CLK 38
+#define GCC_DISP_XO_CLK 39
+#define GCC_GP1_CLK 40
+#define GCC_GP1_CLK_SRC 41
+#define GCC_GP2_CLK 42
+#define GCC_GP2_CLK_SRC 43
+#define GCC_GP3_CLK 44
+#define GCC_GP3_CLK_SRC 45
+#define GCC_GPLL0 46
+#define GCC_GPLL0_OUT_EVEN 47
+#define GCC_GPLL4 48
+#define GCC_GPLL7 49
+#define GCC_GPLL8 50
+#define GCC_GPLL9 51
+#define GCC_GPU_CFG_AHB_CLK 52
+#define GCC_GPU_GPLL0_CPH_CLK_SRC 53
+#define GCC_GPU_GPLL0_DIV_CPH_CLK_SRC 54
+#define GCC_GPU_MEMNOC_GFX_CLK 55
+#define GCC_GPU_SNOC_DVM_GFX_CLK 56
+#define GCC_PCIE0_PHY_RCHNG_CLK 57
+#define GCC_PCIE1_PHY_RCHNG_CLK 58
+#define GCC_PCIE2_PHY_RCHNG_CLK 59
+#define GCC_PCIE_0_AUX_CLK 60
+#define GCC_PCIE_0_AUX_CLK_SRC 61
+#define GCC_PCIE_0_CFG_AHB_CLK 62
+#define GCC_PCIE_0_MSTR_AXI_CLK 63
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 64
+#define GCC_PCIE_0_PIPE_CLK 65
+#define GCC_PCIE_0_SLV_AXI_CLK 66
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 67
+#define GCC_PCIE_1_AUX_CLK 68
+#define GCC_PCIE_1_AUX_CLK_SRC 69
+#define GCC_PCIE_1_CFG_AHB_CLK 70
+#define GCC_PCIE_1_MSTR_AXI_CLK 71
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 72
+#define GCC_PCIE_1_PIPE_CLK 73
+#define GCC_PCIE_1_SLV_AXI_CLK 74
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 75
+#define GCC_PCIE_2_AUX_CLK 76
+#define GCC_PCIE_2_AUX_CLK_SRC 77
+#define GCC_PCIE_2_CFG_AHB_CLK 78
+#define GCC_PCIE_2_MSTR_AXI_CLK 79
+#define GCC_PCIE_2_PHY_RCHNG_CLK_SRC 80
+#define GCC_PCIE_2_PIPE_CLK 81
+#define GCC_PCIE_2_SLV_AXI_CLK 82
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 83
+#define GCC_PCIE_3_AUX_CLK 84
+#define GCC_PCIE_3_AUX_CLK_SRC 85
+#define GCC_PCIE_3_CFG_AHB_CLK 86
+#define GCC_PCIE_3_MSTR_AXI_CLK 87
+#define GCC_PCIE_3_PHY_AUX_CLK 88
+#define GCC_PCIE_3_PHY_RCHNG_CLK 89
+#define GCC_PCIE_3_PHY_RCHNG_CLK_SRC 90
+#define GCC_PCIE_3_PIPE_CLK 91
+#define GCC_PCIE_3_PIPE_DIV_CLK_SRC 92
+#define GCC_PCIE_3_PIPEDIV2_CLK 93
+#define GCC_PCIE_3_SLV_AXI_CLK 94
+#define GCC_PCIE_3_SLV_Q2A_AXI_CLK 95
+#define GCC_PCIE_4_AUX_CLK 96
+#define GCC_PCIE_4_AUX_CLK_SRC 97
+#define GCC_PCIE_4_CFG_AHB_CLK 98
+#define GCC_PCIE_4_MSTR_AXI_CLK 99
+#define GCC_PCIE_4_PHY_RCHNG_CLK 100
+#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 101
+#define GCC_PCIE_4_PIPE_CLK 102
+#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 103
+#define GCC_PCIE_4_PIPEDIV2_CLK 104
+#define GCC_PCIE_4_SLV_AXI_CLK 105
+#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 106
+#define GCC_PCIE_5_AUX_CLK 107
+#define GCC_PCIE_5_AUX_CLK_SRC 108
+#define GCC_PCIE_5_CFG_AHB_CLK 109
+#define GCC_PCIE_5_MSTR_AXI_CLK 110
+#define GCC_PCIE_5_PHY_RCHNG_CLK 111
+#define GCC_PCIE_5_PHY_RCHNG_CLK_SRC 112
+#define GCC_PCIE_5_PIPE_CLK 113
+#define GCC_PCIE_5_PIPE_DIV_CLK_SRC 114
+#define GCC_PCIE_5_PIPEDIV2_CLK 115
+#define GCC_PCIE_5_SLV_AXI_CLK 116
+#define GCC_PCIE_5_SLV_Q2A_AXI_CLK 117
+#define GCC_PCIE_6A_AUX_CLK 118
+#define GCC_PCIE_6A_AUX_CLK_SRC 119
+#define GCC_PCIE_6A_CFG_AHB_CLK 120
+#define GCC_PCIE_6A_MSTR_AXI_CLK 121
+#define GCC_PCIE_6A_PHY_AUX_CLK 122
+#define GCC_PCIE_6A_PHY_RCHNG_CLK 123
+#define GCC_PCIE_6A_PHY_RCHNG_CLK_SRC 124
+#define GCC_PCIE_6A_PIPE_CLK 125
+#define GCC_PCIE_6A_PIPE_DIV_CLK_SRC 126
+#define GCC_PCIE_6A_PIPEDIV2_CLK 127
+#define GCC_PCIE_6A_SLV_AXI_CLK 128
+#define GCC_PCIE_6A_SLV_Q2A_AXI_CLK 129
+#define GCC_PCIE_6B_AUX_CLK 130
+#define GCC_PCIE_6B_AUX_CLK_SRC 131
+#define GCC_PCIE_6B_CFG_AHB_CLK 132
+#define GCC_PCIE_6B_MSTR_AXI_CLK 133
+#define GCC_PCIE_6B_PHY_AUX_CLK 134
+#define GCC_PCIE_6B_PHY_RCHNG_CLK 135
+#define GCC_PCIE_6B_PHY_RCHNG_CLK_SRC 136
+#define GCC_PCIE_6B_PIPE_CLK 137
+#define GCC_PCIE_6B_PIPE_DIV_CLK_SRC 138
+#define GCC_PCIE_6B_PIPEDIV2_CLK 139
+#define GCC_PCIE_6B_SLV_AXI_CLK 140
+#define GCC_PCIE_6B_SLV_Q2A_AXI_CLK 141
+#define GCC_PCIE_RSCC_AHB_CLK 142
+#define GCC_PCIE_RSCC_XO_CLK 143
+#define GCC_PCIE_RSCC_XO_CLK_SRC 144
+#define GCC_PDM2_CLK 145
+#define GCC_PDM2_CLK_SRC 146
+#define GCC_PDM_AHB_CLK 147
+#define GCC_PDM_XO4_CLK 148
+#define GCC_QMIP_AV1E_AHB_CLK 149
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 150
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 151
+#define GCC_QMIP_DISP_AHB_CLK 152
+#define GCC_QMIP_GPU_AHB_CLK 153
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 154
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 155
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 156
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 157
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 158
+#define GCC_QUPV3_WRAP0_CORE_CLK 159
+#define GCC_QUPV3_WRAP0_QSPI_S2_CLK 160
+#define GCC_QUPV3_WRAP0_QSPI_S3_CLK 161
+#define GCC_QUPV3_WRAP0_S0_CLK 162
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 163
+#define GCC_QUPV3_WRAP0_S1_CLK 164
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 165
+#define GCC_QUPV3_WRAP0_S2_CLK 166
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 167
+#define GCC_QUPV3_WRAP0_S2_DIV_CLK_SRC 168
+#define GCC_QUPV3_WRAP0_S3_CLK 169
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 170
+#define GCC_QUPV3_WRAP0_S3_DIV_CLK_SRC 171
+#define GCC_QUPV3_WRAP0_S4_CLK 172
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 173
+#define GCC_QUPV3_WRAP0_S5_CLK 174
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 175
+#define GCC_QUPV3_WRAP0_S6_CLK 176
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 177
+#define GCC_QUPV3_WRAP0_S7_CLK 178
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 179
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 180
+#define GCC_QUPV3_WRAP1_CORE_CLK 181
+#define GCC_QUPV3_WRAP1_QSPI_S2_CLK 182
+#define GCC_QUPV3_WRAP1_QSPI_S3_CLK 183
+#define GCC_QUPV3_WRAP1_S0_CLK 184
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 185
+#define GCC_QUPV3_WRAP1_S1_CLK 186
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 187
+#define GCC_QUPV3_WRAP1_S2_CLK 188
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 189
+#define GCC_QUPV3_WRAP1_S2_DIV_CLK_SRC 190
+#define GCC_QUPV3_WRAP1_S3_CLK 191
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 192
+#define GCC_QUPV3_WRAP1_S3_DIV_CLK_SRC 193
+#define GCC_QUPV3_WRAP1_S4_CLK 194
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 195
+#define GCC_QUPV3_WRAP1_S5_CLK 196
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 197
+#define GCC_QUPV3_WRAP1_S6_CLK 198
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 199
+#define GCC_QUPV3_WRAP1_S7_CLK 200
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 201
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 202
+#define GCC_QUPV3_WRAP2_CORE_CLK 203
+#define GCC_QUPV3_WRAP2_QSPI_S2_CLK 204
+#define GCC_QUPV3_WRAP2_QSPI_S3_CLK 205
+#define GCC_QUPV3_WRAP2_S0_CLK 206
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 207
+#define GCC_QUPV3_WRAP2_S1_CLK 208
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 209
+#define GCC_QUPV3_WRAP2_S2_CLK 210
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 211
+#define GCC_QUPV3_WRAP2_S2_DIV_CLK_SRC 212
+#define GCC_QUPV3_WRAP2_S3_CLK 213
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 214
+#define GCC_QUPV3_WRAP2_S3_DIV_CLK_SRC 215
+#define GCC_QUPV3_WRAP2_S4_CLK 216
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 217
+#define GCC_QUPV3_WRAP2_S5_CLK 218
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 219
+#define GCC_QUPV3_WRAP2_S6_CLK 220
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 221
+#define GCC_QUPV3_WRAP2_S7_CLK 222
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 223
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 224
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 225
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 226
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 227
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 228
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 229
+#define GCC_SDCC2_AHB_CLK 230
+#define GCC_SDCC2_APPS_CLK 231
+#define GCC_SDCC2_APPS_CLK_SRC 232
+#define GCC_SDCC4_AHB_CLK 233
+#define GCC_SDCC4_APPS_CLK 234
+#define GCC_SDCC4_APPS_CLK_SRC 235
+#define GCC_SYS_NOC_USB_AXI_CLK 236
+#define GCC_UFS_PHY_AHB_CLK 237
+#define GCC_UFS_PHY_AXI_CLK 238
+#define GCC_UFS_PHY_AXI_CLK_SRC 239
+#define GCC_UFS_PHY_ICE_CORE_CLK 240
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 241
+#define GCC_UFS_PHY_PHY_AUX_CLK 242
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 243
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 244
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 245
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 246
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 247
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 248
+#define GCC_USB20_MASTER_CLK 249
+#define GCC_USB20_MASTER_CLK_SRC 250
+#define GCC_USB20_MOCK_UTMI_CLK 251
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 252
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 253
+#define GCC_USB20_SLEEP_CLK 254
+#define GCC_USB30_MP_MASTER_CLK 255
+#define GCC_USB30_MP_MASTER_CLK_SRC 256
+#define GCC_USB30_MP_MOCK_UTMI_CLK 257
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 258
+#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 259
+#define GCC_USB30_MP_SLEEP_CLK 260
+#define GCC_USB30_PRIM_MASTER_CLK 261
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 262
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 263
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 264
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 265
+#define GCC_USB30_PRIM_SLEEP_CLK 266
+#define GCC_USB30_SEC_MASTER_CLK 267
+#define GCC_USB30_SEC_MASTER_CLK_SRC 268
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 269
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 270
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 271
+#define GCC_USB30_SEC_SLEEP_CLK 272
+#define GCC_USB30_TERT_MASTER_CLK 273
+#define GCC_USB30_TERT_MASTER_CLK_SRC 274
+#define GCC_USB30_TERT_MOCK_UTMI_CLK 275
+#define GCC_USB30_TERT_MOCK_UTMI_CLK_SRC 276
+#define GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC 277
+#define GCC_USB30_TERT_SLEEP_CLK 278
+#define GCC_USB3_MP_PHY_AUX_CLK 279
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 280
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 281
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 282
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 283
+#define GCC_USB3_PRIM_PHY_AUX_CLK 284
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 285
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 286
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 287
+#define GCC_USB3_SEC_PHY_AUX_CLK 288
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 289
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 290
+#define GCC_USB3_SEC_PHY_PIPE_CLK 291
+#define GCC_USB3_TERT_PHY_AUX_CLK 292
+#define GCC_USB3_TERT_PHY_AUX_CLK_SRC 293
+#define GCC_USB3_TERT_PHY_COM_AUX_CLK 294
+#define GCC_USB3_TERT_PHY_PIPE_CLK 295
+#define GCC_USB4_0_CFG_AHB_CLK 296
+#define GCC_USB4_0_DP0_CLK 297
+#define GCC_USB4_0_DP1_CLK 298
+#define GCC_USB4_0_MASTER_CLK 299
+#define GCC_USB4_0_MASTER_CLK_SRC 300
+#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK 301
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK 302
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC 303
+#define GCC_USB4_0_PHY_RX0_CLK 304
+#define GCC_USB4_0_PHY_RX1_CLK 305
+#define GCC_USB4_0_PHY_USB_PIPE_CLK 306
+#define GCC_USB4_0_SB_IF_CLK 307
+#define GCC_USB4_0_SB_IF_CLK_SRC 308
+#define GCC_USB4_0_SYS_CLK 309
+#define GCC_USB4_0_TMU_CLK 310
+#define GCC_USB4_0_TMU_CLK_SRC 311
+#define GCC_USB4_1_CFG_AHB_CLK 312
+#define GCC_USB4_1_DP0_CLK 313
+#define GCC_USB4_1_DP1_CLK 314
+#define GCC_USB4_1_MASTER_CLK 315
+#define GCC_USB4_1_MASTER_CLK_SRC 316
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 317
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 318
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 319
+#define GCC_USB4_1_PHY_RX0_CLK 320
+#define GCC_USB4_1_PHY_RX1_CLK 321
+#define GCC_USB4_1_PHY_USB_PIPE_CLK 322
+#define GCC_USB4_1_SB_IF_CLK 323
+#define GCC_USB4_1_SB_IF_CLK_SRC 324
+#define GCC_USB4_1_SYS_CLK 325
+#define GCC_USB4_1_TMU_CLK 326
+#define GCC_USB4_1_TMU_CLK_SRC 327
+#define GCC_USB4_2_CFG_AHB_CLK 328
+#define GCC_USB4_2_DP0_CLK 329
+#define GCC_USB4_2_DP1_CLK 330
+#define GCC_USB4_2_MASTER_CLK 331
+#define GCC_USB4_2_MASTER_CLK_SRC 332
+#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK 333
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK 334
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC 335
+#define GCC_USB4_2_PHY_RX0_CLK 336
+#define GCC_USB4_2_PHY_RX1_CLK 337
+#define GCC_USB4_2_PHY_USB_PIPE_CLK 338
+#define GCC_USB4_2_SB_IF_CLK 339
+#define GCC_USB4_2_SB_IF_CLK_SRC 340
+#define GCC_USB4_2_SYS_CLK 341
+#define GCC_USB4_2_TMU_CLK 342
+#define GCC_USB4_2_TMU_CLK_SRC 343
+#define GCC_VIDEO_AHB_CLK 344
+#define GCC_VIDEO_AXI0_CLK 345
+#define GCC_VIDEO_AXI1_CLK 346
+#define GCC_VIDEO_XO_CLK 347
+#define GCC_PCIE_3_PIPE_CLK_SRC 348
+#define GCC_PCIE_4_PIPE_CLK_SRC 349
+#define GCC_PCIE_5_PIPE_CLK_SRC 350
+#define GCC_PCIE_6A_PIPE_CLK_SRC 351
+#define GCC_PCIE_6B_PIPE_CLK_SRC 352
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 353
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 354
+#define GCC_USB3_TERT_PHY_PIPE_CLK_SRC 355
+
+/* GCC power domains */
+#define GCC_PCIE_0_TUNNEL_GDSC 0
+#define GCC_PCIE_1_TUNNEL_GDSC 1
+#define GCC_PCIE_2_TUNNEL_GDSC 2
+#define GCC_PCIE_3_GDSC 3
+#define GCC_PCIE_3_PHY_GDSC 4
+#define GCC_PCIE_4_GDSC 5
+#define GCC_PCIE_4_PHY_GDSC 6
+#define GCC_PCIE_5_GDSC 7
+#define GCC_PCIE_5_PHY_GDSC 8
+#define GCC_PCIE_6_PHY_GDSC 9
+#define GCC_PCIE_6A_GDSC 10
+#define GCC_PCIE_6B_GDSC 11
+#define GCC_UFS_MEM_PHY_GDSC 12
+#define GCC_UFS_PHY_GDSC 13
+#define GCC_USB20_PRIM_GDSC 14
+#define GCC_USB30_MP_GDSC 15
+#define GCC_USB30_PRIM_GDSC 16
+#define GCC_USB30_SEC_GDSC 17
+#define GCC_USB30_TERT_GDSC 18
+#define GCC_USB3_MP_SS0_PHY_GDSC 19
+#define GCC_USB3_MP_SS1_PHY_GDSC 20
+#define GCC_USB4_0_GDSC 21
+#define GCC_USB4_1_GDSC 22
+#define GCC_USB4_2_GDSC 23
+#define GCC_USB_0_PHY_GDSC 24
+#define GCC_USB_1_PHY_GDSC 25
+#define GCC_USB_2_PHY_GDSC 26
+
+/* GCC resets */
+#define GCC_AV1E_BCR 0
+#define GCC_CAMERA_BCR 1
+#define GCC_DISPLAY_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_0_TUNNEL_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_1_TUNNEL_BCR 13
+#define GCC_PCIE_2_LINK_DOWN_BCR 14
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 15
+#define GCC_PCIE_2_PHY_BCR 16
+#define GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR 17
+#define GCC_PCIE_2_TUNNEL_BCR 18
+#define GCC_PCIE_3_BCR 19
+#define GCC_PCIE_3_LINK_DOWN_BCR 20
+#define GCC_PCIE_3_NOCSR_COM_PHY_BCR 21
+#define GCC_PCIE_3_PHY_BCR 22
+#define GCC_PCIE_3_PHY_NOCSR_COM_PHY_BCR 23
+#define GCC_PCIE_4_BCR 24
+#define GCC_PCIE_4_LINK_DOWN_BCR 25
+#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 26
+#define GCC_PCIE_4_PHY_BCR 27
+#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 28
+#define GCC_PCIE_5_BCR 29
+#define GCC_PCIE_5_LINK_DOWN_BCR 30
+#define GCC_PCIE_5_NOCSR_COM_PHY_BCR 31
+#define GCC_PCIE_5_PHY_BCR 32
+#define GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR 33
+#define GCC_PCIE_6A_BCR 34
+#define GCC_PCIE_6A_LINK_DOWN_BCR 35
+#define GCC_PCIE_6A_NOCSR_COM_PHY_BCR 36
+#define GCC_PCIE_6A_PHY_BCR 37
+#define GCC_PCIE_6A_PHY_NOCSR_COM_PHY_BCR 38
+#define GCC_PCIE_6B_BCR 39
+#define GCC_PCIE_6B_LINK_DOWN_BCR 40
+#define GCC_PCIE_6B_NOCSR_COM_PHY_BCR 41
+#define GCC_PCIE_6B_PHY_BCR 42
+#define GCC_PCIE_6B_PHY_NOCSR_COM_PHY_BCR 43
+#define GCC_PCIE_PHY_BCR 44
+#define GCC_PCIE_PHY_CFG_AHB_BCR 45
+#define GCC_PCIE_PHY_COM_BCR 46
+#define GCC_PCIE_RSCC_BCR 47
+#define GCC_PDM_BCR 48
+#define GCC_QUPV3_WRAPPER_0_BCR 49
+#define GCC_QUPV3_WRAPPER_1_BCR 50
+#define GCC_QUPV3_WRAPPER_2_BCR 51
+#define GCC_QUSB2PHY_HS0_MP_BCR 52
+#define GCC_QUSB2PHY_HS1_MP_BCR 53
+#define GCC_QUSB2PHY_PRIM_BCR 54
+#define GCC_QUSB2PHY_SEC_BCR 55
+#define GCC_QUSB2PHY_TERT_BCR 56
+#define GCC_QUSB2PHY_USB20_HS_BCR 57
+#define GCC_SDCC2_BCR 58
+#define GCC_SDCC4_BCR 59
+#define GCC_UFS_PHY_BCR 60
+#define GCC_USB20_PRIM_BCR 61
+#define GCC_USB30_MP_BCR 62
+#define GCC_USB30_PRIM_BCR 63
+#define GCC_USB30_SEC_BCR 64
+#define GCC_USB30_TERT_BCR 65
+#define GCC_USB3_MP_SS0_PHY_BCR 66
+#define GCC_USB3_MP_SS1_PHY_BCR 67
+#define GCC_USB3_PHY_PRIM_BCR 68
+#define GCC_USB3_PHY_SEC_BCR 69
+#define GCC_USB3_PHY_TERT_BCR 70
+#define GCC_USB3_UNIPHY_MP0_BCR 71
+#define GCC_USB3_UNIPHY_MP1_BCR 72
+#define GCC_USB3PHY_PHY_PRIM_BCR 73
+#define GCC_USB3PHY_PHY_SEC_BCR 74
+#define GCC_USB3PHY_PHY_TERT_BCR 75
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 76
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 77
+#define GCC_USB4_0_BCR 78
+#define GCC_USB4_0_DP0_PHY_PRIM_BCR 79
+#define GCC_USB4_1_DP0_PHY_SEC_BCR 80
+#define GCC_USB4_2_DP0_PHY_TERT_BCR 81
+#define GCC_USB4_1_BCR 82
+#define GCC_USB4_2_BCR 83
+#define GCC_USB_0_PHY_BCR 84
+#define GCC_USB_1_PHY_BCR 85
+#define GCC_USB_2_PHY_BCR 86
+#define GCC_VIDEO_BCR 87
+#endif
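
On X1E80100 the GCC again serves as clock, reset, and power-domain provider. A sketch of a primary USB3 controller wiring; the node label and clock-names are assumptions:

    #include <dt-bindings/clock/qcom,x1e80100-gcc.h>

    &usb_1 {
            clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                     <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                     <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
            clock-names = "cfg_noc", "core", "sleep";
            resets = <&gcc GCC_USB30_PRIM_BCR>;
            power-domains = <&gcc GCC_USB30_PRIM_GDSC>;
    };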
diff --git a/include/dt-bindings/clock/sophgo,cv1800.h b/include/dt-bindings/clock/sophgo,cv1800.h
new file mode 100644
index 0000000000..cfbeca25a6
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,cv1800.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Ltd.
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_CV1800_CLK_H__
+#define __DT_BINDINGS_SOPHGO_CV1800_CLK_H__
+
+#define CLK_MPLL 0
+#define CLK_TPLL 1
+#define CLK_FPLL 2
+#define CLK_MIPIMPLL 3
+#define CLK_A0PLL 4
+#define CLK_DISPPLL 5
+#define CLK_CAM0PLL 6
+#define CLK_CAM1PLL 7
+
+#define CLK_MIPIMPLL_D3 8
+#define CLK_CAM0PLL_D2 9
+#define CLK_CAM0PLL_D3 10
+
+#define CLK_TPU 11
+#define CLK_TPU_FAB 12
+#define CLK_AHB_ROM 13
+#define CLK_DDR_AXI_REG 14
+#define CLK_RTC_25M 15
+#define CLK_SRC_RTC_SYS_0 16
+#define CLK_TEMPSEN 17
+#define CLK_SARADC 18
+#define CLK_EFUSE 19
+#define CLK_APB_EFUSE 20
+#define CLK_DEBUG 21
+#define CLK_AP_DEBUG 22
+#define CLK_XTAL_MISC 23
+#define CLK_AXI4_EMMC 24
+#define CLK_EMMC 25
+#define CLK_EMMC_100K 26
+#define CLK_AXI4_SD0 27
+#define CLK_SD0 28
+#define CLK_SD0_100K 29
+#define CLK_AXI4_SD1 30
+#define CLK_SD1 31
+#define CLK_SD1_100K 32
+#define CLK_SPI_NAND 33
+#define CLK_ETH0_500M 34
+#define CLK_AXI4_ETH0 35
+#define CLK_ETH1_500M 36
+#define CLK_AXI4_ETH1 37
+#define CLK_APB_GPIO 38
+#define CLK_APB_GPIO_INTR 39
+#define CLK_GPIO_DB 40
+#define CLK_AHB_SF 41
+#define CLK_AHB_SF1 42
+#define CLK_A24M 43
+#define CLK_AUDSRC 44
+#define CLK_APB_AUDSRC 45
+#define CLK_SDMA_AXI 46
+#define CLK_SDMA_AUD0 47
+#define CLK_SDMA_AUD1 48
+#define CLK_SDMA_AUD2 49
+#define CLK_SDMA_AUD3 50
+#define CLK_I2C 51
+#define CLK_APB_I2C 52
+#define CLK_APB_I2C0 53
+#define CLK_APB_I2C1 54
+#define CLK_APB_I2C2 55
+#define CLK_APB_I2C3 56
+#define CLK_APB_I2C4 57
+#define CLK_APB_WDT 58
+#define CLK_PWM_SRC 59
+#define CLK_PWM 60
+#define CLK_SPI 61
+#define CLK_APB_SPI0 62
+#define CLK_APB_SPI1 63
+#define CLK_APB_SPI2 64
+#define CLK_APB_SPI3 65
+#define CLK_1M 66
+#define CLK_CAM0_200 67
+#define CLK_PM 68
+#define CLK_TIMER0 69
+#define CLK_TIMER1 70
+#define CLK_TIMER2 71
+#define CLK_TIMER3 72
+#define CLK_TIMER4 73
+#define CLK_TIMER5 74
+#define CLK_TIMER6 75
+#define CLK_TIMER7 76
+#define CLK_UART0 77
+#define CLK_APB_UART0 78
+#define CLK_UART1 79
+#define CLK_APB_UART1 80
+#define CLK_UART2 81
+#define CLK_APB_UART2 82
+#define CLK_UART3 83
+#define CLK_APB_UART3 84
+#define CLK_UART4 85
+#define CLK_APB_UART4 86
+#define CLK_APB_I2S0 87
+#define CLK_APB_I2S1 88
+#define CLK_APB_I2S2 89
+#define CLK_APB_I2S3 90
+#define CLK_AXI4_USB 91
+#define CLK_APB_USB 92
+#define CLK_USB_125M 93
+#define CLK_USB_33K 94
+#define CLK_USB_12M 95
+#define CLK_AXI4 96
+#define CLK_AXI6 97
+#define CLK_DSI_ESC 98
+#define CLK_AXI_VIP 99
+#define CLK_SRC_VIP_SYS_0 100
+#define CLK_SRC_VIP_SYS_1 101
+#define CLK_SRC_VIP_SYS_2 102
+#define CLK_SRC_VIP_SYS_3 103
+#define CLK_SRC_VIP_SYS_4 104
+#define CLK_CSI_BE_VIP 105
+#define CLK_CSI_MAC0_VIP 106
+#define CLK_CSI_MAC1_VIP 107
+#define CLK_CSI_MAC2_VIP 108
+#define CLK_CSI0_RX_VIP 109
+#define CLK_CSI1_RX_VIP 110
+#define CLK_ISP_TOP_VIP 111
+#define CLK_IMG_D_VIP 112
+#define CLK_IMG_V_VIP 113
+#define CLK_SC_TOP_VIP 114
+#define CLK_SC_D_VIP 115
+#define CLK_SC_V1_VIP 116
+#define CLK_SC_V2_VIP 117
+#define CLK_SC_V3_VIP 118
+#define CLK_DWA_VIP 119
+#define CLK_BT_VIP 120
+#define CLK_DISP_VIP 121
+#define CLK_DSI_MAC_VIP 122
+#define CLK_LVDS0_VIP 123
+#define CLK_LVDS1_VIP 124
+#define CLK_PAD_VI_VIP 125
+#define CLK_PAD_VI1_VIP 126
+#define CLK_PAD_VI2_VIP 127
+#define CLK_CFG_REG_VIP 128
+#define CLK_VIP_IP0 129
+#define CLK_VIP_IP1 130
+#define CLK_VIP_IP2 131
+#define CLK_VIP_IP3 132
+#define CLK_IVE_VIP 133
+#define CLK_RAW_VIP 134
+#define CLK_OSDC_VIP 135
+#define CLK_CAM0_VIP 136
+#define CLK_AXI_VIDEO_CODEC 137
+#define CLK_VC_SRC0 138
+#define CLK_VC_SRC1 139
+#define CLK_VC_SRC2 140
+#define CLK_H264C 141
+#define CLK_APB_H264C 142
+#define CLK_H265C 143
+#define CLK_APB_H265C 144
+#define CLK_JPEG 145
+#define CLK_APB_JPEG 146
+#define CLK_CAM0 147
+#define CLK_CAM1 148
+#define CLK_WGN 149
+#define CLK_WGN0 150
+#define CLK_WGN1 151
+#define CLK_WGN2 152
+#define CLK_KEYSCAN 153
+#define CLK_CFG_REG_VC 154
+#define CLK_C906_0 155
+#define CLK_C906_1 156
+#define CLK_A53 157
+#define CLK_CPU_AXI0 158
+#define CLK_CPU_GIC 159
+#define CLK_XTAL_AP 160
+
+// Only for CV181x
+#define CLK_DISP_SRC_VIP 161
+
+#endif /* __DT_BINDINGS_SOPHGO_CV1800_CLK_H__ */
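
A sketch of a CV1800 UART consumer; the `clk` provider label and the ns16550-style clock-names are assumptions:

    #include <dt-bindings/clock/sophgo,cv1800.h>

    &uart0 {
            clocks = <&clk CLK_UART0>, <&clk CLK_APB_UART0>;
            clock-names = "baudclk", "apb_pclk";
    };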
diff --git a/include/dt-bindings/clock/st,stm32mp25-rcc.h b/include/dt-bindings/clock/st,stm32mp25-rcc.h
new file mode 100644
index 0000000000..b6cf05ad4b
--- /dev/null
+++ b/include/dt-bindings/clock/st,stm32mp25-rcc.h
@@ -0,0 +1,492 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP25_CLKS_H_
+#define _DT_BINDINGS_STM32MP25_CLKS_H_
+
+/* INTERNAL/EXTERNAL OSCILLATORS */
+#define HSI_CK 0
+#define HSE_CK 1
+#define MSI_CK 2
+#define LSI_CK 3
+#define LSE_CK 4
+#define I2S_CK 5
+#define RTC_CK 6
+#define SPDIF_CK_SYMB 7
+
+/* PLL CLOCKS */
+#define PLL1_CK 8
+#define PLL2_CK 9
+#define PLL3_CK 10
+#define PLL4_CK 11
+#define PLL5_CK 12
+#define PLL6_CK 13
+#define PLL7_CK 14
+#define PLL8_CK 15
+
+#define CK_CPU1 16
+
+/* APB DIV CLOCKS */
+#define CK_ICN_APB1 17
+#define CK_ICN_APB2 18
+#define CK_ICN_APB3 19
+#define CK_ICN_APB4 20
+#define CK_ICN_APBDBG 21
+
+/* GLOBAL TIMER */
+#define TIMG1_CK 22
+#define TIMG2_CK 23
+
+/* FLEXGEN CLOCKS */
+#define CK_ICN_HS_MCU 24
+#define CK_ICN_SDMMC 25
+#define CK_ICN_DDR 26
+#define CK_ICN_DISPLAY 27
+#define CK_ICN_HSL 28
+#define CK_ICN_NIC 29
+#define CK_ICN_VID 30
+#define CK_FLEXGEN_07 31
+#define CK_FLEXGEN_08 32
+#define CK_FLEXGEN_09 33
+#define CK_FLEXGEN_10 34
+#define CK_FLEXGEN_11 35
+#define CK_FLEXGEN_12 36
+#define CK_FLEXGEN_13 37
+#define CK_FLEXGEN_14 38
+#define CK_FLEXGEN_15 39
+#define CK_FLEXGEN_16 40
+#define CK_FLEXGEN_17 41
+#define CK_FLEXGEN_18 42
+#define CK_FLEXGEN_19 43
+#define CK_FLEXGEN_20 44
+#define CK_FLEXGEN_21 45
+#define CK_FLEXGEN_22 46
+#define CK_FLEXGEN_23 47
+#define CK_FLEXGEN_24 48
+#define CK_FLEXGEN_25 49
+#define CK_FLEXGEN_26 50
+#define CK_FLEXGEN_27 51
+#define CK_FLEXGEN_28 52
+#define CK_FLEXGEN_29 53
+#define CK_FLEXGEN_30 54
+#define CK_FLEXGEN_31 55
+#define CK_FLEXGEN_32 56
+#define CK_FLEXGEN_33 57
+#define CK_FLEXGEN_34 58
+#define CK_FLEXGEN_35 59
+#define CK_FLEXGEN_36 60
+#define CK_FLEXGEN_37 61
+#define CK_FLEXGEN_38 62
+#define CK_FLEXGEN_39 63
+#define CK_FLEXGEN_40 64
+#define CK_FLEXGEN_41 65
+#define CK_FLEXGEN_42 66
+#define CK_FLEXGEN_43 67
+#define CK_FLEXGEN_44 68
+#define CK_FLEXGEN_45 69
+#define CK_FLEXGEN_46 70
+#define CK_FLEXGEN_47 71
+#define CK_FLEXGEN_48 72
+#define CK_FLEXGEN_49 73
+#define CK_FLEXGEN_50 74
+#define CK_FLEXGEN_51 75
+#define CK_FLEXGEN_52 76
+#define CK_FLEXGEN_53 77
+#define CK_FLEXGEN_54 78
+#define CK_FLEXGEN_55 79
+#define CK_FLEXGEN_56 80
+#define CK_FLEXGEN_57 81
+#define CK_FLEXGEN_58 82
+#define CK_FLEXGEN_59 83
+#define CK_FLEXGEN_60 84
+#define CK_FLEXGEN_61 85
+#define CK_FLEXGEN_62 86
+#define CK_FLEXGEN_63 87
+
+/* LOW SPEED MCU CLOCK */
+#define CK_ICN_LS_MCU 88
+
+#define CK_BUS_STM500 89
+#define CK_BUS_FMC 90
+#define CK_BUS_GPU 91
+#define CK_BUS_ETH1 92
+#define CK_BUS_ETH2 93
+#define CK_BUS_PCIE 94
+#define CK_BUS_DDRPHYC 95
+#define CK_BUS_SYSCPU1 96
+#define CK_BUS_ETHSW 97
+#define CK_BUS_HPDMA1 98
+#define CK_BUS_HPDMA2 99
+#define CK_BUS_HPDMA3 100
+#define CK_BUS_ADC12 101
+#define CK_BUS_ADC3 102
+#define CK_BUS_IPCC1 103
+#define CK_BUS_CCI 104
+#define CK_BUS_CRC 105
+#define CK_BUS_MDF1 106
+#define CK_BUS_OSPIIOM 107
+#define CK_BUS_BKPSRAM 108
+#define CK_BUS_HASH 109
+#define CK_BUS_RNG 110
+#define CK_BUS_CRYP1 111
+#define CK_BUS_CRYP2 112
+#define CK_BUS_SAES 113
+#define CK_BUS_PKA 114
+#define CK_BUS_GPIOA 115
+#define CK_BUS_GPIOB 116
+#define CK_BUS_GPIOC 117
+#define CK_BUS_GPIOD 118
+#define CK_BUS_GPIOE 119
+#define CK_BUS_GPIOF 120
+#define CK_BUS_GPIOG 121
+#define CK_BUS_GPIOH 122
+#define CK_BUS_GPIOI 123
+#define CK_BUS_GPIOJ 124
+#define CK_BUS_GPIOK 125
+#define CK_BUS_LPSRAM1 126
+#define CK_BUS_LPSRAM2 127
+#define CK_BUS_LPSRAM3 128
+#define CK_BUS_GPIOZ 129
+#define CK_BUS_LPDMA 130
+#define CK_BUS_HSEM 131
+#define CK_BUS_IPCC2 132
+#define CK_BUS_RTC 133
+#define CK_BUS_SPI8 134
+#define CK_BUS_LPUART1 135
+#define CK_BUS_I2C8 136
+#define CK_BUS_LPTIM3 137
+#define CK_BUS_LPTIM4 138
+#define CK_BUS_LPTIM5 139
+#define CK_BUS_IWDG5 140
+#define CK_BUS_WWDG2 141
+#define CK_BUS_I3C4 142
+#define CK_BUS_TIM2 143
+#define CK_BUS_TIM3 144
+#define CK_BUS_TIM4 145
+#define CK_BUS_TIM5 146
+#define CK_BUS_TIM6 147
+#define CK_BUS_TIM7 148
+#define CK_BUS_TIM10 149
+#define CK_BUS_TIM11 150
+#define CK_BUS_TIM12 151
+#define CK_BUS_TIM13 152
+#define CK_BUS_TIM14 153
+#define CK_BUS_LPTIM1 154
+#define CK_BUS_LPTIM2 155
+#define CK_BUS_SPI2 156
+#define CK_BUS_SPI3 157
+#define CK_BUS_SPDIFRX 158
+#define CK_BUS_USART2 159
+#define CK_BUS_USART3 160
+#define CK_BUS_UART4 161
+#define CK_BUS_UART5 162
+#define CK_BUS_I2C1 163
+#define CK_BUS_I2C2 164
+#define CK_BUS_I2C3 165
+#define CK_BUS_I2C4 166
+#define CK_BUS_I2C5 167
+#define CK_BUS_I2C6 168
+#define CK_BUS_I2C7 169
+#define CK_BUS_I3C1 170
+#define CK_BUS_I3C2 171
+#define CK_BUS_I3C3 172
+#define CK_BUS_TIM1 173
+#define CK_BUS_TIM8 174
+#define CK_BUS_TIM15 175
+#define CK_BUS_TIM16 176
+#define CK_BUS_TIM17 177
+#define CK_BUS_TIM20 178
+#define CK_BUS_SAI1 179
+#define CK_BUS_SAI2 180
+#define CK_BUS_SAI3 181
+#define CK_BUS_SAI4 182
+#define CK_BUS_USART1 183
+#define CK_BUS_USART6 184
+#define CK_BUS_UART7 185
+#define CK_BUS_UART8 186
+#define CK_BUS_UART9 187
+#define CK_BUS_FDCAN 188
+#define CK_BUS_SPI1 189
+#define CK_BUS_SPI4 190
+#define CK_BUS_SPI5 191
+#define CK_BUS_SPI6 192
+#define CK_BUS_SPI7 193
+#define CK_BUS_BSEC 194
+#define CK_BUS_IWDG1 195
+#define CK_BUS_IWDG2 196
+#define CK_BUS_IWDG3 197
+#define CK_BUS_IWDG4 198
+#define CK_BUS_WWDG1 199
+#define CK_BUS_VREF 200
+#define CK_BUS_DTS 201
+#define CK_BUS_SERC 202
+#define CK_BUS_HDP 203
+#define CK_BUS_IS2M 204
+#define CK_BUS_DSI 205
+#define CK_BUS_LTDC 206
+#define CK_BUS_CSI 207
+#define CK_BUS_DCMIPP 208
+#define CK_BUS_DDRC 209
+#define CK_BUS_DDRCFG 210
+#define CK_BUS_GICV2M 211
+#define CK_BUS_USBTC 212
+#define CK_BUS_USB3PCIEPHY 214
+#define CK_BUS_STGEN 215
+#define CK_BUS_VDEC 216
+#define CK_BUS_VENC 217
+#define CK_SYSDBG 218
+#define CK_KER_TIM2 219
+#define CK_KER_TIM3 220
+#define CK_KER_TIM4 221
+#define CK_KER_TIM5 222
+#define CK_KER_TIM6 223
+#define CK_KER_TIM7 224
+#define CK_KER_TIM10 225
+#define CK_KER_TIM11 226
+#define CK_KER_TIM12 227
+#define CK_KER_TIM13 228
+#define CK_KER_TIM14 229
+#define CK_KER_TIM1 230
+#define CK_KER_TIM8 231
+#define CK_KER_TIM15 232
+#define CK_KER_TIM16 233
+#define CK_KER_TIM17 234
+#define CK_KER_TIM20 235
+#define CK_BUS_SYSRAM 236
+#define CK_BUS_VDERAM 237
+#define CK_BUS_RETRAM 238
+#define CK_BUS_OSPI1 239
+#define CK_BUS_OSPI2 240
+#define CK_BUS_OTFD1 241
+#define CK_BUS_OTFD2 242
+#define CK_BUS_SRAM1 243
+#define CK_BUS_SRAM2 244
+#define CK_BUS_SDMMC1 245
+#define CK_BUS_SDMMC2 246
+#define CK_BUS_SDMMC3 247
+#define CK_BUS_DDR 248
+#define CK_BUS_RISAF4 249
+#define CK_BUS_USB2OHCI 250
+#define CK_BUS_USB2EHCI 251
+#define CK_BUS_USB3DR 252
+#define CK_KER_LPTIM1 253
+#define CK_KER_LPTIM2 254
+#define CK_KER_USART2 255
+#define CK_KER_UART4 256
+#define CK_KER_USART3 257
+#define CK_KER_UART5 258
+#define CK_KER_SPI2 259
+#define CK_KER_SPI3 260
+#define CK_KER_SPDIFRX 261
+#define CK_KER_I2C1 262
+#define CK_KER_I2C2 263
+#define CK_KER_I3C1 264
+#define CK_KER_I3C2 265
+#define CK_KER_I2C3 266
+#define CK_KER_I2C5 267
+#define CK_KER_I3C3 268
+#define CK_KER_I2C4 269
+#define CK_KER_I2C6 270
+#define CK_KER_I2C7 271
+#define CK_KER_SPI1 272
+#define CK_KER_SPI4 273
+#define CK_KER_SPI5 274
+#define CK_KER_SPI6 275
+#define CK_KER_SPI7 276
+#define CK_KER_USART1 277
+#define CK_KER_USART6 278
+#define CK_KER_UART7 279
+#define CK_KER_UART8 280
+#define CK_KER_UART9 281
+#define CK_KER_MDF1 282
+#define CK_KER_SAI1 283
+#define CK_KER_SAI2 284
+#define CK_KER_SAI3 285
+#define CK_KER_SAI4 286
+#define CK_KER_FDCAN 287
+#define CK_KER_DSIBLANE 288
+#define CK_KER_DSIPHY 289
+#define CK_KER_CSI 290
+#define CK_KER_CSITXESC 291
+#define CK_KER_CSIPHY 292
+#define CK_KER_LVDSPHY 293
+#define CK_KER_STGEN 294
+#define CK_KER_USB3PCIEPHY 295
+#define CK_KER_USB2PHY2EN 296
+#define CK_KER_I3C4 297
+#define CK_KER_SPI8 298
+#define CK_KER_I2C8 299
+#define CK_KER_LPUART1 300
+#define CK_KER_LPTIM3 301
+#define CK_KER_LPTIM4 302
+#define CK_KER_LPTIM5 303
+#define CK_KER_TSDBG 304
+#define CK_KER_TPIU 305
+#define CK_BUS_ETR 306
+#define CK_BUS_SYSATB 307
+#define CK_KER_ADC12 308
+#define CK_KER_ADC3 309
+#define CK_KER_OSPI1 310
+#define CK_KER_OSPI2 311
+#define CK_KER_FMC 312
+#define CK_KER_SDMMC1 313
+#define CK_KER_SDMMC2 314
+#define CK_KER_SDMMC3 315
+#define CK_KER_ETH1 316
+#define CK_KER_ETH2 317
+#define CK_KER_ETH1PTP 318
+#define CK_KER_ETH2PTP 319
+#define CK_KER_USB2PHY1 320
+#define CK_KER_USB2PHY2 321
+#define CK_KER_ETHSW 322
+#define CK_KER_ETHSWREF 323
+#define CK_MCO1 324
+#define CK_MCO2 325
+#define CK_KER_DTS 326
+#define CK_ETH1_RX 327
+#define CK_ETH1_TX 328
+#define CK_ETH1_MAC 329
+#define CK_ETH2_RX 330
+#define CK_ETH2_TX 331
+#define CK_ETH2_MAC 332
+#define CK_ETH1_STP 333
+#define CK_ETH2_STP 334
+#define CK_KER_USBTC 335
+#define CK_BUS_ADF1 336
+#define CK_KER_ADF1 337
+#define CK_BUS_LVDS 338
+#define CK_KER_LTDC 339
+#define CK_KER_GPU 340
+#define CK_BUS_ETHSWACMCFG 341
+#define CK_BUS_ETHSWACMMSG 342
+#define HSE_DIV2_CK 343
+
+#define STM32MP25_LAST_CLK 344
+
+#define CK_SCMI_ICN_HS_MCU 0
+#define CK_SCMI_ICN_SDMMC 1
+#define CK_SCMI_ICN_DDR 2
+#define CK_SCMI_ICN_DISPLAY 3
+#define CK_SCMI_ICN_HSL 4
+#define CK_SCMI_ICN_NIC 5
+#define CK_SCMI_ICN_VID 6
+#define CK_SCMI_FLEXGEN_07 7
+#define CK_SCMI_FLEXGEN_08 8
+#define CK_SCMI_FLEXGEN_09 9
+#define CK_SCMI_FLEXGEN_10 10
+#define CK_SCMI_FLEXGEN_11 11
+#define CK_SCMI_FLEXGEN_12 12
+#define CK_SCMI_FLEXGEN_13 13
+#define CK_SCMI_FLEXGEN_14 14
+#define CK_SCMI_FLEXGEN_15 15
+#define CK_SCMI_FLEXGEN_16 16
+#define CK_SCMI_FLEXGEN_17 17
+#define CK_SCMI_FLEXGEN_18 18
+#define CK_SCMI_FLEXGEN_19 19
+#define CK_SCMI_FLEXGEN_20 20
+#define CK_SCMI_FLEXGEN_21 21
+#define CK_SCMI_FLEXGEN_22 22
+#define CK_SCMI_FLEXGEN_23 23
+#define CK_SCMI_FLEXGEN_24 24
+#define CK_SCMI_FLEXGEN_25 25
+#define CK_SCMI_FLEXGEN_26 26
+#define CK_SCMI_FLEXGEN_27 27
+#define CK_SCMI_FLEXGEN_28 28
+#define CK_SCMI_FLEXGEN_29 29
+#define CK_SCMI_FLEXGEN_30 30
+#define CK_SCMI_FLEXGEN_31 31
+#define CK_SCMI_FLEXGEN_32 32
+#define CK_SCMI_FLEXGEN_33 33
+#define CK_SCMI_FLEXGEN_34 34
+#define CK_SCMI_FLEXGEN_35 35
+#define CK_SCMI_FLEXGEN_36 36
+#define CK_SCMI_FLEXGEN_37 37
+#define CK_SCMI_FLEXGEN_38 38
+#define CK_SCMI_FLEXGEN_39 39
+#define CK_SCMI_FLEXGEN_40 40
+#define CK_SCMI_FLEXGEN_41 41
+#define CK_SCMI_FLEXGEN_42 42
+#define CK_SCMI_FLEXGEN_43 43
+#define CK_SCMI_FLEXGEN_44 44
+#define CK_SCMI_FLEXGEN_45 45
+#define CK_SCMI_FLEXGEN_46 46
+#define CK_SCMI_FLEXGEN_47 47
+#define CK_SCMI_FLEXGEN_48 48
+#define CK_SCMI_FLEXGEN_49 49
+#define CK_SCMI_FLEXGEN_50 50
+#define CK_SCMI_FLEXGEN_51 51
+#define CK_SCMI_FLEXGEN_52 52
+#define CK_SCMI_FLEXGEN_53 53
+#define CK_SCMI_FLEXGEN_54 54
+#define CK_SCMI_FLEXGEN_55 55
+#define CK_SCMI_FLEXGEN_56 56
+#define CK_SCMI_FLEXGEN_57 57
+#define CK_SCMI_FLEXGEN_58 58
+#define CK_SCMI_FLEXGEN_59 59
+#define CK_SCMI_FLEXGEN_60 60
+#define CK_SCMI_FLEXGEN_61 61
+#define CK_SCMI_FLEXGEN_62 62
+#define CK_SCMI_FLEXGEN_63 63
+#define CK_SCMI_ICN_LS_MCU 64
+#define CK_SCMI_HSE 65
+#define CK_SCMI_LSE 66
+#define CK_SCMI_HSI 67
+#define CK_SCMI_LSI 68
+#define CK_SCMI_MSI 69
+#define CK_SCMI_HSE_DIV2 70
+#define CK_SCMI_CPU1 71
+#define CK_SCMI_SYSCPU1 72
+#define CK_SCMI_PLL2 73
+#define CK_SCMI_PLL3 74
+#define CK_SCMI_RTC 75
+#define CK_SCMI_RTCCK 76
+#define CK_SCMI_ICN_APB1 77
+#define CK_SCMI_ICN_APB2 78
+#define CK_SCMI_ICN_APB3 79
+#define CK_SCMI_ICN_APB4 80
+#define CK_SCMI_ICN_APBDBG 81
+#define CK_SCMI_TIMG1 82
+#define CK_SCMI_TIMG2 83
+#define CK_SCMI_BKPSRAM 84
+#define CK_SCMI_BSEC 85
+#define CK_SCMI_ETR 87
+#define CK_SCMI_FMC 88
+#define CK_SCMI_GPIOA 89
+#define CK_SCMI_GPIOB 90
+#define CK_SCMI_GPIOC 91
+#define CK_SCMI_GPIOD 92
+#define CK_SCMI_GPIOE 93
+#define CK_SCMI_GPIOF 94
+#define CK_SCMI_GPIOG 95
+#define CK_SCMI_GPIOH 96
+#define CK_SCMI_GPIOI 97
+#define CK_SCMI_GPIOJ 98
+#define CK_SCMI_GPIOK 99
+#define CK_SCMI_GPIOZ 100
+#define CK_SCMI_HPDMA1 101
+#define CK_SCMI_HPDMA2 102
+#define CK_SCMI_HPDMA3 103
+#define CK_SCMI_HSEM 104
+#define CK_SCMI_IPCC1 105
+#define CK_SCMI_IPCC2 106
+#define CK_SCMI_LPDMA 107
+#define CK_SCMI_RETRAM 108
+#define CK_SCMI_SRAM1 109
+#define CK_SCMI_SRAM2 110
+#define CK_SCMI_LPSRAM1 111
+#define CK_SCMI_LPSRAM2 112
+#define CK_SCMI_LPSRAM3 113
+#define CK_SCMI_VDERAM 114
+#define CK_SCMI_SYSRAM 115
+#define CK_SCMI_OSPI1 116
+#define CK_SCMI_OSPI2 117
+#define CK_SCMI_TPIU 118
+#define CK_SCMI_SYSDBG 119
+#define CK_SCMI_SYSATB 120
+#define CK_SCMI_TSDBG 121
+#define CK_SCMI_STM500 122
+
+#endif /* _DT_BINDINGS_STM32MP25_CLKS_H_ */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h
new file mode 100644
index 0000000000..96908014e0
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H
+
+#ifndef PM7325_SID
+#define PM7325_SID 1
+#endif
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
+/* ADC channels for PM7325_ADC for PMIC7 */
+#define PM7325_ADC7_REF_GND (PM7325_SID << 8 | ADC7_REF_GND)
+#define PM7325_ADC7_1P25VREF (PM7325_SID << 8 | ADC7_1P25VREF)
+#define PM7325_ADC7_VREF_VADC (PM7325_SID << 8 | ADC7_VREF_VADC)
+#define PM7325_ADC7_DIE_TEMP (PM7325_SID << 8 | ADC7_DIE_TEMP)
+
+#define PM7325_ADC7_AMUX_THM1 (PM7325_SID << 8 | ADC7_AMUX_THM1)
+#define PM7325_ADC7_AMUX_THM2 (PM7325_SID << 8 | ADC7_AMUX_THM2)
+#define PM7325_ADC7_AMUX_THM3 (PM7325_SID << 8 | ADC7_AMUX_THM3)
+#define PM7325_ADC7_AMUX_THM4 (PM7325_SID << 8 | ADC7_AMUX_THM4)
+#define PM7325_ADC7_AMUX_THM5 (PM7325_SID << 8 | ADC7_AMUX_THM5)
+#define PM7325_ADC7_GPIO1 (PM7325_SID << 8 | ADC7_GPIO1)
+#define PM7325_ADC7_GPIO2 (PM7325_SID << 8 | ADC7_GPIO2)
+#define PM7325_ADC7_GPIO3 (PM7325_SID << 8 | ADC7_GPIO3)
+#define PM7325_ADC7_GPIO4 (PM7325_SID << 8 | ADC7_GPIO4)
+
+/* 30k pull-up1 */
+#define PM7325_ADC7_AMUX_THM1_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM7325_ADC7_AMUX_THM2_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM7325_ADC7_AMUX_THM3_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM7325_ADC7_AMUX_THM4_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM7325_ADC7_AMUX_THM5_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM7325_ADC7_GPIO1_30K_PU (PM7325_SID << 8 | ADC7_GPIO1_30K_PU)
+#define PM7325_ADC7_GPIO2_30K_PU (PM7325_SID << 8 | ADC7_GPIO2_30K_PU)
+#define PM7325_ADC7_GPIO3_30K_PU (PM7325_SID << 8 | ADC7_GPIO3_30K_PU)
+#define PM7325_ADC7_GPIO4_30K_PU (PM7325_SID << 8 | ADC7_GPIO4_30K_PU)
+
+/* 100k pull-up2 */
+#define PM7325_ADC7_AMUX_THM1_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM7325_ADC7_AMUX_THM2_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM7325_ADC7_AMUX_THM3_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM7325_ADC7_AMUX_THM4_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM7325_ADC7_AMUX_THM5_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM7325_ADC7_GPIO1_100K_PU (PM7325_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PM7325_ADC7_GPIO2_100K_PU (PM7325_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PM7325_ADC7_GPIO3_100K_PU (PM7325_SID << 8 | ADC7_GPIO3_100K_PU)
+#define PM7325_ADC7_GPIO4_100K_PU (PM7325_SID << 8 | ADC7_GPIO4_100K_PU)
+
+/* 400k pull-up3 */
+#define PM7325_ADC7_AMUX_THM1_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM7325_ADC7_AMUX_THM2_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM7325_ADC7_AMUX_THM3_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM7325_ADC7_AMUX_THM4_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM7325_ADC7_AMUX_THM5_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM7325_ADC7_GPIO1_400K_PU (PM7325_SID << 8 | ADC7_GPIO1_400K_PU)
+#define PM7325_ADC7_GPIO2_400K_PU (PM7325_SID << 8 | ADC7_GPIO2_400K_PU)
+#define PM7325_ADC7_GPIO3_400K_PU (PM7325_SID << 8 | ADC7_GPIO3_400K_PU)
+#define PM7325_ADC7_GPIO4_400K_PU (PM7325_SID << 8 | ADC7_GPIO4_400K_PU)
+
+/* 1/3 Divider */
+#define PM7325_ADC7_GPIO4_DIV3 (PM7325_SID << 8 | ADC7_GPIO4_DIV3)
+
+#define PM7325_ADC7_VPH_PWR (PM7325_SID << 8 | ADC7_VPH_PWR)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H */
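All of the PM7325 defines above follow one encoding: the PMIC's SPMI slave ID
(SID) occupies the upper byte and the per-PMIC ADC7 channel number the lower
byte. A minimal sketch of how a consumer could split such a value back apart;
the helper names are illustrative, not part of the binding:

#include <dt-bindings/iio/qcom,spmi-adc7-pm7325.h>

/* Upper byte carries the SPMI slave ID (SID). */
static inline unsigned int adc7_chan_sid(unsigned int chan)
{
	return chan >> 8;
}

/* Lower byte carries the ADC7 channel number on that PMIC. */
static inline unsigned int adc7_chan_num(unsigned int chan)
{
	return chan & 0xff;
}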
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h b/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h
new file mode 100644
index 0000000000..c0680d1285
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
+#define SMB139x_1_ADC7_SMB_TEMP (SMB139x_1_SID << 8 | ADC7_SMB_TEMP)
+#define SMB139x_1_ADC7_ICHG_SMB (SMB139x_1_SID << 8 | ADC7_ICHG_SMB)
+#define SMB139x_1_ADC7_IIN_SMB (SMB139x_1_SID << 8 | ADC7_IIN_SMB)
+
+#define SMB139x_2_ADC7_SMB_TEMP (SMB139x_2_SID << 8 | ADC7_SMB_TEMP)
+#define SMB139x_2_ADC7_ICHG_SMB (SMB139x_2_SID << 8 | ADC7_ICHG_SMB)
+#define SMB139x_2_ADC7_IIN_SMB (SMB139x_2_SID << 8 | ADC7_IIN_SMB)
+
+#endif
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index 08adfe2596..ef07ecd4d5 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -239,12 +239,15 @@
#define ADC7_GPIO3 0x0c
#define ADC7_GPIO4 0x0d
+#define ADC7_SMB_TEMP 0x06
#define ADC7_CHG_TEMP 0x10
#define ADC7_USB_IN_V_16 0x11
#define ADC7_VDC_16 0x12
#define ADC7_CC1_ID 0x13
#define ADC7_VREF_BAT_THERM 0x15
#define ADC7_IIN_FB 0x17
+#define ADC7_ICHG_SMB 0x18
+#define ADC7_IIN_SMB 0x19
/* 30k pull-up1 */
#define ADC7_AMUX_THM1_30K_PU 0x24
diff --git a/include/dt-bindings/interconnect/qcom,sm6115.h b/include/dt-bindings/interconnect/qcom,sm6115.h
new file mode 100644
index 0000000000..21090e585f
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm6115.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM6115_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM6115_H
+
+/* BIMC */
+#define MASTER_AMPSS_M0 0
+#define MASTER_SNOC_BIMC_RT 1
+#define MASTER_SNOC_BIMC_NRT 2
+#define SNOC_BIMC_MAS 3
+#define MASTER_GRAPHICS_3D 4
+#define MASTER_TCU_0 5
+#define SLAVE_EBI_CH0 6
+#define BIMC_SNOC_SLV 7
+
+/* CNOC */
+#define SNOC_CNOC_MAS 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_AHB2PHY_USB 2
+#define SLAVE_APSS_THROTTLE_CFG 3
+#define SLAVE_BIMC_CFG 4
+#define SLAVE_BOOT_ROM 5
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 6
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CLK_CTL 9
+#define SLAVE_RBCPR_CX_CFG 10
+#define SLAVE_RBCPR_MX_CFG 11
+#define SLAVE_CRYPTO_0_CFG 12
+#define SLAVE_DCC_CFG 13
+#define SLAVE_DDR_PHY_CFG 14
+#define SLAVE_DDR_SS_CFG 15
+#define SLAVE_DISPLAY_CFG 16
+#define SLAVE_DISPLAY_THROTTLE_CFG 17
+#define SLAVE_GPU_CFG 18
+#define SLAVE_GPU_THROTTLE_CFG 19
+#define SLAVE_HWKM_CORE 20
+#define SLAVE_IMEM_CFG 21
+#define SLAVE_IPA_CFG 22
+#define SLAVE_LPASS 23
+#define SLAVE_MAPSS 24
+#define SLAVE_MDSP_MPU_CFG 25
+#define SLAVE_MESSAGE_RAM 26
+#define SLAVE_CNOC_MSS 27
+#define SLAVE_PDM 28
+#define SLAVE_PIMEM_CFG 29
+#define SLAVE_PKA_CORE 30
+#define SLAVE_PMIC_ARB 31
+#define SLAVE_QDSS_CFG 32
+#define SLAVE_QM_CFG 33
+#define SLAVE_QM_MPU_CFG 34
+#define SLAVE_QPIC 35
+#define SLAVE_QUP_0 36
+#define SLAVE_RPM 37
+#define SLAVE_SDCC_1 38
+#define SLAVE_SDCC_2 39
+#define SLAVE_SECURITY 40
+#define SLAVE_SNOC_CFG 41
+#define SLAVE_TCSR 42
+#define SLAVE_TLMM 43
+#define SLAVE_USB3 44
+#define SLAVE_VENUS_CFG 45
+#define SLAVE_VENUS_THROTTLE_CFG 46
+#define SLAVE_VSENSE_CTRL_CFG 47
+#define SLAVE_SERVICE_CNOC 48
+
+/* SNOC */
+#define MASTER_CRYPTO_CORE0 0
+#define MASTER_SNOC_CFG 1
+#define MASTER_TIC 2
+#define MASTER_ANOC_SNOC 3
+#define BIMC_SNOC_MAS 4
+#define MASTER_PIMEM 5
+#define MASTER_QDSS_BAM 6
+#define MASTER_QPIC 7
+#define MASTER_QUP_0 8
+#define MASTER_IPA 9
+#define MASTER_QDSS_ETR 10
+#define MASTER_SDCC_1 11
+#define MASTER_SDCC_2 12
+#define MASTER_USB3 13
+#define SLAVE_APPSS 14
+#define SNOC_CNOC_SLV 15
+#define SLAVE_OCIMEM 16
+#define SLAVE_PIMEM 17
+#define SNOC_BIMC_SLV 18
+#define SLAVE_SERVICE_SNOC 19
+#define SLAVE_QDSS_STM 20
+#define SLAVE_TCU 21
+#define SLAVE_ANOC_SNOC 22
+
+/* CLK Virtual */
+#define MASTER_QUP_CORE_0 0
+#define SLAVE_QUP_CORE_0 1
+
+/* MMRT Virtual */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP_PORT0 1
+#define SLAVE_SNOC_BIMC_RT 2
+
+/* MMNRT Virtual */
+#define MASTER_CAMNOC_SF 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define SLAVE_SNOC_BIMC_NRT 3
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
new file mode 100644
index 0000000000..6c1eaf04e2
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8650_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8650_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_3 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_IPA 3
+#define MASTER_SP 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_QDSS_ETR_1 6
+#define MASTER_SDCC_2 7
+#define SLAVE_A2NOC_SNOC 8
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_RBCPR_CX_CFG 5
+#define SLAVE_CPR_HMX 6
+#define SLAVE_RBCPR_MMCX_CFG 7
+#define SLAVE_RBCPR_MXA_CFG 8
+#define SLAVE_RBCPR_MXC_CFG 9
+#define SLAVE_CPR_NSPCX 10
+#define SLAVE_CRYPTO_0_CFG 11
+#define SLAVE_CX_RDPM 12
+#define SLAVE_DISPLAY_CFG 13
+#define SLAVE_GFX3D_CFG 14
+#define SLAVE_I2C 15
+#define SLAVE_I3C_IBI0_CFG 16
+#define SLAVE_I3C_IBI1_CFG 17
+#define SLAVE_IMEM_CFG 18
+#define SLAVE_CNOC_MSS 19
+#define SLAVE_MX_2_RDPM 20
+#define SLAVE_MX_RDPM 21
+#define SLAVE_PCIE_0_CFG 22
+#define SLAVE_PCIE_1_CFG 23
+#define SLAVE_PCIE_RSCC 24
+#define SLAVE_PDM 25
+#define SLAVE_PRNG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_3 29
+#define SLAVE_QUP_1 30
+#define SLAVE_QUP_2 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SPSS_CFG 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_UFS_MEM_CFG 37
+#define SLAVE_USB3_0 38
+#define SLAVE_VENUS_CFG 39
+#define SLAVE_VSENSE_CTRL_CFG 40
+#define SLAVE_CNOC_MNOC_CFG 41
+#define SLAVE_NSP_QTB_CFG 42
+#define SLAVE_PCIE_ANOC_CFG 43
+#define SLAVE_SERVICE_CNOC_CFG 44
+#define SLAVE_QDSS_STM 45
+#define SLAVE_TCU 46
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_IPA_CFG 3
+#define SLAVE_IPC_ROUTER_CFG 4
+#define SLAVE_TME_CFG 5
+#define SLAVE_APPSS 6
+#define SLAVE_CNOC_CFG 7
+#define SLAVE_DDRSS_CFG 8
+#define SLAVE_IMEM 9
+#define SLAVE_SERVICE_CNOC 10
+#define SLAVE_PCIE_0 11
+#define SLAVE_PCIE_1 12
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_UBWC_P_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_GFX3D 4
+#define MASTER_LPASS_GEM_NOC 5
+#define MASTER_MSS_PROC 6
+#define MASTER_MNOC_HF_MEM_NOC 7
+#define MASTER_MNOC_SF_MEM_NOC 8
+#define MASTER_COMPUTE_NOC 9
+#define MASTER_ANOC_PCIE_GEM_NOC 10
+#define MASTER_SNOC_SF_MEM_NOC 11
+#define MASTER_UBWC_P 12
+#define MASTER_GIC 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_MEM_NOC_PCIE_SNOC 16
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_CDSP_HCP 4
+#define MASTER_VIDEO 5
+#define MASTER_VIDEO_CV_PROC 6
+#define MASTER_VIDEO_PROC 7
+#define MASTER_VIDEO_V_PROC 8
+#define MASTER_CNOC_MNOC_CFG 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define SLAVE_SNOC_GEM_NOC_SF 2
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h b/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h
new file mode 100644
index 0000000000..a38c347269
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_X1E80100_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_X1E80100_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_SDCC_4 2
+#define MASTER_UFS_MEM 3
+#define SLAVE_A1NOC_SNOC 4
+
+#define MASTER_QUP_0 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_SP 3
+#define MASTER_QDSS_ETR 4
+#define MASTER_QDSS_ETR_1 5
+#define MASTER_SDCC_2 6
+#define SLAVE_A2NOC_SNOC 7
+
+#define MASTER_DDR_PERF_MODE 0
+#define MASTER_QUP_CORE_0 1
+#define MASTER_QUP_CORE_1 2
+#define MASTER_QUP_CORE_2 3
+#define SLAVE_DDR_PERF_MODE 4
+#define SLAVE_QUP_CORE_0 5
+#define SLAVE_QUP_CORE_1 6
+#define SLAVE_QUP_CORE_2 7
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_AHB2PHY_2 3
+#define SLAVE_AV1_ENC_CFG 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CRYPTO_0_CFG 7
+#define SLAVE_DISPLAY_CFG 8
+#define SLAVE_GFX3D_CFG 9
+#define SLAVE_IMEM_CFG 10
+#define SLAVE_IPC_ROUTER_CFG 11
+#define SLAVE_PCIE_0_CFG 12
+#define SLAVE_PCIE_1_CFG 13
+#define SLAVE_PCIE_2_CFG 14
+#define SLAVE_PCIE_3_CFG 15
+#define SLAVE_PCIE_4_CFG 16
+#define SLAVE_PCIE_5_CFG 17
+#define SLAVE_PCIE_6A_CFG 18
+#define SLAVE_PCIE_6B_CFG 19
+#define SLAVE_PCIE_RSC_CFG 20
+#define SLAVE_PDM 21
+#define SLAVE_PRNG 22
+#define SLAVE_QDSS_CFG 23
+#define SLAVE_QSPI_0 24
+#define SLAVE_QUP_0 25
+#define SLAVE_QUP_1 26
+#define SLAVE_QUP_2 27
+#define SLAVE_SDCC_2 28
+#define SLAVE_SDCC_4 29
+#define SLAVE_SMMUV3_CFG 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM 32
+#define SLAVE_UFS_MEM_CFG 33
+#define SLAVE_USB2 34
+#define SLAVE_USB3_0 35
+#define SLAVE_USB3_1 36
+#define SLAVE_USB3_2 37
+#define SLAVE_USB3_MP 38
+#define SLAVE_USB4_0 39
+#define SLAVE_USB4_1 40
+#define SLAVE_USB4_2 41
+#define SLAVE_VENUS_CFG 42
+#define SLAVE_LPASS_QTB_CFG 43
+#define SLAVE_CNOC_MNOC_CFG 44
+#define SLAVE_NSP_QTB_CFG 45
+#define SLAVE_QDSS_STM 46
+#define SLAVE_TCU 47
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_TME_CFG 3
+#define SLAVE_APPSS 4
+#define SLAVE_CNOC_CFG 5
+#define SLAVE_BOOT_IMEM 6
+#define SLAVE_IMEM 7
+#define SLAVE_PCIE_0 8
+#define SLAVE_PCIE_1 9
+#define SLAVE_PCIE_2 10
+#define SLAVE_PCIE_3 11
+#define SLAVE_PCIE_4 12
+#define SLAVE_PCIE_5 13
+#define SLAVE_PCIE_6A 14
+#define SLAVE_PCIE_6B 15
+
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_GFX3D 4
+#define MASTER_LPASS_GEM_NOC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_GIC2 11
+#define SLAVE_GEM_NOC_CNOC 12
+#define SLAVE_LLCC 13
+#define SLAVE_MEM_NOC_PCIE_SNOC 14
+#define MASTER_MNOC_HF_MEM_NOC_DISP 15
+#define MASTER_ANOC_PCIE_GEM_NOC_DISP 16
+#define SLAVE_LLCC_DISP 17
+#define MASTER_ANOC_PCIE_GEM_NOC_PCIE 18
+#define SLAVE_LLCC_PCIE 19
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+#define MASTER_LLCC_DISP 2
+#define SLAVE_EBI1_DISP 3
+#define MASTER_LLCC_PCIE 4
+#define SLAVE_EBI1_PCIE 5
+
+#define MASTER_AV1_ENC 0
+#define MASTER_CAMNOC_HF 1
+#define MASTER_CAMNOC_ICP 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_EVA 4
+#define MASTER_MDP 5
+#define MASTER_VIDEO 6
+#define MASTER_VIDEO_CV_PROC 7
+#define MASTER_VIDEO_V_PROC 8
+#define MASTER_CNOC_MNOC_CFG 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+#define MASTER_MDP_DISP 13
+#define SLAVE_MNOC_HF_MEM_NOC_DISP 14
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_NORTH 0
+#define MASTER_PCIE_SOUTH 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+#define MASTER_PCIE_NORTH_PCIE 3
+#define MASTER_PCIE_SOUTH_PCIE 4
+#define SLAVE_ANOC_PCIE_GEM_NOC_PCIE 5
+
+#define MASTER_PCIE_3 0
+#define MASTER_PCIE_4 1
+#define MASTER_PCIE_5 2
+#define SLAVE_PCIE_NORTH 3
+#define MASTER_PCIE_3_PCIE 4
+#define MASTER_PCIE_4_PCIE 5
+#define MASTER_PCIE_5_PCIE 6
+#define SLAVE_PCIE_NORTH_PCIE 7
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+#define MASTER_PCIE_6A 3
+#define MASTER_PCIE_6B 4
+#define SLAVE_PCIE_SOUTH 5
+#define MASTER_PCIE_0_PCIE 6
+#define MASTER_PCIE_1_PCIE 7
+#define MASTER_PCIE_2_PCIE 8
+#define MASTER_PCIE_6A_PCIE 9
+#define MASTER_PCIE_6B_PCIE 10
+#define SLAVE_PCIE_SOUTH_PCIE 11
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_GIC1 2
+#define MASTER_USB_NOC_SNOC 3
+#define SLAVE_SNOC_GEM_NOC_SF 4
+
+#define MASTER_AGGRE_USB_NORTH 0
+#define MASTER_AGGRE_USB_SOUTH 1
+#define SLAVE_USB_NOC_SNOC 2
+
+#define MASTER_USB2 0
+#define MASTER_USB3_MP 1
+#define SLAVE_AGGRE_USB_NORTH 2
+
+#define MASTER_USB3_0 0
+#define MASTER_USB3_1 1
+#define MASTER_USB3_2 2
+#define MASTER_USB4_0 3
+#define MASTER_USB4_1 4
+#define MASTER_USB4_2 5
+#define SLAVE_AGGRE_USB_SOUTH 6
+
+#endif
diff --git a/include/dt-bindings/power/meson-g12a-power.h b/include/dt-bindings/power/meson-g12a-power.h
index 44ec0c50e3..01fd0ac4dd 100644
--- a/include/dt-bindings/power/meson-g12a-power.h
+++ b/include/dt-bindings/power/meson-g12a-power.h
@@ -10,5 +10,6 @@
#define PWRC_G12A_VPU_ID 0
#define PWRC_G12A_ETH_ID 1
#define PWRC_G12A_NNA_ID 2
+#define PWRC_G12A_ISP_ID 3
#endif
diff --git a/include/dt-bindings/reset/amlogic,c3-reset.h b/include/dt-bindings/reset/amlogic,c3-reset.h
new file mode 100644
index 0000000000..d9127863f6
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,c3-reset.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_C3_RESET_H
+#define _DT_BINDINGS_AMLOGIC_C3_RESET_H
+
+/* RESET0 */
+/* 0-3 */
+#define RESET_USBCTRL 4
+/* 5-7 */
+#define RESET_USBPHY20 8
+/* 9 */
+#define RESET_USB2DRD 10
+#define RESET_MIPI_DSI_HOST 11
+#define RESET_MIPI_DSI_PHY 12
+/* 13-20 */
+#define RESET_GE2D 21
+#define RESET_DWAP 22
+/* 23-31 */
+
+/* RESET1 */
+#define RESET_AUDIO 32
+/* 33-34 */
+#define RESET_DDRAPB 35
+#define RESET_DDR 36
+#define RESET_DOS_CAPB3 37
+#define RESET_DOS 38
+/* 39-46 */
+#define RESET_NNA 47
+#define RESET_ETHERNET 48
+#define RESET_ISP 49
+#define RESET_VC9000E_APB 50
+#define RESET_VC9000E_A 51
+/* 52 */
+#define RESET_VC9000E_CORE 53
+/* 54-63 */
+
+/* RESET2 */
+#define RESET_ABUS_ARB 64
+#define RESET_IRCTRL 65
+/* 66 */
+#define RESET_TEMP_PII 67
+/* 68-72 */
+#define RESET_SPICC_0 73
+#define RESET_SPICC_1 74
+#define RESET_RSA 75
+
+/* 76-79 */
+#define RESET_MSR_CLK 80
+#define RESET_SPIFC 81
+#define RESET_SAR_ADC 82
+/* 83-87 */
+#define RESET_ACODEC 88
+/* 89-90 */
+#define RESET_WATCHDOG 91
+/* 92-95 */
+
+/* RESET3 */
+#define RESET_ISP_NIC_GPV 96
+#define RESET_ISP_NIC_MAIN 97
+#define RESET_ISP_NIC_VCLK 98
+#define RESET_ISP_NIC_VOUT 99
+#define RESET_ISP_NIC_ALL 100
+#define RESET_VOUT 101
+#define RESET_VOUT_VENC 102
+/* 103 */
+#define RESET_CVE_NIC_GPV 104
+#define RESET_CVE_NIC_MAIN 105
+#define RESET_CVE_NIC_GE2D 106
+#define RESET_CVE_NIC_DW	107
+#define RESET_CVE_NIC_CVE 108
+#define RESET_CVE_NIC_ALL 109
+#define RESET_CVE 110
+/* 112-127 */
+
+/* RESET4 */
+#define RESET_RTC 128
+#define RESET_PWM_AB 129
+#define RESET_PWM_CD 130
+#define RESET_PWM_EF 131
+#define RESET_PWM_GH 132
+#define RESET_PWM_IJ 133
+#define RESET_PWM_KL 134
+#define RESET_PWM_MN 135
+/* 136-137 */
+#define RESET_UART_A 138
+#define RESET_UART_B 139
+#define RESET_UART_C 140
+#define RESET_UART_D 141
+#define RESET_UART_E 142
+#define RESET_UART_F 143
+#define RESET_I2C_S_A 144
+#define RESET_I2C_M_A 145
+#define RESET_I2C_M_B 146
+#define RESET_I2C_M_C 147
+#define RESET_I2C_M_D 148
+/* 149-151 */
+#define RESET_SD_EMMC_A 152
+#define RESET_SD_EMMC_B 153
+#define RESET_SD_EMMC_C 154
+
+/* RESET5 */
+/* 160-172 */
+#define RESET_BRG_NIC_NNA 173
+#define RESET_BRG_MUX_NIC_MAIN 174
+#define RESET_BRG_AO_NIC_ALL 175
+/* 176-183 */
+#define RESET_BRG_NIC_VAPB 184
+#define RESET_BRG_NIC_SDIO_B 185
+#define RESET_BRG_NIC_SDIO_A 186
+#define RESET_BRG_NIC_EMMC 187
+#define RESET_BRG_NIC_DSU 188
+#define RESET_BRG_NIC_SYSCLK 189
+#define RESET_BRG_NIC_MAIN 190
+#define RESET_BRG_NIC_ALL 191
+
+#endif
diff --git a/include/dt-bindings/reset/mediatek,mt7988-resets.h b/include/dt-bindings/reset/mediatek,mt7988-resets.h
new file mode 100644
index 0000000000..4933019713
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt7988-resets.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7988
+#define _DT_BINDINGS_RESET_CONTROLLER_MT7988
+
+/* ETHWARP resets */
+#define MT7988_ETHWARP_RST_SWITCH 0
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7988 */
diff --git a/include/dt-bindings/reset/mt8188-resets.h b/include/dt-bindings/reset/mt8188-resets.h
index ba9a5e9b88..5a58c54e7d 100644
--- a/include/dt-bindings/reset/mt8188-resets.h
+++ b/include/dt-bindings/reset/mt8188-resets.h
@@ -38,4 +38,79 @@
#define MT8188_INFRA_RST1_THERMAL_CTRL_RST 1
#define MT8188_INFRA_RST3_PTP_CTRL_RST 2
+#define MT8188_VDO0_RST_DISP_OVL0 0
+#define MT8188_VDO0_RST_FAKE_ENG0 1
+#define MT8188_VDO0_RST_DISP_CCORR0 2
+#define MT8188_VDO0_RST_DISP_MUTEX0 3
+#define MT8188_VDO0_RST_DISP_GAMMA0 4
+#define MT8188_VDO0_RST_DISP_DITHER0 5
+#define MT8188_VDO0_RST_DISP_WDMA0 6
+#define MT8188_VDO0_RST_DISP_RDMA0 7
+#define MT8188_VDO0_RST_DSI0 8
+#define MT8188_VDO0_RST_DSI1 9
+#define MT8188_VDO0_RST_DSC_WRAP0 10
+#define MT8188_VDO0_RST_VPP_MERGE0 11
+#define MT8188_VDO0_RST_DP_INTF0 12
+#define MT8188_VDO0_RST_DISP_AAL0 13
+#define MT8188_VDO0_RST_INLINEROT0 14
+#define MT8188_VDO0_RST_APB_BUS 15
+#define MT8188_VDO0_RST_DISP_COLOR0 16
+#define MT8188_VDO0_RST_MDP_WROT0 17
+#define MT8188_VDO0_RST_DISP_RSZ0 18
+
+#define MT8188_VDO1_RST_SMI_LARB2 0
+#define MT8188_VDO1_RST_SMI_LARB3 1
+#define MT8188_VDO1_RST_GALS 2
+#define MT8188_VDO1_RST_FAKE_ENG0 3
+#define MT8188_VDO1_RST_FAKE_ENG1 4
+#define MT8188_VDO1_RST_MDP_RDMA0 5
+#define MT8188_VDO1_RST_MDP_RDMA1 6
+#define MT8188_VDO1_RST_MDP_RDMA2 7
+#define MT8188_VDO1_RST_MDP_RDMA3 8
+#define MT8188_VDO1_RST_VPP_MERGE0 9
+#define MT8188_VDO1_RST_VPP_MERGE1 10
+#define MT8188_VDO1_RST_VPP_MERGE2 11
+#define MT8188_VDO1_RST_VPP_MERGE3 12
+#define MT8188_VDO1_RST_VPP_MERGE4 13
+#define MT8188_VDO1_RST_VPP2_TO_VDO1_DL_ASYNC 14
+#define MT8188_VDO1_RST_VPP3_TO_VDO1_DL_ASYNC 15
+#define MT8188_VDO1_RST_DISP_MUTEX 16
+#define MT8188_VDO1_RST_MDP_RDMA4 17
+#define MT8188_VDO1_RST_MDP_RDMA5 18
+#define MT8188_VDO1_RST_MDP_RDMA6 19
+#define MT8188_VDO1_RST_MDP_RDMA7 20
+#define MT8188_VDO1_RST_DP_INTF1_MMCK 21
+#define MT8188_VDO1_RST_DPI0_MM_CK 22
+#define MT8188_VDO1_RST_DPI1_MM_CK 23
+#define MT8188_VDO1_RST_MERGE0_DL_ASYNC 24
+#define MT8188_VDO1_RST_MERGE1_DL_ASYNC 25
+#define MT8188_VDO1_RST_MERGE2_DL_ASYNC 26
+#define MT8188_VDO1_RST_MERGE3_DL_ASYNC 27
+#define MT8188_VDO1_RST_MERGE4_DL_ASYNC 28
+#define MT8188_VDO1_RST_VDO0_DSC_TO_VDO1_DL_ASYNC 29
+#define MT8188_VDO1_RST_VDO0_MERGE_TO_VDO1_DL_ASYNC 30
+#define MT8188_VDO1_RST_PADDING0 31
+#define MT8188_VDO1_RST_PADDING1 32
+#define MT8188_VDO1_RST_PADDING2 33
+#define MT8188_VDO1_RST_PADDING3 34
+#define MT8188_VDO1_RST_PADDING4 35
+#define MT8188_VDO1_RST_PADDING5 36
+#define MT8188_VDO1_RST_PADDING6 37
+#define MT8188_VDO1_RST_PADDING7 38
+#define MT8188_VDO1_RST_DISP_RSZ0 39
+#define MT8188_VDO1_RST_DISP_RSZ1 40
+#define MT8188_VDO1_RST_DISP_RSZ2 41
+#define MT8188_VDO1_RST_DISP_RSZ3 42
+#define MT8188_VDO1_RST_HDR_VDO_FE0 43
+#define MT8188_VDO1_RST_HDR_GFX_FE0 44
+#define MT8188_VDO1_RST_HDR_VDO_BE 45
+#define MT8188_VDO1_RST_HDR_VDO_FE1 46
+#define MT8188_VDO1_RST_HDR_GFX_FE1 47
+#define MT8188_VDO1_RST_DISP_MIXER 48
+#define MT8188_VDO1_RST_HDR_VDO_FE0_DL_ASYNC 49
+#define MT8188_VDO1_RST_HDR_VDO_FE1_DL_ASYNC 50
+#define MT8188_VDO1_RST_HDR_GFX_FE0_DL_ASYNC 51
+#define MT8188_VDO1_RST_HDR_GFX_FE1_DL_ASYNC 52
+#define MT8188_VDO1_RST_HDR_VDO_BE_DL_ASYNC 53
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8188 */
diff --git a/include/dt-bindings/reset/qcom,sm8650-gpucc.h b/include/dt-bindings/reset/qcom,sm8650-gpucc.h
new file mode 100644
index 0000000000..f021a6cccc
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8650-gpucc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8650_H
+#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8650_H
+
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_FAST_HUB_BCR 2
+#define GPUCC_GPU_CC_FF_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 8
+
+#endif
diff --git a/include/dt-bindings/reset/st,stm32mp25-rcc.h b/include/dt-bindings/reset/st,stm32mp25-rcc.h
new file mode 100644
index 0000000000..d5615930bc
--- /dev/null
+++ b/include/dt-bindings/reset/st,stm32mp25-rcc.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author(s): Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP25_RESET_H_
+#define _DT_BINDINGS_STM32MP25_RESET_H_
+
+#define TIM1_R 0
+#define TIM2_R 1
+#define TIM3_R 2
+#define TIM4_R 3
+#define TIM5_R 4
+#define TIM6_R 5
+#define TIM7_R 6
+#define TIM8_R 7
+#define TIM10_R 8
+#define TIM11_R 9
+#define TIM12_R 10
+#define TIM13_R 11
+#define TIM14_R 12
+#define TIM15_R 13
+#define TIM16_R 14
+#define TIM17_R 15
+#define TIM20_R 16
+#define LPTIM1_R 17
+#define LPTIM2_R 18
+#define LPTIM3_R 19
+#define LPTIM4_R 20
+#define LPTIM5_R 21
+#define SPI1_R 22
+#define SPI2_R 23
+#define SPI3_R 24
+#define SPI4_R 25
+#define SPI5_R 26
+#define SPI6_R 27
+#define SPI7_R 28
+#define SPI8_R 29
+#define SPDIFRX_R 30
+#define USART1_R 31
+#define USART2_R 32
+#define USART3_R 33
+#define UART4_R 34
+#define UART5_R 35
+#define USART6_R 36
+#define UART7_R 37
+#define UART8_R 38
+#define UART9_R 39
+#define LPUART1_R 40
+#define IS2M_R 41
+#define I2C1_R 42
+#define I2C2_R 43
+#define I2C3_R 44
+#define I2C4_R 45
+#define I2C5_R 46
+#define I2C6_R 47
+#define I2C7_R 48
+#define I2C8_R 49
+#define SAI1_R 50
+#define SAI2_R 51
+#define SAI3_R 52
+#define SAI4_R 53
+#define MDF1_R 54
+#define MDF2_R 55
+#define FDCAN_R 56
+#define HDP_R 57
+#define ADC12_R 58
+#define ADC3_R 59
+#define ETH1_R 60
+#define ETH2_R 61
+#define USB2_R 62
+#define USB2PHY1_R 63
+#define USB2PHY2_R 64
+#define USB3DR_R 65
+#define USB3PCIEPHY_R 66
+#define USBTC_R 67
+#define ETHSW_R 68
+#define SDMMC1_R 69
+#define SDMMC1DLL_R 70
+#define SDMMC2_R 71
+#define SDMMC2DLL_R 72
+#define SDMMC3_R 73
+#define SDMMC3DLL_R 74
+#define GPU_R 75
+#define LTDC_R 76
+#define DSI_R 77
+#define LVDS_R 78
+#define CSI_R 79
+#define DCMIPP_R 80
+#define CCI_R 81
+#define VDEC_R 82
+#define VENC_R 83
+#define WWDG1_R 84
+#define WWDG2_R 85
+#define VREF_R 86
+#define DTS_R 87
+#define CRC_R 88
+#define SERC_R 89
+#define OSPIIOM_R 90
+#define I3C1_R 91
+#define I3C2_R 92
+#define I3C3_R 93
+#define I3C4_R 94
+#define IWDG2_KER_R 95
+#define IWDG4_KER_R 96
+#define RNG_R 97
+#define PKA_R 98
+#define SAES_R 99
+#define HASH_R 100
+#define CRYP1_R 101
+#define CRYP2_R 102
+#define PCIE_R 103
+#define OSPI1_R 104
+#define OSPI1DLL_R 105
+#define OSPI2_R 106
+#define OSPI2DLL_R 107
+#define FMC_R 108
+#define DBG_R 109
+#define GPIOA_R 110
+#define GPIOB_R 111
+#define GPIOC_R 112
+#define GPIOD_R 113
+#define GPIOE_R 114
+#define GPIOF_R 115
+#define GPIOG_R 116
+#define GPIOH_R 117
+#define GPIOI_R 118
+#define GPIOJ_R 119
+#define GPIOK_R 120
+#define GPIOZ_R 121
+#define HPDMA1_R 122
+#define HPDMA2_R 123
+#define HPDMA3_R 124
+#define LPDMA_R 125
+#define HSEM_R 126
+#define IPCC1_R 127
+#define IPCC2_R 128
+#define C2_HOLDBOOT_R 129
+#define C1_HOLDBOOT_R 130
+#define C1_R 131
+#define C1P1POR_R 132
+#define C1P1_R 133
+#define C2_R 134
+#define C3_R 135
+#define SYS_R 136
+#define VSW_R 137
+#define C1MS_R 138
+#define DDRCP_R 139
+#define DDRCAPB_R 140
+#define DDRPHYCAPB_R 141
+#define DDRCFG_R 142
+#define DDR_R 143
+
+#define STM32MP25_LAST_RESET 144
+
+#define RST_SCMI_C1_R 0
+#define RST_SCMI_C2_R 1
+#define RST_SCMI_C1_HOLDBOOT_R 2
+#define RST_SCMI_C2_HOLDBOOT_R 3
+#define RST_SCMI_FMC 4
+#define RST_SCMI_OSPI1 5
+#define RST_SCMI_OSPI1DLL 6
+#define RST_SCMI_OSPI2 7
+#define RST_SCMI_OSPI2DLL 8
+
+#endif /* _DT_BINDINGS_STM32MP25_RESET_H_ */
diff --git a/include/dt-bindings/soc/rockchip,vop2.h b/include/dt-bindings/soc/rockchip,vop2.h
index 6e66a802b9..668f199df9 100644
--- a/include/dt-bindings/soc/rockchip,vop2.h
+++ b/include/dt-bindings/soc/rockchip,vop2.h
@@ -10,5 +10,9 @@
#define ROCKCHIP_VOP2_EP_LVDS0 5
#define ROCKCHIP_VOP2_EP_MIPI1 6
#define ROCKCHIP_VOP2_EP_LVDS1 7
+#define ROCKCHIP_VOP2_EP_HDMI1 8
+#define ROCKCHIP_VOP2_EP_EDP1 9
+#define ROCKCHIP_VOP2_EP_DP0 10
+#define ROCKCHIP_VOP2_EP_DP1 11
#endif /* __DT_BINDINGS_ROCKCHIP_VOP2_H */
diff --git a/include/kunit/device.h b/include/kunit/device.h
new file mode 100644
index 0000000000..2450110ad6
--- /dev/null
+++ b/include/kunit/device.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit basic device implementation
+ *
+ * Helpers for creating and managing fake devices for KUnit tests.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_DEVICE_H
+#define _KUNIT_DEVICE_H
+
+#if IS_ENABLED(CONFIG_KUNIT)
+
+#include <kunit/test.h>
+
+struct device;
+struct device_driver;
+
+/**
+ * kunit_driver_create() - Create a struct device_driver attached to the kunit_bus
+ * @test: The test context object.
+ * @name: The name to give the created driver.
+ *
+ * Creates a struct device_driver attached to the kunit_bus, with the name @name.
+ * This driver will automatically be cleaned up on test exit.
+ *
+ * Return: a stub struct device_driver, managed by KUnit, with the name @name.
+ */
+struct device_driver *kunit_driver_create(struct kunit *test, const char *name);
+
+/**
+ * kunit_device_register() - Create a struct device for use in KUnit tests
+ * @test: The test context object.
+ * @name: The name to give the created device.
+ *
+ * Creates a struct kunit_device (which is a struct device) with the given name,
+ * and a corresponding driver. The device and driver will be cleaned up on test
+ * exit, or when kunit_device_unregister is called. See also
+ * kunit_device_register_with_driver, if you wish to provide your own
+ * struct device_driver.
+ *
+ * Return: a pointer to a struct device which will be cleaned up when the test
+ * exits, or an error pointer if the device could not be allocated or registered.
+ */
+struct device *kunit_device_register(struct kunit *test, const char *name);
+
+/**
+ * kunit_device_register_with_driver() - Create a struct device for use in KUnit tests
+ * @test: The test context object.
+ * @name: The name to give the created device.
+ * @drv: The struct device_driver to associate with the device.
+ *
+ * Creates a struct kunit_device (which is a struct device) with the given
+ * name, and driver. The device will be cleaned up on test exit, or when
+ * kunit_device_unregister is called. See also kunit_device_register, if you
+ * wish KUnit to create and manage a driver for you.
+ *
+ * Return: a pointer to a struct device which will be cleaned up when the test
+ * exits, or an error pointer if the device could not be allocated or registered.
+ */
+struct device *kunit_device_register_with_driver(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv);
+
+/**
+ * kunit_device_unregister() - Unregister a KUnit-managed device
+ * @test: The test context object which created the device
+ * @dev: The device.
+ *
+ * Unregisters and destroys a struct device which was created with
+ * kunit_device_register or kunit_device_register_with_driver. If KUnit created
+ * a driver, cleans it up as well.
+ */
+void kunit_device_unregister(struct kunit *test, struct device *dev);
+
+#endif
+
+#endif
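A minimal usage sketch for the helpers declared above; the test body and the
device name are illustrative:

#include <kunit/device.h>
#include <kunit/test.h>

static void example_device_test(struct kunit *test)
{
	struct device *dev;

	/* Fake device on the kunit_bus; cleaned up automatically on test exit. */
	dev = kunit_device_register(test, "kunit-example-dev");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	/* ... exercise code that needs a struct device here ... */

	/* Optional: tear the device down before the test ends. */
	kunit_device_unregister(test, dev);
}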
diff --git a/include/kunit/skbuff.h b/include/kunit/skbuff.h
new file mode 100644
index 0000000000..44d1237093
--- /dev/null
+++ b/include/kunit/skbuff.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit resource management helpers for SKBs (skbuff).
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+
+#ifndef _KUNIT_SKBUFF_H
+#define _KUNIT_SKBUFF_H
+
+#include <kunit/resource.h>
+#include <linux/skbuff.h>
+
+static void kunit_action_kfree_skb(void *p)
+{
+ kfree_skb((struct sk_buff *)p);
+}
+
+/**
+ * kunit_zalloc_skb() - Allocate and initialize a resource managed skb.
+ * @test: The test case to which the skb belongs
+ * @len: size to allocate
+ * @gfp: allocation mask to use
+ *
+ * Allocate a new struct sk_buff with the given allocation mask, zero-fill
+ * the given length and add it as a resource to the kunit test for automatic
+ * cleanup.
+ *
+ * Returns: newly allocated SKB, or %NULL on error
+ */
+static inline struct sk_buff *kunit_zalloc_skb(struct kunit *test, int len,
+ gfp_t gfp)
+{
+	struct sk_buff *res = alloc_skb(len, gfp);
+
+ if (!res || skb_pad(res, len))
+ return NULL;
+
+ if (kunit_add_action_or_reset(test, kunit_action_kfree_skb, res))
+ return NULL;
+
+ return res;
+}
+
+/**
+ * kunit_kfree_skb() - Like kfree_skb except for allocations managed by KUnit.
+ * @test: The test case to which the resource belongs.
+ * @skb: The SKB to free.
+ */
+static inline void kunit_kfree_skb(struct kunit *test, struct sk_buff *skb)
+{
+ if (!skb)
+ return;
+
+ kunit_release_action(test, kunit_action_kfree_skb, (void *)skb);
+}
+
+#endif /* _KUNIT_SKBUFF_H */
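A short sketch of the intended usage; the test body is illustrative:

#include <kunit/skbuff.h>

static void example_skb_test(struct kunit *test)
{
	/* 128 zero-filled bytes, freed automatically when the test ends. */
	struct sk_buff *skb = kunit_zalloc_skb(test, 128, GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, skb);

	/* ... build up and inspect the skb ... */

	/* Optional early release via the managed path. */
	kunit_kfree_skb(test, skb);
}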
diff --git a/include/kunit/static_stub.h b/include/kunit/static_stub.h
index 85315c80b3..bf940322df 100644
--- a/include/kunit/static_stub.h
+++ b/include/kunit/static_stub.h
@@ -93,7 +93,7 @@ void __kunit_activate_static_stub(struct kunit *test,
* The redirection can be disabled again with kunit_deactivate_static_stub().
*/
#define kunit_activate_static_stub(test, real_fn_addr, replacement_addr) do { \
- typecheck_fn(typeof(&real_fn_addr), replacement_addr); \
+ typecheck_fn(typeof(&replacement_addr), real_fn_addr); \
__kunit_activate_static_stub(test, real_fn_addr, replacement_addr); \
} while (0)
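The hunk above only swaps the operands of the prototype check; the real
function and its replacement must still have identical signatures. For
context, a sketch of the stub flow this macro guards (function names are
illustrative; the function under test must call KUNIT_STATIC_STUB_REDIRECT
for redirection to take effect):

/* In the code under test. */
static int get_hw_id(void)
{
	KUNIT_STATIC_STUB_REDIRECT(get_hw_id);
	return -ENODEV;	/* stand-in for the real hardware read */
}

/* In the test: a replacement with an identical prototype. */
static int fake_get_hw_id(void)
{
	return 42;
}

static void example_stub_test(struct kunit *test)
{
	kunit_activate_static_stub(test, get_hw_id, fake_get_hw_id);
	KUNIT_EXPECT_EQ(test, get_hw_id(), 42);
	kunit_deactivate_static_stub(test, get_hw_id);
}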
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 20ed9f9275..fcb4a4940a 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -253,6 +253,7 @@ struct kunit_suite {
struct dentry *debugfs;
struct string_stream *log;
int suite_init_err;
+ bool is_init;
};
/* Stores an array of suites, end points one past the end */
@@ -337,6 +338,9 @@ void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites);
void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin);
void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr);
+struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set,
+ struct kunit_suite_set suite_set);
+
#if IS_BUILTIN(CONFIG_KUNIT)
int kunit_run_all_tests(void);
#else
@@ -371,6 +375,11 @@ static inline int kunit_run_all_tests(void)
#define kunit_test_suite(suite) kunit_test_suites(&suite)
+#define __kunit_init_test_suites(unique_array, ...) \
+ static struct kunit_suite *unique_array[] \
+ __aligned(sizeof(struct kunit_suite *)) \
+ __used __section(".kunit_init_test_suites") = { __VA_ARGS__ }
+
/**
* kunit_test_init_section_suites() - used to register one or more &struct
* kunit_suite containing init functions or
@@ -378,21 +387,21 @@ static inline int kunit_run_all_tests(void)
*
* @__suites: a statically allocated list of &struct kunit_suite.
*
- * This functions identically as kunit_test_suites() except that it suppresses
- * modpost warnings for referencing functions marked __init or data marked
- * __initdata; this is OK because currently KUnit only runs tests upon boot
- * during the init phase or upon loading a module during the init phase.
+ * This functions similarly to kunit_test_suites(), except that it builds the
+ * list of suites during the init phase.
+ *
+ * This macro also suffixes the array and suite declarations it makes with
+ * _probe, so that modpost suppresses warnings about referencing init data
+ * for symbols named in this manner.
*
- * NOTE TO KUNIT DEVS: If we ever allow KUnit tests to be run after boot, these
- * tests must be excluded.
+ * Note: these init tests cannot be run after boot, so no "run" debugfs file
+ * is generated for these tests.
*
- * The only thing this macro does that's different from kunit_test_suites is
- * that it suffixes the array and suite declarations it makes with _probe;
- * modpost suppresses warnings about referencing init data for symbols named in
- * this manner.
+ * Also, do not mark the suite or test case structs with __initdata because
+ * they will be used after the init phase with debugfs.
*/
#define kunit_test_init_section_suites(__suites...) \
- __kunit_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
+ __kunit_init_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
##__suites)
#define kunit_test_init_section_suite(suite) \
@@ -749,7 +758,7 @@ do { \
.right_text = #right, \
}; \
\
- if (likely(strcmp(__left, __right) op 0)) \
+ if (likely((__left) && (__right) && (strcmp(__left, __right) op 0))) \
break; \
\
\
@@ -1514,6 +1523,25 @@ do { \
return NULL; \
}
+/**
+ * KUNIT_ARRAY_PARAM_DESC() - Define test parameter generator from an array.
+ * @name: prefix for the test parameter generator function.
+ * @array: array of test parameters.
+ * @desc_member: structure member from array element to use as description
+ *
+ * Define function @name_gen_params which uses @array to generate parameters.
+ */
+#define KUNIT_ARRAY_PARAM_DESC(name, array, desc_member) \
+ static const void *name##_gen_params(const void *prev, char *desc) \
+ { \
+ typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (__next - (array) < ARRAY_SIZE((array))) { \
+ strscpy(desc, __next->desc_member, KUNIT_PARAM_DESC_SIZE); \
+ return __next; \
+ } \
+ return NULL; \
+ }
+
// TODO(dlatypov@google.com): consider eventually migrating users to explicitly
// include resource.h themselves if they need it.
#include <kunit/resource.h>
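A sketch of how the new KUNIT_ARRAY_PARAM_DESC() macro is meant to be used;
the case struct and its values are illustrative:

struct add_case {
	int a, b, sum;
	const char *desc;
};

static const struct add_case add_cases[] = {
	{ 1, 1, 2, "one plus one" },
	{ 2, 3, 5, "two plus three" },
};

/* Generates add_gen_params(), naming each case after its .desc member. */
KUNIT_ARRAY_PARAM_DESC(add, add_cases, desc);

static void add_test(struct kunit *test)
{
	const struct add_case *c = test->param_value;

	KUNIT_EXPECT_EQ(test, c->a + c->b, c->sum);
}

/* The case would then be registered with KUNIT_CASE_PARAM(add_test, add_gen_params). */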
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 4b9d8fb393..df32355e3e 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -86,7 +86,7 @@ void kvm_vcpu_pmu_resync_el0(void);
*/
#define kvm_pmu_update_vcpu_events(vcpu) \
do { \
- if (!has_vhe() && kvm_vcpu_has_pmu(vcpu)) \
+ if (!has_vhe() && kvm_arm_support_pmu_v3()) \
vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
} while (0)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4db54e928b..b7165e52b3 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,6 +15,7 @@
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
+#include <linux/node.h>
struct irq_domain;
struct irq_domain_ops;
@@ -424,6 +425,23 @@ extern int acpi_blacklisted(void);
extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
+#ifdef CONFIG_ACPI_THERMAL_LIB
+int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
+int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
+#endif
+
+#ifdef CONFIG_ACPI_HMAT
+int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord);
+#else
+static inline int acpi_get_genport_coordinates(u32 uid,
+ struct access_coordinate *coord)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
#ifdef CONFIG_ACPI_NUMA
int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);
@@ -756,6 +774,10 @@ const char *acpi_get_subsystem_id(acpi_handle handle);
#define ACPI_HANDLE(dev) (NULL)
#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
+/* Get rid of the -Wunused-variable for adev */
+#define acpi_dev_uid_match(adev, uid2) (adev && false)
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false)
+
#include <acpi/acpi_numa.h>
struct fwnode_handle;
@@ -772,17 +794,6 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
struct acpi_device;
-static inline bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2)
-{
- return false;
-}
-
-static inline bool
-acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
-{
- return false;
-}
-
static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
{
return -ENODEV;
diff --git a/include/linux/acpi_amd_wbrf.h b/include/linux/acpi_amd_wbrf.h
new file mode 100644
index 0000000000..898f31d536
--- /dev/null
+++ b/include/linux/acpi_amd_wbrf.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wifi Band Exclusion Interface (AMD ACPI Implementation)
+ * Copyright (C) 2023 Advanced Micro Devices
+ */
+
+#ifndef _ACPI_AMD_WBRF_H
+#define _ACPI_AMD_WBRF_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+/* The maximum number of frequency band ranges */
+#define MAX_NUM_OF_WBRF_RANGES 11
+
+/* Record actions */
+#define WBRF_RECORD_ADD 0x0
+#define WBRF_RECORD_REMOVE 0x1
+
+/**
+ * struct freq_band_range - Wifi frequency band range definition
+ * @start: start frequency point (in Hz)
+ * @end: end frequency point (in Hz)
+ */
+struct freq_band_range {
+ u64 start;
+ u64 end;
+};
+
+/**
+ * struct wbrf_ranges_in_out - wbrf ranges info
+ * @num_of_ranges: total number of band ranges in this struct
+ * @band_list: array of Wifi band ranges
+ */
+struct wbrf_ranges_in_out {
+ u64 num_of_ranges;
+ struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES];
+};
+
+/**
+ * enum wbrf_notifier_actions - wbrf notifier actions index
+ * @WBRF_CHANGED: there were frequency band updates. Consumers
+ * should retrieve the latest active frequency bands.
+ */
+enum wbrf_notifier_actions {
+ WBRF_CHANGED,
+};
+
+#if IS_ENABLED(CONFIG_AMD_WBRF)
+bool acpi_amd_wbrf_supported_producer(struct device *dev);
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in);
+bool acpi_amd_wbrf_supported_consumer(struct device *dev);
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out);
+int amd_wbrf_register_notifier(struct notifier_block *nb);
+int amd_wbrf_unregister_notifier(struct notifier_block *nb);
+#else
+static inline
+bool acpi_amd_wbrf_supported_consumer(struct device *dev)
+{
+ return false;
+}
+
+static inline
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in)
+{
+ return -ENODEV;
+}
+
+static inline
+bool acpi_amd_wbrf_supported_producer(struct device *dev)
+{
+ return false;
+}
+static inline
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_register_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_unregister_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_AMD_WBRF */
+
+#endif /* _ACPI_AMD_WBRF_H */
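A consumer-side sketch of the notifier flow declared above; the device
pointer and callback names are illustrative, and the device is assumed to
have passed acpi_amd_wbrf_supported_consumer():

#include <linux/acpi_amd_wbrf.h>

static struct device *example_consumer_dev;

static int example_wbrf_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct wbrf_ranges_in_out out = {};

	if (action != WBRF_CHANGED)
		return NOTIFY_DONE;

	/* Re-read the currently active frequency bands. */
	if (!amd_wbrf_retrieve_freq_band(example_consumer_dev, &out)) {
		/* react to out.num_of_ranges entries in out.band_list */
	}

	return NOTIFY_OK;
}

static struct notifier_block example_wbrf_nb = {
	.notifier_call = example_wbrf_notify,
};

/* Registered once with amd_wbrf_register_notifier(&example_wbrf_nb). */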
diff --git a/include/linux/aer.h b/include/linux/aer.h
index f6ea2f57d8..ae0fae70d4 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -19,10 +19,10 @@
struct pci_dev;
struct aer_header_log_regs {
- unsigned int dw0;
- unsigned int dw1;
- unsigned int dw2;
- unsigned int dw3;
+ u32 dw0;
+ u32 dw1;
+ u32 dw2;
+ u32 dw3;
};
struct aer_capability_regs {
diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h
deleted file mode 100644
index 421b0fa90d..0000000000
--- a/include/linux/amba/clcd-regs.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * David A Rusling
- *
- * Copyright (C) 2001 ARM Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef AMBA_CLCD_REGS_H
-#define AMBA_CLCD_REGS_H
-
-/*
- * CLCD Controller Internal Register addresses
- */
-#define CLCD_TIM0 0x00000000
-#define CLCD_TIM1 0x00000004
-#define CLCD_TIM2 0x00000008
-#define CLCD_TIM3 0x0000000c
-#define CLCD_UBAS 0x00000010
-#define CLCD_LBAS 0x00000014
-
-#define CLCD_PL110_IENB 0x00000018
-#define CLCD_PL110_CNTL 0x0000001c
-#define CLCD_PL110_STAT 0x00000020
-#define CLCD_PL110_INTR 0x00000024
-#define CLCD_PL110_UCUR 0x00000028
-#define CLCD_PL110_LCUR 0x0000002C
-
-#define CLCD_PL111_CNTL 0x00000018
-#define CLCD_PL111_IENB 0x0000001c
-#define CLCD_PL111_RIS 0x00000020
-#define CLCD_PL111_MIS 0x00000024
-#define CLCD_PL111_ICR 0x00000028
-#define CLCD_PL111_UCUR 0x0000002c
-#define CLCD_PL111_LCUR 0x00000030
-
-#define CLCD_PALL 0x00000200
-#define CLCD_PALETTE 0x00000200
-
-#define TIM2_PCD_LO_MASK GENMASK(4, 0)
-#define TIM2_PCD_LO_BITS 5
-#define TIM2_CLKSEL (1 << 5)
-#define TIM2_ACB_MASK GENMASK(10, 6)
-#define TIM2_IVS (1 << 11)
-#define TIM2_IHS (1 << 12)
-#define TIM2_IPC (1 << 13)
-#define TIM2_IOE (1 << 14)
-#define TIM2_BCD (1 << 26)
-#define TIM2_PCD_HI_MASK GENMASK(31, 27)
-#define TIM2_PCD_HI_BITS 5
-#define TIM2_PCD_HI_SHIFT 27
-
-#define CNTL_LCDEN (1 << 0)
-#define CNTL_LCDBPP1 (0 << 1)
-#define CNTL_LCDBPP2 (1 << 1)
-#define CNTL_LCDBPP4 (2 << 1)
-#define CNTL_LCDBPP8 (3 << 1)
-#define CNTL_LCDBPP16 (4 << 1)
-#define CNTL_LCDBPP16_565 (6 << 1)
-#define CNTL_LCDBPP16_444 (7 << 1)
-#define CNTL_LCDBPP24 (5 << 1)
-#define CNTL_LCDBW (1 << 4)
-#define CNTL_LCDTFT (1 << 5)
-#define CNTL_LCDMONO8 (1 << 6)
-#define CNTL_LCDDUAL (1 << 7)
-#define CNTL_BGR (1 << 8)
-#define CNTL_BEBO (1 << 9)
-#define CNTL_BEPO (1 << 10)
-#define CNTL_LCDPWR (1 << 11)
-#define CNTL_LCDVCOMP(x) ((x) << 12)
-#define CNTL_LDMAFIFOTIME (1 << 15)
-#define CNTL_WATERMARK (1 << 16)
-
-/* ST Microelectronics variant bits */
-#define CNTL_ST_1XBPP_444 0x0
-#define CNTL_ST_1XBPP_5551 (1 << 17)
-#define CNTL_ST_1XBPP_565 (1 << 18)
-#define CNTL_ST_CDWID_12 0x0
-#define CNTL_ST_CDWID_16 (1 << 19)
-#define CNTL_ST_CDWID_18 (1 << 20)
-#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20))
-#define CNTL_ST_CEAEN (1 << 21)
-#define CNTL_ST_LCDBPP24_PACKED (6 << 1)
-
-#endif /* AMBA_CLCD_REGS_H */
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
deleted file mode 100644
index b6e0cbeaf5..0000000000
--- a/include/linux/amba/clcd.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * linux/include/asm-arm/hardware/amba_clcd.h -- Integrator LCD panel.
- *
- * David A Rusling
- *
- * Copyright (C) 2001 ARM Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-#include <linux/fb.h>
-#include <linux/amba/clcd-regs.h>
-
-enum {
- /* individual formats */
- CLCD_CAP_RGB444 = (1 << 0),
- CLCD_CAP_RGB5551 = (1 << 1),
- CLCD_CAP_RGB565 = (1 << 2),
- CLCD_CAP_RGB888 = (1 << 3),
- CLCD_CAP_BGR444 = (1 << 4),
- CLCD_CAP_BGR5551 = (1 << 5),
- CLCD_CAP_BGR565 = (1 << 6),
- CLCD_CAP_BGR888 = (1 << 7),
-
- /* connection layouts */
- CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444,
- CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551,
- CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565,
- CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888,
-
- /* red/blue ordering */
- CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 |
- CLCD_CAP_RGB565 | CLCD_CAP_RGB888,
- CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 |
- CLCD_CAP_BGR565 | CLCD_CAP_BGR888,
-
- CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB,
-};
-
-struct backlight_device;
-
-struct clcd_panel {
- struct fb_videomode mode;
- signed short width; /* width in mm */
- signed short height; /* height in mm */
- u32 tim2;
- u32 tim3;
- u32 cntl;
- u32 caps;
- unsigned int bpp:8,
- fixedtimings:1,
- grayscale:1;
- unsigned int connector;
- struct backlight_device *backlight;
- /*
- * If the B/R lines are switched between the CLCD
- * and the panel we need to know this and not try to
- * compensate with the BGR bit in the control register.
- */
- bool bgr_connection;
-};
-
-struct clcd_regs {
- u32 tim0;
- u32 tim1;
- u32 tim2;
- u32 tim3;
- u32 cntl;
- unsigned long pixclock;
-};
-
-struct clcd_fb;
-
-/*
- * the board-type specific routines
- */
-struct clcd_board {
- const char *name;
-
- /*
- * Optional. Hardware capability flags.
- */
- u32 caps;
-
- /*
- * Optional. Check whether the var structure is acceptable
- * for this display.
- */
- int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var);
-
- /*
- * Compulsory. Decode fb->fb.var into regs->*. In the case of
- * fixed timing, set regs->* to the register values required.
- */
- void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs);
-
- /*
- * Optional. Disable any extra display hardware.
- */
- void (*disable)(struct clcd_fb *);
-
- /*
- * Optional. Enable any extra display hardware.
- */
- void (*enable)(struct clcd_fb *);
-
- /*
- * Setup platform specific parts of CLCD driver
- */
- int (*setup)(struct clcd_fb *);
-
- /*
- * mmap the framebuffer memory
- */
- int (*mmap)(struct clcd_fb *, struct vm_area_struct *);
-
- /*
- * Remove platform specific parts of CLCD driver
- */
- void (*remove)(struct clcd_fb *);
-};
-
-struct amba_device;
-struct clk;
-
-/* this data structure describes each frame buffer device we find */
-struct clcd_fb {
- struct fb_info fb;
- struct amba_device *dev;
- struct clk *clk;
- struct clcd_panel *panel;
- struct clcd_board *board;
- void *board_data;
- void __iomem *regs;
- u16 off_ienb;
- u16 off_cntl;
- u32 clcd_cntl;
- u32 cmap[16];
- bool clk_enabled;
-};
-
-static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
-{
- struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val, cpl;
-
- /*
- * Program the CLCD controller registers and start the CLCD
- */
- val = ((var->xres / 16) - 1) << 2;
- val |= (var->hsync_len - 1) << 8;
- val |= (var->right_margin - 1) << 16;
- val |= (var->left_margin - 1) << 24;
- regs->tim0 = val;
-
- val = var->yres;
- if (fb->panel->cntl & CNTL_LCDDUAL)
- val /= 2;
- val -= 1;
- val |= (var->vsync_len - 1) << 10;
- val |= var->lower_margin << 16;
- val |= var->upper_margin << 24;
- regs->tim1 = val;
-
- val = fb->panel->tim2;
- val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS;
- val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS;
-
- cpl = var->xres_virtual;
- if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */
- /* / 1 */;
- else if (!var->grayscale) /* STN color */
- cpl = cpl * 8 / 3;
- else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */
- cpl /= 8;
- else /* STN monochrome, 4bit */
- cpl /= 4;
-
- regs->tim2 = val | ((cpl - 1) << 16);
-
- regs->tim3 = fb->panel->tim3;
-
- val = fb->panel->cntl;
- if (var->grayscale)
- val |= CNTL_LCDBW;
-
- if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) {
- /*
- * if board and panel supply capabilities, we can support
- * changing BGR/RGB depending on supplied parameters. Here
- * we switch to what the framebuffer is providing if need
- * be, so if the framebuffer is BGR but the display connection
- * is RGB (first case) we switch it around. Vice versa mutatis
- * mutandis if the framebuffer is RGB but the display connection
- * is BGR, we flip it around.
- */
- if (var->red.offset == 0)
- val &= ~CNTL_BGR;
- else
- val |= CNTL_BGR;
- if (fb->panel->bgr_connection)
- val ^= CNTL_BGR;
- }
-
- switch (var->bits_per_pixel) {
- case 1:
- val |= CNTL_LCDBPP1;
- break;
- case 2:
- val |= CNTL_LCDBPP2;
- break;
- case 4:
- val |= CNTL_LCDBPP4;
- break;
- case 8:
- val |= CNTL_LCDBPP8;
- break;
- case 16:
- /*
- * PL110 cannot choose between 5551 and 565 modes in its
- * control register. It is possible to use 565 with
- * custom external wiring.
- */
- if (amba_part(fb->dev) == 0x110 ||
- var->green.length == 5)
- val |= CNTL_LCDBPP16;
- else if (var->green.length == 6)
- val |= CNTL_LCDBPP16_565;
- else
- val |= CNTL_LCDBPP16_444;
- break;
- case 32:
- val |= CNTL_LCDBPP24;
- break;
- }
-
- regs->cntl = val;
- regs->pixclock = var->pixclock;
-}
-
-static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
-{
- var->xres_virtual = var->xres = (var->xres + 15) & ~15;
- var->yres_virtual = var->yres = (var->yres + 1) & ~1;
-
-#define CHECK(e,l,h) (var->e < l || var->e > h)
- if (CHECK(right_margin, (5+1), 256) || /* back porch */
- CHECK(left_margin, (5+1), 256) || /* front porch */
- CHECK(hsync_len, (5+1), 256) ||
- var->xres > 4096 ||
- var->lower_margin > 255 || /* back porch */
- var->upper_margin > 255 || /* front porch */
- var->vsync_len > 32 ||
- var->yres > 1024)
- return -EINVAL;
-#undef CHECK
-
- /* single panel mode: PCD = max(PCD, 1) */
- /* dual panel mode: PCD = max(PCD, 5) */
-
- /*
- * You can't change the grayscale setting, and
- * we can only do non-interlaced video.
- */
- if (var->grayscale != fb->fb.var.grayscale ||
- (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
- return -EINVAL;
-
-#define CHECK(e) (var->e != fb->fb.var.e)
- if (fb->panel->fixedtimings &&
- (CHECK(xres) ||
- CHECK(yres) ||
- CHECK(bits_per_pixel) ||
- CHECK(pixclock) ||
- CHECK(left_margin) ||
- CHECK(right_margin) ||
- CHECK(upper_margin) ||
- CHECK(lower_margin) ||
- CHECK(hsync_len) ||
- CHECK(vsync_len) ||
- CHECK(sync)))
- return -EINVAL;
-#undef CHECK
-
- var->nonstd = 0;
- var->accel_flags = 0;
-
- return 0;
-}
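
The TIM0 packing in the deleted clcdfb_decode() above is easy to sanity-check by hand. A minimal userspace sketch of the same arithmetic, using hypothetical 640x480 VGA-style horizontal timings (illustrative values only):

#include <stdint.h>
#include <assert.h>

/* Pack horizontal timings the way the removed clcdfb_decode() did:
 *   PPL = (xres / 16) - 1  -> bits [7:2]
 *   HSW = hsync_len - 1    -> bits [15:8]
 *   HFP = right_margin - 1 -> bits [23:16]
 *   HBP = left_margin - 1  -> bits [31:24]
 */
static uint32_t clcd_tim0(uint32_t xres, uint32_t hsync_len,
			  uint32_t right_margin, uint32_t left_margin)
{
	uint32_t val;

	val  = ((xres / 16) - 1) << 2;
	val |= (hsync_len - 1) << 8;
	val |= (right_margin - 1) << 16;
	val |= (left_margin - 1) << 24;
	return val;
}

int main(void)
{
	/* 640x480 VGA-style horizontal timings (hypothetical) */
	assert(clcd_tim0(640, 96, 16, 48) == 0x2f0f5f9c);
	return 0;
}
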
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index a1307b58cc..9120de05ea 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -10,6 +10,11 @@
#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
+#ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#endif
+
#include <linux/types.h>
/* -------------------------------------------------------------------------------
@@ -70,141 +75,145 @@
#define ZX_UART011_ICR 0x4c
#define ZX_UART011_DMACR 0x50
-#define UART011_DR_OE (1 << 11)
-#define UART011_DR_BE (1 << 10)
-#define UART011_DR_PE (1 << 9)
-#define UART011_DR_FE (1 << 8)
-
-#define UART01x_RSR_OE 0x08
-#define UART01x_RSR_BE 0x04
-#define UART01x_RSR_PE 0x02
-#define UART01x_RSR_FE 0x01
-
-#define UART011_FR_RI 0x100
-#define UART011_FR_TXFE 0x080
-#define UART011_FR_RXFF 0x040
-#define UART01x_FR_TXFF 0x020
-#define UART01x_FR_RXFE 0x010
-#define UART01x_FR_BUSY 0x008
-#define UART01x_FR_DCD 0x004
-#define UART01x_FR_DSR 0x002
-#define UART01x_FR_CTS 0x001
+#define UART011_DR_OE BIT(11)
+#define UART011_DR_BE BIT(10)
+#define UART011_DR_PE BIT(9)
+#define UART011_DR_FE BIT(8)
+
+#define UART01x_RSR_OE BIT(3)
+#define UART01x_RSR_BE BIT(2)
+#define UART01x_RSR_PE BIT(1)
+#define UART01x_RSR_FE BIT(0)
+
+#define UART011_FR_RI BIT(8)
+#define UART011_FR_TXFE BIT(7)
+#define UART011_FR_RXFF BIT(6)
+#define UART01x_FR_TXFF (1 << 5) /* used in ASM */
+#define UART01x_FR_RXFE BIT(4)
+#define UART01x_FR_BUSY (1 << 3) /* used in ASM */
+#define UART01x_FR_DCD BIT(2)
+#define UART01x_FR_DSR BIT(1)
+#define UART01x_FR_CTS BIT(0)
#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
/*
* Some bits of Flag Register on ZTE device have different position from
* standard ones.
*/
-#define ZX_UART01x_FR_BUSY 0x100
-#define ZX_UART01x_FR_DSR 0x008
-#define ZX_UART01x_FR_CTS 0x002
-#define ZX_UART011_FR_RI 0x001
-
-#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
-#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */
-#define UART011_CR_OUT2 0x2000 /* OUT2 */
-#define UART011_CR_OUT1 0x1000 /* OUT1 */
-#define UART011_CR_RTS 0x0800 /* RTS */
-#define UART011_CR_DTR 0x0400 /* DTR */
-#define UART011_CR_RXE 0x0200 /* receive enable */
-#define UART011_CR_TXE 0x0100 /* transmit enable */
-#define UART011_CR_LBE 0x0080 /* loopback enable */
-#define UART010_CR_RTIE 0x0040
-#define UART010_CR_TIE 0x0020
-#define UART010_CR_RIE 0x0010
-#define UART010_CR_MSIE 0x0008
-#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */
-#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */
-#define UART01x_CR_SIREN 0x0002 /* SIR enable */
-#define UART01x_CR_UARTEN 0x0001 /* UART enable */
-
-#define UART011_LCRH_SPS 0x80
+#define ZX_UART01x_FR_BUSY BIT(8)
+#define ZX_UART01x_FR_DSR BIT(3)
+#define ZX_UART01x_FR_CTS BIT(1)
+#define ZX_UART011_FR_RI BIT(0)
+
+#define UART011_CR_CTSEN BIT(15) /* CTS hardware flow control */
+#define UART011_CR_RTSEN BIT(14) /* RTS hardware flow control */
+#define UART011_CR_OUT2 BIT(13) /* OUT2 */
+#define UART011_CR_OUT1 BIT(12) /* OUT1 */
+#define UART011_CR_RTS BIT(11) /* RTS */
+#define UART011_CR_DTR BIT(10) /* DTR */
+#define UART011_CR_RXE BIT(9) /* receive enable */
+#define UART011_CR_TXE BIT(8) /* transmit enable */
+#define UART011_CR_LBE BIT(7) /* loopback enable */
+#define UART010_CR_RTIE BIT(6)
+#define UART010_CR_TIE BIT(5)
+#define UART010_CR_RIE BIT(4)
+#define UART010_CR_MSIE BIT(3)
+#define ST_UART011_CR_OVSFACT BIT(3) /* Oversampling factor */
+#define UART01x_CR_IIRLP BIT(2) /* SIR low power mode */
+#define UART01x_CR_SIREN BIT(1) /* SIR enable */
+#define UART01x_CR_UARTEN BIT(0) /* UART enable */
+
+#define UART011_LCRH_SPS BIT(7)
#define UART01x_LCRH_WLEN_8 0x60
#define UART01x_LCRH_WLEN_7 0x40
#define UART01x_LCRH_WLEN_6 0x20
#define UART01x_LCRH_WLEN_5 0x00
-#define UART01x_LCRH_FEN 0x10
-#define UART01x_LCRH_STP2 0x08
-#define UART01x_LCRH_EPS 0x04
-#define UART01x_LCRH_PEN 0x02
-#define UART01x_LCRH_BRK 0x01
-
-#define ST_UART011_DMAWM_RX_1 (0 << 3)
-#define ST_UART011_DMAWM_RX_2 (1 << 3)
-#define ST_UART011_DMAWM_RX_4 (2 << 3)
-#define ST_UART011_DMAWM_RX_8 (3 << 3)
-#define ST_UART011_DMAWM_RX_16 (4 << 3)
-#define ST_UART011_DMAWM_RX_32 (5 << 3)
-#define ST_UART011_DMAWM_RX_48 (6 << 3)
-#define ST_UART011_DMAWM_TX_1 0
-#define ST_UART011_DMAWM_TX_2 1
-#define ST_UART011_DMAWM_TX_4 2
-#define ST_UART011_DMAWM_TX_8 3
-#define ST_UART011_DMAWM_TX_16 4
-#define ST_UART011_DMAWM_TX_32 5
-#define ST_UART011_DMAWM_TX_48 6
-
-#define UART010_IIR_RTIS 0x08
-#define UART010_IIR_TIS 0x04
-#define UART010_IIR_RIS 0x02
-#define UART010_IIR_MIS 0x01
-
-#define UART011_IFLS_RX1_8 (0 << 3)
-#define UART011_IFLS_RX2_8 (1 << 3)
-#define UART011_IFLS_RX4_8 (2 << 3)
-#define UART011_IFLS_RX6_8 (3 << 3)
-#define UART011_IFLS_RX7_8 (4 << 3)
-#define UART011_IFLS_TX1_8 (0 << 0)
-#define UART011_IFLS_TX2_8 (1 << 0)
-#define UART011_IFLS_TX4_8 (2 << 0)
-#define UART011_IFLS_TX6_8 (3 << 0)
-#define UART011_IFLS_TX7_8 (4 << 0)
+#define UART01x_LCRH_FEN BIT(4)
+#define UART01x_LCRH_STP2 BIT(3)
+#define UART01x_LCRH_EPS BIT(2)
+#define UART01x_LCRH_PEN BIT(1)
+#define UART01x_LCRH_BRK BIT(0)
+
+#define ST_UART011_DMAWM_RX GENMASK(5, 3)
+#define ST_UART011_DMAWM_RX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 0)
+#define ST_UART011_DMAWM_RX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 1)
+#define ST_UART011_DMAWM_RX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 2)
+#define ST_UART011_DMAWM_RX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 3)
+#define ST_UART011_DMAWM_RX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 4)
+#define ST_UART011_DMAWM_RX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 5)
+#define ST_UART011_DMAWM_RX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 6)
+#define ST_UART011_DMAWM_TX GENMASK(2, 0)
+#define ST_UART011_DMAWM_TX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 0)
+#define ST_UART011_DMAWM_TX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 1)
+#define ST_UART011_DMAWM_TX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 2)
+#define ST_UART011_DMAWM_TX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 3)
+#define ST_UART011_DMAWM_TX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 4)
+#define ST_UART011_DMAWM_TX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 5)
+#define ST_UART011_DMAWM_TX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 6)
+
+#define UART010_IIR_RTIS BIT(3)
+#define UART010_IIR_TIS BIT(2)
+#define UART010_IIR_RIS BIT(1)
+#define UART010_IIR_MIS BIT(0)
+
+#define UART011_IFLS_RXIFLSEL GENMASK(5, 3)
+#define UART011_IFLS_RX1_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 0)
+#define UART011_IFLS_RX2_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 1)
+#define UART011_IFLS_RX4_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 2)
+#define UART011_IFLS_RX6_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 3)
+#define UART011_IFLS_RX7_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 4)
+#define UART011_IFLS_TXIFLSEL GENMASK(2, 0)
+#define UART011_IFLS_TX1_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 0)
+#define UART011_IFLS_TX2_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 1)
+#define UART011_IFLS_TX4_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 2)
+#define UART011_IFLS_TX6_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 3)
+#define UART011_IFLS_TX7_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 4)
/* special values for ST vendor with deeper fifo */
-#define UART011_IFLS_RX_HALF (5 << 3)
-#define UART011_IFLS_TX_HALF (5 << 0)
-
-#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */
-#define UART011_BEIM (1 << 9) /* break error interrupt mask */
-#define UART011_PEIM (1 << 8) /* parity error interrupt mask */
-#define UART011_FEIM (1 << 7) /* framing error interrupt mask */
-#define UART011_RTIM (1 << 6) /* receive timeout interrupt mask */
-#define UART011_TXIM (1 << 5) /* transmit interrupt mask */
-#define UART011_RXIM (1 << 4) /* receive interrupt mask */
-#define UART011_DSRMIM (1 << 3) /* DSR interrupt mask */
-#define UART011_DCDMIM (1 << 2) /* DCD interrupt mask */
-#define UART011_CTSMIM (1 << 1) /* CTS interrupt mask */
-#define UART011_RIMIM (1 << 0) /* RI interrupt mask */
-
-#define UART011_OEIS (1 << 10) /* overrun error interrupt status */
-#define UART011_BEIS (1 << 9) /* break error interrupt status */
-#define UART011_PEIS (1 << 8) /* parity error interrupt status */
-#define UART011_FEIS (1 << 7) /* framing error interrupt status */
-#define UART011_RTIS (1 << 6) /* receive timeout interrupt status */
-#define UART011_TXIS (1 << 5) /* transmit interrupt status */
-#define UART011_RXIS (1 << 4) /* receive interrupt status */
-#define UART011_DSRMIS (1 << 3) /* DSR interrupt status */
-#define UART011_DCDMIS (1 << 2) /* DCD interrupt status */
-#define UART011_CTSMIS (1 << 1) /* CTS interrupt status */
-#define UART011_RIMIS (1 << 0) /* RI interrupt status */
-
-#define UART011_OEIC (1 << 10) /* overrun error interrupt clear */
-#define UART011_BEIC (1 << 9) /* break error interrupt clear */
-#define UART011_PEIC (1 << 8) /* parity error interrupt clear */
-#define UART011_FEIC (1 << 7) /* framing error interrupt clear */
-#define UART011_RTIC (1 << 6) /* receive timeout interrupt clear */
-#define UART011_TXIC (1 << 5) /* transmit interrupt clear */
-#define UART011_RXIC (1 << 4) /* receive interrupt clear */
-#define UART011_DSRMIC (1 << 3) /* DSR interrupt clear */
-#define UART011_DCDMIC (1 << 2) /* DCD interrupt clear */
-#define UART011_CTSMIC (1 << 1) /* CTS interrupt clear */
-#define UART011_RIMIC (1 << 0) /* RI interrupt clear */
-
-#define UART011_DMAONERR (1 << 2) /* disable dma on error */
-#define UART011_TXDMAE (1 << 1) /* enable transmit dma */
-#define UART011_RXDMAE (1 << 0) /* enable receive dma */
-
-#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE)
-#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
+#define UART011_IFLS_RX_HALF FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 5)
+#define UART011_IFLS_TX_HALF FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 5)
+
+#define UART011_OEIM BIT(10) /* overrun error interrupt mask */
+#define UART011_BEIM BIT(9) /* break error interrupt mask */
+#define UART011_PEIM BIT(8) /* parity error interrupt mask */
+#define UART011_FEIM BIT(7) /* framing error interrupt mask */
+#define UART011_RTIM BIT(6) /* receive timeout interrupt mask */
+#define UART011_TXIM BIT(5) /* transmit interrupt mask */
+#define UART011_RXIM BIT(4) /* receive interrupt mask */
+#define UART011_DSRMIM BIT(3) /* DSR interrupt mask */
+#define UART011_DCDMIM BIT(2) /* DCD interrupt mask */
+#define UART011_CTSMIM BIT(1) /* CTS interrupt mask */
+#define UART011_RIMIM BIT(0) /* RI interrupt mask */
+
+#define UART011_OEIS BIT(10) /* overrun error interrupt status */
+#define UART011_BEIS BIT(9) /* break error interrupt status */
+#define UART011_PEIS BIT(8) /* parity error interrupt status */
+#define UART011_FEIS BIT(7) /* framing error interrupt status */
+#define UART011_RTIS BIT(6) /* receive timeout interrupt status */
+#define UART011_TXIS BIT(5) /* transmit interrupt status */
+#define UART011_RXIS BIT(4) /* receive interrupt status */
+#define UART011_DSRMIS BIT(3) /* DSR interrupt status */
+#define UART011_DCDMIS BIT(2) /* DCD interrupt status */
+#define UART011_CTSMIS BIT(1) /* CTS interrupt status */
+#define UART011_RIMIS BIT(0) /* RI interrupt status */
+
+#define UART011_OEIC BIT(10) /* overrun error interrupt clear */
+#define UART011_BEIC BIT(9) /* break error interrupt clear */
+#define UART011_PEIC BIT(8) /* parity error interrupt clear */
+#define UART011_FEIC BIT(7) /* framing error interrupt clear */
+#define UART011_RTIC BIT(6) /* receive timeout interrupt clear */
+#define UART011_TXIC BIT(5) /* transmit interrupt clear */
+#define UART011_RXIC BIT(4) /* receive interrupt clear */
+#define UART011_DSRMIC BIT(3) /* DSR interrupt clear */
+#define UART011_DCDMIC BIT(2) /* DCD interrupt clear */
+#define UART011_CTSMIC BIT(1) /* CTS interrupt clear */
+#define UART011_RIMIC BIT(0) /* RI interrupt clear */
+
+#define UART011_DMAONERR BIT(2) /* disable dma on error */
+#define UART011_TXDMAE BIT(1) /* enable transmit dma */
+#define UART011_RXDMAE BIT(0) /* enable receive dma */
+
+#define UART01x_RSR_ANY (UART01x_RSR_OE | UART01x_RSR_BE | UART01x_RSR_PE | UART01x_RSR_FE)
+#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD | UART01x_FR_DSR | UART01x_FR_CTS)
#ifndef __ASSEMBLY__
struct amba_device; /* in uncompress this is included but amba/bus.h is not */
@@ -220,8 +229,8 @@ struct amba_pl011_data {
bool dma_rx_poll_enable;
unsigned int dma_rx_poll_rate;
unsigned int dma_rx_poll_timeout;
- void (*init) (void);
- void (*exit) (void);
+ void (*init)(void);
+ void (*exit)(void);
};
#endif
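
The bitfield conversion in the hunk above is mechanical: the BIT()/GENMASK()/FIELD_PREP_CONST() forms expand to the old literal values. A standalone sketch checking a few of the equivalences; the macro stand-ins below are simplified userspace approximations assuming a 64-bit unsigned long (in the kernel they come from <linux/bits.h> and <linux/bitfield.h>):

#include <assert.h>

/* simplified stand-ins for the kernel helpers, for illustration */
#define BIT(n)			(1UL << (n))
#define GENMASK(h, l)		(((~0UL) << (l)) & (~0UL >> (63 - (h))))
#define FIELD_PREP_CONST(m, v)	(((unsigned long)(v) << __builtin_ctzl(m)) & (m))

int main(void)
{
	/* new-style definitions match the old literal values */
	assert(BIT(15) == 0x8000);				 /* UART011_CR_CTSEN */
	assert(FIELD_PREP_CONST(GENMASK(5, 3), 2) == (2 << 3));	 /* ST_UART011_DMAWM_RX_4 */
	assert(FIELD_PREP_CONST(GENMASK(2, 0), 5) == (5 << 0));	 /* UART011_IFLS_TX_HALF */
	return 0;
}
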
diff --git a/include/linux/amd-pmf-io.h b/include/linux/amd-pmf-io.h
new file mode 100644
index 0000000000..b4f8182052
--- /dev/null
+++ b/include/linux/amd-pmf-io.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Platform Management Framework Interface
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#ifndef AMD_PMF_IO_H
+#define AMD_PMF_IO_H
+
+#include <linux/types.h>
+
+/**
+ * enum sfh_message_type - Query the SFH message type
+ * @MT_HPD: Message ID to know the Human presence info from MP2 FW
+ * @MT_ALS: Message ID to know the Ambient light info from MP2 FW
+ */
+enum sfh_message_type {
+ MT_HPD,
+ MT_ALS,
+};
+
+/**
+ * enum sfh_hpd_info - Query the Human presence information
+ * @SFH_NOT_DETECTED: Check the HPD connection information from MP2 FW
+ * @SFH_USER_PRESENT: Check if the user is present from HPD sensor
+ * @SFH_USER_AWAY: Check if the user is away from HPD sensor
+ */
+enum sfh_hpd_info {
+ SFH_NOT_DETECTED,
+ SFH_USER_PRESENT,
+ SFH_USER_AWAY,
+};
+
+/**
+ * struct amd_sfh_info - get HPD sensor info from MP2 FW
+ * @ambient_light: Populates the ambient light information
+ * @user_present: Populates the user presence information
+ */
+struct amd_sfh_info {
+ u32 ambient_light;
+ u8 user_present;
+};
+
+int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op);
+#endif
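
A hypothetical caller of the new interface, querying human presence via MT_HPD (pmf_user_present() is made up for illustration; amd_get_sfh_info() and the types come from the header above):

#include <linux/amd-pmf-io.h>

/* Hypothetical PMF-side helper: returns true if a user is present. */
static bool pmf_user_present(void)
{
	struct amd_sfh_info info = {};

	if (amd_get_sfh_info(&info, MT_HPD))
		return false;	/* MP2 query failed */

	return info.user_present == SFH_USER_PRESENT;
}
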
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index 5deaddbd79..93a5f16d03 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -15,13 +15,13 @@ struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
-struct file *anon_inode_getfile_secure(const char *name,
+struct file *anon_inode_create_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags,
const struct inode *context_inode);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
-int anon_inode_getfd_secure(const char *name,
+int anon_inode_create_getfd(const char *name,
const struct file_operations *fops,
void *priv, int flags,
const struct inode *context_inode);
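
The rename is one-for-one; callers only change the identifier. A sketch under that assumption (my_fops and the surrounding function are hypothetical):

static int my_create_fd(void *priv, const struct inode *context_inode)
{
	/* 6.7 spelling was anon_inode_getfd_secure(); arguments unchanged */
	return anon_inode_create_getfd("[myfd]", &my_fops, priv,
				       O_CLOEXEC, context_inode);
}
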
diff --git a/include/linux/apple-mailbox.h b/include/linux/apple-mailbox.h
deleted file mode 100644
index 720fbb7029..0000000000
--- a/include/linux/apple-mailbox.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
-/*
- * Apple mailbox message format
- *
- * Copyright (C) 2021 The Asahi Linux Contributors
- */
-
-#ifndef _LINUX_APPLE_MAILBOX_H_
-#define _LINUX_APPLE_MAILBOX_H_
-
-#include <linux/types.h>
-
-/* encodes a single 96bit message sent over the single channel */
-struct apple_mbox_msg {
- u64 msg0;
- u32 msg1;
-};
-
-#endif
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index a07b510e7d..a63d61ca55 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -27,6 +27,13 @@ static inline unsigned long topology_get_cpu_scale(int cpu)
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+DECLARE_PER_CPU(unsigned long, capacity_freq_ref);
+
+static inline unsigned long topology_get_freq_ref(int cpu)
+{
+ return per_cpu(capacity_freq_ref, cpu);
+}
+
DECLARE_PER_CPU(unsigned long, arch_freq_scale);
static inline unsigned long topology_get_freq_scale(int cpu)
@@ -92,6 +99,7 @@ void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
+void freq_inv_set_max_ratio(int cpu, u64 max_rate);
#endif
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
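
topology_get_freq_ref() exposes the new per-CPU reference frequency. A rough sketch of deriving a frequency-invariance ratio from it — the formula here is illustrative, not the exact kernel computation:

#include <linux/arch_topology.h>

/* Illustrative: scale a current frequency against the per-CPU
 * reference, in the scheduler's fixed-point format
 * (SCHED_CAPACITY_SCALE == 1024).
 */
static unsigned long freq_scale_of(int cpu, unsigned long cur_freq_khz)
{
	unsigned long ref = topology_get_freq_ref(cpu);

	if (!ref)
		return SCHED_CAPACITY_SCALE;

	return (cur_freq_khz << SCHED_CAPACITY_SHIFT) / ref;
}
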
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 51b1b7054a..0050ef288a 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -36,6 +36,7 @@ struct mqstat;
struct audit_watch;
struct audit_tree;
struct sk_buff;
+struct kern_ipc_perm;
struct audit_krule {
u32 pflags;
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 99ae7960a8..8e177b67e8 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -119,6 +119,7 @@ enum virtchnl_ops {
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
/* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
@@ -912,6 +913,29 @@ struct virtchnl_rss_hena {
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm {
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_HFUNC
+ * VF sends this message to configure the RSS hash function. Only supported
+ * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation.
+ * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
+ * by the PF.
+ */
+struct virtchnl_rss_hfunc {
+ u16 vsi_id;
+ u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
+ u32 reserved;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);
+
/* VIRTCHNL_OP_ENABLE_CHANNELS
* VIRTCHNL_OP_DISABLE_CHANNELS
* VF sends these messages to enable or disable channels based on
@@ -1096,14 +1120,6 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
-/* Type of RSS algorithm */
-enum virtchnl_rss_algorithm {
- VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
- VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
- VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
- VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
-};
-
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
@@ -1543,6 +1559,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
vrl->lut_entries);
}
break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ valid_len = sizeof(struct virtchnl_rss_hfunc);
+ break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
break;
case VIRTCHNL_OP_SET_RSS_HENA:
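
The new message is exactly 8 bytes on the wire, which VIRTCHNL_CHECK_STRUCT_LEN() enforces. A sketch of a VF issuing the opcode (vf_send_msg() is a placeholder for the driver's mailbox transport):

static_assert(sizeof(struct virtchnl_rss_hfunc) == 8);

static int vf_config_rss_hfunc(u16 vsi_id, enum virtchnl_rss_algorithm alg)
{
	struct virtchnl_rss_hfunc msg = {
		.vsi_id		= vsi_id,
		.rss_algorithm	= alg,	/* e.g. VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC */
	};

	/* placeholder for the driver's PF<->VF mailbox send */
	return vf_send_msg(VIRTCHNL_OP_CONFIG_RSS_HFUNC, &msg, sizeof(msg));
}
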
diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h
new file mode 100644
index 0000000000..3f1fe1774f
--- /dev/null
+++ b/include/linux/backing-file.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common helpers for stackable filesystems and backing files.
+ *
+ * Copyright (C) 2023 CTERA Networks.
+ */
+
+#ifndef _LINUX_BACKING_FILE_H
+#define _LINUX_BACKING_FILE_H
+
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/fs.h>
+
+struct backing_file_ctx {
+ const struct cred *cred;
+ struct file *user_file;
+ void (*accessed)(struct file *);
+ void (*end_write)(struct file *);
+};
+
+struct file *backing_file_open(const struct path *user_path, int flags,
+ const struct path *real_path,
+ const struct cred *cred);
+ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
+ struct backing_file_ctx *ctx);
+
+#endif /* _LINUX_BACKING_FILE_H */
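
A stacking filesystem would route reads through these helpers roughly as below. This is a sketch: the private_data lookup and the use of current_cred() are assumptions; a real filesystem would use its stored mounter credentials and its own file-to-backing-file mapping.

static void my_file_accessed(struct file *user_file)
{
	/* e.g. propagate atime to the stacked inode */
}

static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	/* assumption: backing file stashed at open time */
	struct file *realfile = iocb->ki_filp->private_data;
	struct backing_file_ctx ctx = {
		.cred		= current_cred(),	/* real fs: mounter creds */
		.user_file	= iocb->ki_filp,
		.accessed	= my_file_accessed,
	};

	return backing_file_read_iter(realfile, iter, iocb,
				      iocb->ki_flags, &ctx);
}
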
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0286bada25..875d792bff 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -327,6 +327,8 @@ enum bip_flags {
BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
+ BIP_INTEGRITY_USER = 1 << 5, /* Integrity payload is user address */
+ BIP_COPY_USER = 1 << 6, /* Kernel bounce buffer in use */
};
/*
@@ -721,6 +723,7 @@ static inline bool bioset_initialized(struct bio_set *bs)
for_each_bio(_bio) \
bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
@@ -792,6 +795,12 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
return 0;
}
+static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
+ ssize_t len, u32 seed)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
/*
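
The !CONFIG_BLK_DEV_INTEGRITY stub above returns -EINVAL, so callers may attempt the mapping unconditionally and treat failure as "no integrity support". A sketch (attach_user_pi() is hypothetical):

/* Attach a user-space integrity buffer if the kernel supports it;
 * with CONFIG_BLK_DEV_INTEGRITY=n this cleanly falls back to -EINVAL.
 */
static int attach_user_pi(struct bio *bio, void __user *ubuf, ssize_t len)
{
	int ret = bio_integrity_map_user(bio, ubuf, len, 0 /* seed */);

	if (ret)
		pr_debug("no integrity mapping: %d\n", ret);
	return ret;
}
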
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 1ab3081c82..7a8150a5f0 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -391,9 +391,6 @@ struct blk_mq_hw_ctx {
*/
struct blk_mq_tags *sched_tags;
- /** @run: Number of dispatched requests. */
- unsigned long run;
-
/** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
/** @queue_num: Index of this hardware queue. */
@@ -830,6 +827,12 @@ void blk_mq_end_request_batch(struct io_comp_batch *ib);
*/
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
+ /*
+ * Passthrough I/O doesn't use iostat accounting, cgroup stats,
+ * or I/O scheduler functionality.
+ */
+ if (blk_rq_is_passthrough(rq))
+ return false;
return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index b29ebd5341..f288c94374 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -57,20 +57,18 @@ struct block_device {
void * bd_holder;
const struct blk_holder_ops *bd_holder_ops;
struct mutex bd_holder_lock;
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
int bd_holders;
struct kobject *bd_holder_dir;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
- struct super_block *bd_fsfreeze_sb;
+ atomic_t bd_fsfreeze_count; /* number of freeze requests */
+ struct mutex bd_fsfreeze_mutex; /* serialize freeze/thaw */
struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool bd_make_it_fail;
#endif
bool bd_ro_warned;
+ int bd_writers;
/*
* keep this out-of-line as it's both big and not needed in the fast
* path
@@ -380,6 +378,8 @@ enum req_op {
REQ_OP_DISCARD = (__force blk_opf_t)3,
/* securely erase sectors */
REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
+ /* write data at the current zone write pointer */
+ REQ_OP_ZONE_APPEND = (__force blk_opf_t)7,
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
/* Open a zone */
@@ -388,12 +388,10 @@ enum req_op {
REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11,
/* Transition a zone to full */
REQ_OP_ZONE_FINISH = (__force blk_opf_t)12,
- /* write data at the current zone write pointer */
- REQ_OP_ZONE_APPEND = (__force blk_opf_t)13,
/* reset a zone write pointer */
- REQ_OP_ZONE_RESET = (__force blk_opf_t)15,
+ REQ_OP_ZONE_RESET = (__force blk_opf_t)13,
/* reset all the zone present on the device */
- REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17,
+ REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)15,
/* Driver private requests */
REQ_OP_DRV_IN = (__force blk_opf_t)34,
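
The renumbering keeps blk_types.h's invariant that the low bit of an op marks operations directed at the medium (op_is_write() is a single bit test), which is why REQ_OP_ZONE_APPEND moves to 7 and the reset ops slide down to 13 and 15 instead of renumbering everything. A sketch of the invariant:

/* Sketch: op_is_write() in blk_types.h reduces to (op & 1), so the
 * renumbering above must keep medium-modifying ops odd.
 */
static_assert((REQ_OP_ZONE_APPEND & 1) == 1);	/* 7: writes data */
static_assert((REQ_OP_ZONE_RESET & 1) == 1);	/* 13: modifies zones */
static_assert((REQ_OP_ZONE_RESET_ALL & 1) == 1);	/* 15 */
static_assert((REQ_OP_WRITE_ZEROES & 1) == 1);	/* 9 */
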
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 88e9dd4b71..b43ca3b9d2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -124,6 +124,10 @@ typedef unsigned int __bitwise blk_mode_t;
#define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
+/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
+#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
+/* return partition scanning errors */
+#define BLK_OPEN_STRICT_SCAN ((__force blk_mode_t)(1 << 6))
struct gendisk {
/*
@@ -264,18 +268,6 @@ static inline bool blk_op_is_passthrough(blk_opf_t op)
}
/*
- * Zoned block device models (zoned limit).
- *
- * Note: This needs to be ordered from the least to the most severe
- * restrictions for the inheritance in blk_stack_limits() to work.
- */
-enum blk_zoned_model {
- BLK_ZONED_NONE = 0, /* Regular block device */
- BLK_ZONED_HA, /* Host-aware zoned block device */
- BLK_ZONED_HM, /* Host-managed zoned block device */
-};
-
-/*
* BLK_BOUNCE_NONE: never bounce (default)
* BLK_BOUNCE_HIGH: bounce all highmem pages
*/
@@ -316,7 +308,7 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
- enum blk_zoned_model zoned;
+ bool zoned;
/*
* Drivers that set dma_alignment to less than 511 must be prepared to
@@ -329,24 +321,15 @@ struct queue_limits {
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
-void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
+void disk_set_zoned(struct gendisk *disk);
-#ifdef CONFIG_BLK_DEV_ZONED
#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
-unsigned int bdev_nr_zones(struct block_device *bdev);
-extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
- sector_t sectors, sector_t nr_sectors,
- gfp_t gfp_mask);
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
+ sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
- void (*update_driver_data)(struct gendisk *disk));
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- return 0;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
+ void (*update_driver_data)(struct gendisk *disk));
/*
* Independent access ranges: struct blk_independent_access_range describes
@@ -376,59 +359,51 @@ struct blk_independent_access_ranges {
};
struct request_queue {
- struct request *last_merge;
- struct elevator_queue *elevator;
-
- struct percpu_ref q_usage_counter;
+ /*
+ * The queue owner gets to use this for whatever they like.
+ * ll_rw_blk doesn't touch it.
+ */
+ void *queuedata;
- struct blk_queue_stats *stats;
- struct rq_qos *rq_qos;
- struct mutex rq_qos_mutex;
+ struct elevator_queue *elevator;
const struct blk_mq_ops *mq_ops;
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
+ /*
+ * various queue flags, see QUEUE_* below
+ */
+ unsigned long queue_flags;
+
+ unsigned int rq_timeout;
+
unsigned int queue_depth;
+ refcount_t refs;
+
/* hw dispatch queues */
- struct xarray hctx_table;
unsigned int nr_hw_queues;
+ struct xarray hctx_table;
- /*
- * The queue owner gets to use this for whatever they like.
- * ll_rw_blk doesn't touch it.
- */
- void *queuedata;
-
- /*
- * various queue flags, see QUEUE_* below
- */
- unsigned long queue_flags;
- /*
- * Number of contexts that have called blk_set_pm_only(). If this
- * counter is above zero then only RQF_PM requests are processed.
- */
- atomic_t pm_only;
+ struct percpu_ref q_usage_counter;
- /*
- * ida allocated id for this queue. Used to index queues from
- * ioctx.
- */
- int id;
+ struct request *last_merge;
spinlock_t queue_lock;
- struct gendisk *disk;
+ int quiesce_depth;
- refcount_t refs;
+ struct gendisk *disk;
/*
* mq queue kobject
*/
struct kobject *mq_kobj;
+ struct queue_limits limits;
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity integrity;
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -439,24 +414,40 @@ struct request_queue {
#endif
/*
- * queue settings
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM requests are processed.
*/
- unsigned long nr_requests; /* Max # of requests */
+ atomic_t pm_only;
+
+ struct blk_queue_stats *stats;
+ struct rq_qos *rq_qos;
+ struct mutex rq_qos_mutex;
+
+ /*
+ * ida allocated id for this queue. Used to index queues from
+ * ioctx.
+ */
+ int id;
unsigned int dma_pad_mask;
+ /*
+ * queue settings
+ */
+ unsigned long nr_requests; /* Max # of requests */
+
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_crypto_profile *crypto_profile;
struct kobject *crypto_kobject;
#endif
- unsigned int rq_timeout;
-
struct timer_list timeout;
struct work_struct timeout_work;
atomic_t nr_active_requests_shared_tags;
+ unsigned int required_elevator_features;
+
struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
@@ -467,11 +458,12 @@ struct request_queue {
struct mutex blkcg_mutex;
#endif
- struct queue_limits limits;
+ int node;
- unsigned int required_elevator_features;
+ spinlock_t requeue_lock;
+ struct list_head requeue_list;
+ struct delayed_work requeue_work;
- int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
struct blk_trace __rcu *blk_trace;
#endif
@@ -481,10 +473,6 @@ struct request_queue {
struct blk_flush_queue *fq;
struct list_head flush_list;
- struct list_head requeue_list;
- spinlock_t requeue_lock;
- struct delayed_work requeue_work;
-
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
@@ -509,8 +497,6 @@ struct request_queue {
*/
struct mutex mq_freeze_lock;
- int quiesce_depth;
-
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
@@ -623,26 +609,14 @@ static inline enum rpm_status queue_rpm_status(struct request_queue *q)
}
#endif
-static inline enum blk_zoned_model
-blk_queue_zoned_model(struct request_queue *q)
-{
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
- return q->limits.zoned;
- return BLK_ZONED_NONE;
-}
-
static inline bool blk_queue_is_zoned(struct request_queue *q)
{
- switch (blk_queue_zoned_model(q)) {
- case BLK_ZONED_HA:
- case BLK_ZONED_HM:
- return true;
- default:
- return false;
- }
+ return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned;
}
#ifdef CONFIG_BLK_DEV_ZONED
+unsigned int bdev_nr_zones(struct block_device *bdev);
+
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
@@ -687,6 +661,11 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
}
#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ return 0;
+}
+
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
return 0;
@@ -1080,7 +1059,14 @@ enum blk_default_limits {
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
-#define BLK_DEF_MAX_SECTORS 2560u
+/*
+ * Default upper limit for the software max_sectors limit used for
+ * regular file system I/O. This can be increased through sysfs.
+ *
+ * Not to be confused with the max_hw_sector limit that is entirely
+ * controlled by the driver, usually based on hardware limits.
+ */
+#define BLK_DEF_MAX_SECTORS_CAP 2560u
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
@@ -1259,11 +1245,6 @@ static inline bool bdev_nowait(struct block_device *bdev)
return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}
-static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
-{
- return blk_queue_zoned_model(bdev_get_queue(bdev));
-}
-
static inline bool bdev_is_zoned(struct block_device *bdev)
{
return blk_queue_is_zoned(bdev_get_queue(bdev));
@@ -1468,8 +1449,23 @@ struct blk_holder_ops {
* Sync the file system mounted on the block device.
*/
void (*sync)(struct block_device *bdev);
+
+ /*
+ * Freeze the file system mounted on the block device.
+ */
+ int (*freeze)(struct block_device *bdev);
+
+ /*
+ * Thaw the file system mounted on the block device.
+ */
+ int (*thaw)(struct block_device *bdev);
};
+/*
+ * For filesystems using @fs_holder_ops, the @holder argument passed to
+ * helpers used to open and claim block devices via
+ * bd_prepare_to_claim() must point to a superblock.
+ */
extern const struct blk_holder_ops fs_holder_ops;
/*
@@ -1477,7 +1473,8 @@ extern const struct blk_holder_ops fs_holder_ops;
* as stored in sb->s_flags.
*/
#define sb_open_mode(flags) \
- (BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+ (BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
+ (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
struct bdev_handle {
struct block_device *bdev;
@@ -1485,10 +1482,6 @@ struct bdev_handle {
blk_mode_t mode;
};
-struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
- const struct blk_holder_ops *hops);
-struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
- void *holder, const struct blk_holder_ops *hops);
struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops);
struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
@@ -1496,7 +1489,6 @@ struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
-void blkdev_put(struct block_device *bdev, void *holder);
void bdev_release(struct bdev_handle *handle);
/* just for blk-cgroup, don't use elsewhere */
@@ -1541,8 +1533,8 @@ static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
}
#endif /* CONFIG_BLOCK */
-int freeze_bdev(struct block_device *bdev);
-int thaw_bdev(struct block_device *bdev);
+int bdev_freeze(struct block_device *bdev);
+int bdev_thaw(struct block_device *bdev);
struct io_comp_batch {
struct request *req_list;
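
bdev_freeze()/bdev_thaw() replace freeze_bdev()/thaw_bdev(), with the superblock handling moved into the holder's new freeze/thaw ops (fs_holder_ops for filesystems, per the comment above). A sketch of a hypothetical in-kernel user (do_hypothetical_snapshot() is made up):

/* Quiesce a device long enough to snapshot it (sketch). */
static int snapshot_bdev(struct block_device *bdev)
{
	int ret = bdev_freeze(bdev);	/* was: freeze_bdev() in 6.7 */

	if (ret)
		return ret;

	ret = do_hypothetical_snapshot(bdev);

	/* report the thaw error if any, else the snapshot result */
	return bdev_thaw(bdev) ?: ret;	/* was: thaw_bdev() */
}
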
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index ca73940e26..3f4b4ac527 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -10,6 +10,7 @@
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/types.h>
+bool __init cmdline_has_extra_options(void);
#else /* !__KERNEL__ */
/*
* NOTE: This is only for tools/bootconfig, because tools/bootconfig will
@@ -287,7 +288,12 @@ int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
-void __init xbc_exit(void);
+void __init _xbc_exit(bool early);
+
+static inline void xbc_exit(void)
+{
+ _xbc_exit(false);
+}
/* XBC embedded bootconfig data in kernel */
#ifdef CONFIG_BOOT_CONFIG_EMBED
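
_xbc_exit() now takes an early flag so teardown can pick the right free path, while the old xbc_exit() spelling stays as a wrapper. A sketch; reading "early" as "before the normal allocators are available" is an assumption from the signature:

static void __init bootconfig_teardown(bool early_boot_failure)
{
	if (early_boot_failure)
		_xbc_exit(true);	/* assumption: early-boot free path */
	else
		xbc_exit();		/* unchanged wrapper, == _xbc_exit(false) */
}
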
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index cfc6d2f980..7d719976b5 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -29,6 +29,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
#include <linux/memcontrol.h>
+#include <linux/cfi.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -190,8 +191,8 @@ enum btf_field_type {
BPF_LIST_NODE = (1 << 6),
BPF_RB_ROOT = (1 << 7),
BPF_RB_NODE = (1 << 8),
- BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
- BPF_RB_NODE | BPF_RB_ROOT,
+ BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
+ BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
BPF_REFCOUNT = (1 << 9),
};
@@ -297,6 +298,8 @@ struct bpf_map {
bool bypass_spec_v1;
bool frozen; /* write-once; write-protected by freeze_mutex */
bool free_after_mult_rcu_gp;
+ bool free_after_rcu_gp;
+ atomic64_t sleepable_refcnt;
s64 __percpu *elem_count;
};
@@ -1053,6 +1056,17 @@ struct btf_func_model {
*/
#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
+/*
+ * Indicate the trampoline should be suitable to receive indirect calls;
+ * without this indirectly calling the generated code can result in #UD/#CP,
+ * depending on the CFI options.
+ *
+ * Used by bpf_struct_ops.
+ *
+ * Incompatible with FENTRY usage, overloads @func_addr argument.
+ */
+#define BPF_TRAMP_F_INDIRECT BIT(8)
+
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86.
*/
@@ -1092,10 +1106,17 @@ struct bpf_tramp_run_ctx;
* fexit = a set of program to run after original function
*/
struct bpf_tramp_image;
-int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks,
- void *orig_call);
+ void *func_addr);
+void *arch_alloc_bpf_trampoline(unsigned int size);
+void arch_free_bpf_trampoline(void *image, unsigned int size);
+void arch_protect_bpf_trampoline(void *image, unsigned int size);
+void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr);
+
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
@@ -1128,6 +1149,7 @@ enum bpf_tramp_prog_type {
struct bpf_tramp_image {
void *image;
+ int size;
struct bpf_ksym ksym;
struct percpu_ref pcref;
void *ip_after_call;
@@ -1197,7 +1219,11 @@ struct bpf_dispatcher {
#endif
};
-static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
+#ifndef __bpfcall
+#define __bpfcall __nocfi
+#endif
+
+static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
const void *ctx,
const struct bpf_insn *insnsi,
bpf_func_t bpf_func)
@@ -1235,6 +1261,8 @@ enum bpf_dynptr_type {
int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
+const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
+void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
@@ -1287,7 +1315,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
#define DEFINE_BPF_DISPATCHER(name) \
__BPF_DISPATCHER_SC(name); \
- noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
+ noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
bpf_func_t bpf_func) \
@@ -1310,7 +1338,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
-void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
+void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
@@ -1354,6 +1382,8 @@ static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
struct bpf_func_info_aux {
u16 linkage;
bool unreliable;
+ bool called : 1;
+ bool verified : 1;
};
enum bpf_jit_poke_reason {
@@ -1419,6 +1449,7 @@ struct bpf_prog_aux {
bool dev_bound; /* Program is bound to the netdev. */
bool offload_requested; /* Program is bound and offloaded to the netdev. */
bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
+ bool attach_tracing_prog; /* true if tracing another tracing program */
bool func_proto_unreliable;
bool sleepable;
bool tail_call_reachable;
@@ -1435,6 +1466,9 @@ struct bpf_prog_aux {
struct bpf_kfunc_desc_tab *kfunc_tab;
struct bpf_kfunc_btf_tab *kfunc_btf_tab;
u32 size_poke_tab;
+#ifdef CONFIG_FINEIBT
+ struct bpf_ksym ksym_prefix;
+#endif
struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
@@ -1534,12 +1568,26 @@ struct bpf_link {
enum bpf_link_type type;
const struct bpf_link_ops *ops;
struct bpf_prog *prog;
- struct work_struct work;
+ /* rcu is used before freeing, work can be used to schedule that
+ * RCU-based freeing before that, so they never overlap
+ */
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
};
struct bpf_link_ops {
void (*release)(struct bpf_link *link);
+ /* deallocate link resources callback, called without RCU grace period
+ * waiting
+ */
void (*dealloc)(struct bpf_link *link);
+ /* deallocate link resources callback, called after RCU grace period;
+ * if underlying BPF program is sleepable we go through tasks trace
+ * RCU GP and then "classic" RCU GP
+ */
+ void (*dealloc_deferred)(struct bpf_link *link);
int (*detach)(struct bpf_link *link);
int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog);
@@ -1645,6 +1693,7 @@ struct bpf_struct_ops {
struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
u32 type_id;
u32 value_id;
+ void *cfi_stubs;
};
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
@@ -1658,6 +1707,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
struct bpf_tramp_link *link,
const struct btf_func_model *model,
+ void *stub_func,
void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
@@ -2431,12 +2481,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf_func_model *m);
struct bpf_reg_state;
-int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs);
-int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs);
-int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *reg, bool is_ex_cb);
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);
const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
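
The trampoline image lifecycle is now size -> alloc -> prepare -> protect, per the arch_* declarations in the bpf.h hunk above. A sketch of an arch-agnostic caller (make_trampoline() is hypothetical; error handling trimmed):

static void *make_trampoline(struct bpf_tramp_image *im,
			     const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr)
{
	int size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
	void *image;

	if (size < 0)
		return NULL;

	image = arch_alloc_bpf_trampoline(size);
	if (!image)
		return NULL;

	if (arch_prepare_bpf_trampoline(im, image, image + size,
					m, flags, tlinks, func_addr) < 0) {
		arch_free_bpf_trampoline(image, size);
		return NULL;
	}

	arch_protect_bpf_trampoline(image, size);	/* make it RO+X */
	return image;
}
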
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
index bb1223b213..aaf004d943 100644
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -11,6 +11,7 @@ struct bpf_mem_caches;
struct bpf_mem_alloc {
struct bpf_mem_caches __percpu *caches;
struct bpf_mem_cache __percpu *cache;
+ struct obj_cgroup *objcg;
bool percpu;
struct work_struct work;
};
@@ -21,8 +22,15 @@ struct bpf_mem_alloc {
* 'size = 0' is for bpf_mem_alloc which manages many fixed-size objects.
* Alloc and free are done with bpf_mem_{alloc,free}() and the size of
* the returned object is given by the size argument of bpf_mem_alloc().
+ * If percpu is true, an error is returned, to avoid large memory
+ * consumption; use bpf_mem_alloc_percpu_unit_init() below for on-demand
+ * per-cpu allocation of each unit size.
*/
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
+/* Initialize a non-fix-size percpu memory allocator */
+int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg);
+/* The percpu allocation with a specific unit size. */
+int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
/* kmalloc/kfree equivalent: */
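
Per the comments above, variable-size per-cpu allocation becomes a two-step setup: initialize the allocator once, then register each unit size on demand. A sketch (passing NULL as the objcg is an assumption meaning "no specific memcg"):

static struct bpf_mem_alloc pcpu_ma;

static int setup_pcpu_alloc(void)
{
	int err;

	/* non-fixed-size percpu allocator; NULL objcg is an assumption */
	err = bpf_mem_alloc_percpu_init(&pcpu_ma, NULL);
	if (err)
		return err;

	/* register the unit sizes we will actually allocate */
	return bpf_mem_alloc_percpu_unit_init(&pcpu_ma, 16);
}
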
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index aa4d19d0bc..d07d857ca6 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -275,6 +275,11 @@ struct bpf_reference_state {
int callback_ref;
};
+struct bpf_retval_range {
+ s32 minval;
+ s32 maxval;
+};
+
/* state of the program:
* type of all registers and stack info
*/
@@ -297,8 +302,8 @@ struct bpf_func_state {
* void foo(void) { bpf_timer_set_callback(,foo); }
*/
u32 async_entry_cnt;
+ struct bpf_retval_range callback_ret_range;
bool in_callback_fn;
- struct tnum callback_ret_range;
bool in_async_callback_fn;
bool in_exception_callback_fn;
/* For callback calling functions that limit number of possible
@@ -316,16 +321,48 @@ struct bpf_func_state {
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
struct bpf_reference_state *refs;
- int allocated_stack;
+ /* The state of the stack. Each element of the array describes BPF_REG_SIZE
+ * (i.e. 8) bytes worth of stack memory.
+ * stack[0] represents bytes [*(r10-8)..*(r10-1)]
+ * stack[1] represents bytes [*(r10-16)..*(r10-9)]
+ * ...
+ * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
+ */
struct bpf_stack_state *stack;
+ /* Size of the current stack, in bytes. The stack state is tracked below, in
+ * `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
+ */
+ int allocated_stack;
+};
+
+#define MAX_CALL_FRAMES 8
+
+/* instruction history flags, used in bpf_jmp_history_entry.flags field */
+enum {
+ /* instruction references stack slot through PTR_TO_STACK register;
+ * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
+ * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
+ * 8 bytes per slot, so slot index (spi) is [0, 63])
+ */
+ INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */
+
+ INSN_F_SPI_MASK = 0x3f, /* 6 bits */
+ INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
+
+ INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
};
-struct bpf_idx_pair {
- u32 prev_idx;
+static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
+static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
+
+struct bpf_jmp_history_entry {
u32 idx;
+ /* insn idx can't be bigger than 1 million */
+ u32 prev_idx : 22;
+ /* special flags, e.g., whether insn is doing register stack spill/load */
+ u32 flags : 10;
};
-#define MAX_CALL_FRAMES 8
/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
@@ -408,7 +445,7 @@ struct bpf_verifier_state {
* For most states jmp_history_cnt is [0-3].
* For loops can go up to ~40.
*/
- struct bpf_idx_pair *jmp_history;
+ struct bpf_jmp_history_entry *jmp_history;
u32 jmp_history_cnt;
u32 dfs_depth;
u32 callback_unroll_depth;
@@ -569,17 +606,28 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
#define BPF_MAX_SUBPROGS 256
+struct bpf_subprog_arg_info {
+ enum bpf_arg_type arg_type;
+ union {
+ u32 mem_size;
+ };
+};
+
struct bpf_subprog_info {
/* 'start' has to be the first field otherwise find_subprog() won't work */
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
- bool has_tail_call;
- bool tail_call_reachable;
- bool has_ld_abs;
- bool is_cb;
- bool is_async_cb;
- bool is_exception_cb;
+ bool has_tail_call: 1;
+ bool tail_call_reachable: 1;
+ bool has_ld_abs: 1;
+ bool is_cb: 1;
+ bool is_async_cb: 1;
+ bool is_exception_cb: 1;
+ bool args_cached: 1;
+
+ u8 arg_cnt;
+ struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};
struct bpf_verifier_env;
@@ -618,6 +666,7 @@ struct bpf_verifier_env {
int stack_size; /* number of states to be processed */
bool strict_alignment; /* perform strict pointer alignment checks */
bool test_state_freq; /* test verifier with different pruning frequency */
+ bool test_reg_invariants; /* fail verification on register invariants violations */
struct bpf_verifier_state *cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
struct bpf_verifier_state_list *free_list;
@@ -630,6 +679,10 @@ struct bpf_verifier_env {
int exception_callback_subprog;
bool explore_alu_limits;
bool allow_ptr_leaks;
+ /* Allow access to uninitialized stack memory. Writes with fixed offset are
+ * always allowed, so this refers to reads (with fixed or variable offset),
+ * to writes with variable offset and to indirect (helper) accesses.
+ */
bool allow_uninit_stack;
bool bpf_capable;
bool bypass_spec_v1;
@@ -650,6 +703,7 @@ struct bpf_verifier_env {
int cur_stack;
} cfg;
struct backtrack_state bt;
+ struct bpf_jmp_history_entry *cur_hist_ent;
u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
@@ -684,6 +738,16 @@ struct bpf_verifier_env {
char tmp_str_buf[TMP_STR_BUF_LEN];
};
+static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
+{
+ return &env->prog->aux->func_info_aux[subprog];
+}
+
+static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
+{
+ return &env->subprog_info[subprog];
+}
+
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
@@ -695,6 +759,10 @@ int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);
+__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
+ u32 insn_off,
+ const char *prefix_fmt, ...);
+
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
@@ -717,14 +785,6 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
-int check_ptr_off_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno);
-int check_func_arg_reg_off(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno,
- enum bpf_arg_type arg_type);
-int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- u32 regno, u32 mem_size);
-
/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
struct btf *btf, u32 btf_id)
@@ -794,4 +854,76 @@ static inline bool bpf_type_has_unsafe_modifiers(u32 type)
return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}
+static inline bool type_is_ptr_alloc_obj(u32 type)
+{
+ return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
+}
+
+static inline bool type_is_non_owning_ref(u32 type)
+{
+ return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
+}
+
+static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
+{
+ type = base_type(type);
+ return type == PTR_TO_PACKET ||
+ type == PTR_TO_PACKET_META;
+}
+
+static inline bool type_is_sk_pointer(enum bpf_reg_type type)
+{
+ return type == PTR_TO_SOCKET ||
+ type == PTR_TO_SOCK_COMMON ||
+ type == PTR_TO_TCP_SOCK ||
+ type == PTR_TO_XDP_SOCK;
+}
+
+static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
+{
+ env->scratched_regs |= 1U << regno;
+}
+
+static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
+{
+ env->scratched_stack_slots |= 1ULL << spi;
+}
+
+static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
+{
+ return (env->scratched_regs >> regno) & 1;
+}
+
+static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
+{
+ return (env->scratched_stack_slots >> regno) & 1;
+}
+
+static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
+{
+ return env->scratched_regs || env->scratched_stack_slots;
+}
+
+static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = 0U;
+ env->scratched_stack_slots = 0ULL;
+}
+
+/* Used for printing the entire verifier state. */
+static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = ~0U;
+ env->scratched_stack_slots = ~0ULL;
+}
+
+const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
+const char *dynptr_type_str(enum bpf_dynptr_type type);
+const char *iter_type_str(const struct btf *btf, u32 btf_id);
+const char *iter_state_str(enum bpf_iter_state state);
+
+void print_verifier_state(struct bpf_verifier_env *env,
+ const struct bpf_func_state *state, bool print_all);
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
+
#endif /* _LINUX_BPF_VERIFIER_H */
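
The 10-bit flags word packs the frame number (3 bits), the stack slot index (6 bits), and the access bit, exactly as the masks above lay out. A sketch of the pack/unpack helpers those constants imply:

/* Pack a stack access into bpf_jmp_history_entry.flags (sketch) */
static u32 insn_stack_flags(u32 frameno, u32 spi)
{
	return INSN_F_STACK_ACCESS |
	       (spi << INSN_F_SPI_SHIFT) |
	       (frameno & INSN_F_FRAMENO_MASK);
}

static u32 insn_flags_spi(u32 flags)
{
	return (flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
}
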
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
deleted file mode 100644
index 736ded4905..0000000000
--- a/include/linux/bpfilter.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BPFILTER_H
-#define _LINUX_BPFILTER_H
-
-#include <uapi/linux/bpfilter.h>
-#include <linux/usermode_driver.h>
-#include <linux/sockptr.h>
-
-struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
- unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
- int __user *optlen);
-
-struct bpfilter_umh_ops {
- struct umd_info info;
- /* since ip_getsockopt() can run in parallel, serialize access to umh */
- struct mutex lock;
- int (*sockopt)(struct sock *sk, int optname, sockptr_t optval,
- unsigned int optlen, bool is_set);
- int (*start)(void);
-};
-extern struct bpfilter_umh_ops bpfilter_ops;
-#endif
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 59d404e228..cf5c6ff489 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -512,7 +512,7 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
struct module *owner);
struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
-const struct btf_member *
+const struct btf_type *
btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, enum bpf_prog_type prog_type,
int arg);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 5f23ee5998..d78454a4dd 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -205,7 +205,6 @@ struct buffer_head *create_empty_buffers(struct folio *folio,
unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -252,11 +251,10 @@ void __bh_read_batch(int nr, struct buffer_head *bhs[],
* address_spaces.
*/
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
-int block_write_full_page(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+ void *get_block);
int __block_write_full_folio(struct inode *inode, struct folio *folio,
- get_block_t *get_block, struct writeback_control *wbc,
- bh_end_io_t *handler);
+ get_block_t *get_block, struct writeback_control *wbc);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
@@ -270,7 +268,6 @@ int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
-void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, struct page **, void **,
get_block_t *, loff_t *);
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 9900d20b76..0ecb17bb68 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -85,6 +85,31 @@
#define cache_line_size() L1_CACHE_BYTES
#endif
+#ifndef __cacheline_group_begin
+#define __cacheline_group_begin(GROUP) \
+ __u8 __cacheline_group_begin__##GROUP[0]
+#endif
+
+#ifndef __cacheline_group_end
+#define __cacheline_group_end(GROUP) \
+ __u8 __cacheline_group_end__##GROUP[0]
+#endif
+
+#ifndef CACHELINE_ASSERT_GROUP_MEMBER
+#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
+ BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
+ offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
+ offsetofend(TYPE, MEMBER) <= \
+ offsetof(TYPE, __cacheline_group_end__##GROUP)))
+#endif
+
+#ifndef CACHELINE_ASSERT_GROUP_SIZE
+#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
+ BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
+ offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
+ SIZE)
+#endif
+
/*
* Helper to add padding within a struct to ensure data fall into separate
* cachelines.
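To make the new group macros concrete, a minimal sketch (struct and field names are hypothetical, not part of the patch): the RX-hot members are bracketed by the marker fields, and the build fails if a member strays outside the group or the group outgrows one 64-byte cacheline.

	struct nic_stats {
		u64 tx_packets;
		__cacheline_group_begin(rx_hot);
		u64 rx_packets;
		u64 rx_bytes;
		__cacheline_group_end(rx_hot);
		u64 tx_errors;
	};

	static void nic_stats_layout_check(void)
	{
		/* rx_bytes must lie between the rx_hot begin/end markers... */
		CACHELINE_ASSERT_GROUP_MEMBER(struct nic_stats, rx_hot, rx_bytes);
		/* ...and the whole group must fit in 64 bytes. */
		CACHELINE_ASSERT_GROUP_SIZE(struct nic_stats, rx_hot, 64);
	}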
diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h
index 94ad2c9017..6355a36a3f 100644
--- a/include/linux/cdx/cdx_bus.h
+++ b/include/linux/cdx/cdx_bus.h
@@ -113,6 +113,7 @@ struct cdx_controller {
* @dev_num: Device number for this device
* @res: array of MMIO region entries
* @res_attr: resource binary attribute
+ * @debugfs_dir: debugfs directory for this device
* @res_count: number of valid MMIO regions
* @dma_mask: Default DMA mask
* @flags: CDX device flags
@@ -135,6 +136,8 @@ struct cdx_device {
u8 bus_num;
u8 dev_num;
struct resource res[MAX_CDX_DEV_RESOURCES];
+ struct bin_attribute *res_attr[MAX_CDX_DEV_RESOURCES];
+ struct dentry *debugfs_dir;
u8 res_count;
u64 dma_mask;
u16 flags;
@@ -147,6 +150,15 @@ struct cdx_device {
#define to_cdx_device(_dev) \
container_of(_dev, struct cdx_device, dev)
+#define cdx_resource_start(dev, num) ((dev)->res[(num)].start)
+#define cdx_resource_end(dev, num) ((dev)->res[(num)].end)
+#define cdx_resource_flags(dev, num) ((dev)->res[(num)].flags)
+#define cdx_resource_len(dev, num) \
+ ((cdx_resource_start((dev), (num)) == 0 && \
+ cdx_resource_end((dev), (num)) == \
+ cdx_resource_start((dev), (num))) ? 0 : \
+ (cdx_resource_end((dev), (num)) - \
+ cdx_resource_start((dev), (num)) + 1))
/**
* struct cdx_driver - CDX device driver
* @driver: Generic device driver
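A hedged usage sketch for the new resource accessors above (driver function and error handling are assumptions, not part of the patch):

	/* Hypothetical probe fragment: map MMIO region 0 of a CDX device. */
	static int my_cdx_map_region0(struct cdx_device *cdx_dev,
				      void __iomem **regs)
	{
		if (!(cdx_resource_flags(cdx_dev, 0) & IORESOURCE_MEM))
			return -ENODEV;

		*regs = ioremap(cdx_resource_start(cdx_dev, 0),
				cdx_resource_len(cdx_dev, 0));
		return *regs ? 0 : -ENOMEM;
	}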
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 5edf9fffa0..f66f6aac74 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -573,9 +573,12 @@ int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt);
*/
#define CEPH_SPARSE_EXT_ARRAY_INITIAL 16
-static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op)
+static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
{
- return __ceph_alloc_sparse_ext_map(op, CEPH_SPARSE_EXT_ARRAY_INITIAL);
+ if (!cnt)
+ cnt = CEPH_SPARSE_EXT_ARRAY_INITIAL;
+
+ return __ceph_alloc_sparse_ext_map(op, cnt);
}
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
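A caller-side sketch of the new @cnt parameter ('op' and 'num_extents' are hypothetical):

	int ret = ceph_alloc_sparse_ext_map(op, 0);		/* default initial size */
	ret = ceph_alloc_sparse_ext_map(op, num_extents);	/* caller's hint */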
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
index 3552ec82b7..f0df518e11 100644
--- a/include/linux/cfi.h
+++ b/include/linux/cfi.h
@@ -9,6 +9,14 @@
#include <linux/bug.h>
#include <linux/module.h>
+#include <asm/cfi.h>
+
+#ifndef cfi_get_offset
+static inline int cfi_get_offset(void)
+{
+ return 0;
+}
+#endif
#ifdef CONFIG_CFI_CLANG
enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
@@ -38,4 +46,8 @@ static inline void module_cfi_finalize(const Elf_Ehdr *hdr,
#endif /* CONFIG_ARCH_USES_CFI_TRAPS */
#endif /* CONFIG_MODULES */
+#ifndef CFI_NOSEAL
+#define CFI_NOSEAL(x)
+#endif
+
#endif /* _LINUX_CFI_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 4a6b6b77cc..ea48c861cd 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -496,6 +496,20 @@ struct cgroup {
struct cgroup_rstat_cpu __percpu *rstat_cpu;
struct list_head rstat_css_list;
+ /*
+ * Add padding to separate the read-mostly rstat_cpu and
+ * rstat_css_list into a different cacheline from the following
+ * rstat_flush_next and *bstat fields which can have frequent updates.
+ */
+ CACHELINE_PADDING(_pad_);
+
+ /*
+ * A singly-linked list of cgroup structures to be rstat flushed.
+ * This is a scratch field to be used exclusively by
+ * cgroup_rstat_flush_locked() and protected by cgroup_rstat_lock.
+ */
+ struct cgroup *rstat_flush_next;
+
/* cgroup basic resource statistics */
struct cgroup_base_stat last_bstat;
struct cgroup_base_stat bstat;
@@ -548,6 +562,10 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+ struct rcu_head rcu; /* Must be near the top */
+
/*
* The root cgroup. The containing cgroup_root will be destroyed on its
* release. cgrp->ancestors[0] will be used overflowing into the
@@ -561,9 +579,6 @@ struct cgroup_root {
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
- /* A list running through the active hierarchies */
- struct list_head root_list;
-
/* Hierarchy-specific flags */
unsigned int flags;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0ef0af6608..34aaf0e87d 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -69,6 +69,7 @@ struct css_task_iter {
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
+extern spinlock_t css_set_lock;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@@ -386,7 +387,6 @@ static inline void cgroup_unlock(void)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
rcu_read_lock_sched_held() || \
@@ -853,4 +853,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
#endif /* CONFIG_CGROUP_BPF */
+struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 9f1a9c455b..c2d09bc4f9 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -125,25 +125,55 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
* trivial wrapper around DEFINE_CLASS() above specifically
* for locks.
*
+ * DEFINE_GUARD_COND(name, ext, condlock)
+ * wrapper around EXTEND_CLASS above to add conditional lock
+ * variants to a base class, eg. mutex_trylock() or
+ * mutex_lock_interruptible().
+ *
* guard(name):
- * an anonymous instance of the (guard) class
+ * an anonymous instance of the (guard) class, not recommended for
+ * conditional locks.
*
* scoped_guard (name, args...) { }:
* similar to CLASS(name, scope)(args), except the variable (with the
* explicit name 'scope') is declared in a for-loop such that its scope is
* bound to the next (compound) statement.
*
+ * for conditional locks the loop body is skipped when the lock is not
+ * acquired.
+ *
+ * scoped_cond_guard (name, fail, args...) { }:
+ * similar to scoped_guard(), except that the 'fail' statement is
+ * executed when the lock acquisition fails.
+ *
*/
#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
- DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
+ DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ { return *_T; }
+
+#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+ EXTEND_CLASS(_name, _ext, \
+ ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
+ class_##_name##_t _T) \
+ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); }
#define guard(_name) \
CLASS(_name, __UNIQUE_ID(guard))
+#define __guard_ptr(_name) class_##_name##_lock_ptr
+
#define scoped_guard(_name, args...) \
for (CLASS(_name, scope)(args), \
- *done = NULL; !done; done = (void *)1)
+ *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
+
+#define scoped_cond_guard(_name, _fail, args...) \
+ for (CLASS(_name, scope)(args), \
+ *done = NULL; !done; done = (void *)1) \
+ if (!__guard_ptr(_name)(&scope)) _fail; \
+ else
/*
* Additional helper macros for generating lock guards with types, either for
@@ -152,6 +182,7 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
*
* DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
* DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
*
* will result in the following type:
*
@@ -173,6 +204,11 @@ typedef struct { \
static inline void class_##_name##_destructor(class_##_name##_t *_T) \
{ \
if (_T->lock) { _unlock; } \
+} \
+ \
+static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+{ \
+ return _T->lock; \
}
@@ -201,4 +237,14 @@ __DEFINE_LOCK_GUARD_1(_name, _type, _lock)
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
__DEFINE_LOCK_GUARD_0(_name, _lock)
+#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+ EXTEND_CLASS(_name, _ext, \
+ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
+ if (_T->lock && !(_condlock)) _T->lock = NULL; \
+ _t; }), \
+ typeof_member(class_##_name##_t, lock) l) \
+ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); }
+
+
#endif /* __LINUX_GUARDS_H */
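To illustrate the conditional-guard additions, a minimal usage sketch. The guard classes are assumed to be defined elsewhere, e.g. via DEFINE_GUARD(mutex, ...) plus DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T)) and DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0); this is not part of the patch itself.

	static int try_update(struct mutex *m, int *val)
	{
		/* Body runs only if mutex_trylock() succeeded; unlock is automatic. */
		scoped_guard(mutex_try, m) {
			(*val)++;
			return 0;
		}
		return -EBUSY;
	}

	static int update_interruptible(struct mutex *m, int *val)
	{
		/* On failed acquisition the 'fail' statement executes instead. */
		scoped_cond_guard(mutex_intr, return -EINTR, m) {
			(*val)++;
		}
		return 0;
	}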
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 687aaaff80..75bd1692d2 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -156,7 +156,7 @@
#endif
#define __diag_ignore_all(option, comment) \
- __diag_GCC(8, ignore, option)
+ __diag(__diag_GCC_ignore option)
/*
* Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size"
diff --git a/include/linux/connector.h b/include/linux/connector.h
index cec2d99ae9..70bc1160f3 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -100,8 +100,7 @@ void cn_del_callback(const struct cb_id *id);
*/
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid,
u32 group, gfp_t gfp_mask,
- int (*filter)(struct sock *dsk, struct sk_buff *skb,
- void *data),
+ netlink_filter_fn filter,
void *filter_data);
/**
diff --git a/include/linux/container.h b/include/linux/container.h
index 2566a1baa7..dd00cc918a 100644
--- a/include/linux/container.h
+++ b/include/linux/container.h
@@ -12,7 +12,7 @@
#include <linux/device.h>
/* drivers/base/power/container.c */
-extern struct bus_type container_subsys;
+extern const struct bus_type container_subsys;
struct container_dev {
struct device dev;
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index a269fffaf9..a4cb7dd6ca 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -64,6 +64,7 @@ enum coresight_dev_subtype_source {
CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM,
CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS,
};
diff --git a/include/linux/cper.h b/include/linux/cper.h
index c1a7dc3251..265b0f8fc0 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -90,6 +90,29 @@ enum {
GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
0x72, 0x2D, 0xEB, 0x41)
+/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CPER_SEC_CXL_GEN_MEDIA_GUID \
+ GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
+ 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
+/*
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CPER_SEC_CXL_DRAM_GUID \
+ GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
+ 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CPER_SEC_CXL_MEM_MODULE_GUID \
+ GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
+ 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
+
/*
* Flags bits definitions for flags in struct cper_record_header
* If set, the error has been recovered
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index e990c18028..8654714421 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -82,6 +82,7 @@ extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...);
+extern bool arch_cpu_is_hotpluggable(int cpu);
extern int arch_register_cpu(int cpu);
extern void arch_unregister_cpu(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
@@ -90,6 +91,10 @@ extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+DECLARE_PER_CPU(struct cpu, cpu_devices);
+#endif
+
/*
* These states are not related to the core CPU hotplug mechanism. They are
* used by various (sub)architectures to track internal state
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 90f8bd1736..320fab7d2e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1216,6 +1216,7 @@ void arch_set_freq_scale(const struct cpumask *cpus,
{
}
#endif
+
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index efc0c0b07e..172d0a743e 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -66,15 +66,12 @@ enum cpuhp_state {
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
- CPUHP_X86_APB_DEAD,
CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
CPUHP_IBMVNIC_DEAD,
CPUHP_SLUB_DEAD,
CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
- /* Must be after CPUHP_MM_VMSTAT_DEAD */
- CPUHP_MM_DEMOTION_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
@@ -96,7 +93,6 @@ enum cpuhp_state {
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
- CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
CPUHP_AP_DTPM_CPU_DEAD,
@@ -108,7 +104,6 @@ enum cpuhp_state {
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
- CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
CPUHP_CPUIDLE_COUPLED_PREPARE,
@@ -118,13 +113,11 @@ enum cpuhp_state {
CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
- CPUHP_NET_FLOW_PREPARE,
CPUHP_TOPOLOGY_PREPARE,
CPUHP_NET_IUCV_PREPARE,
CPUHP_ARM_BL_PREPARE,
CPUHP_TRACE_RB_PREPARE,
CPUHP_MM_ZS_PREPARE,
- CPUHP_MM_ZSWP_MEM_PREPARE,
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
@@ -151,18 +144,14 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
- CPUHP_AP_IRQ_RISCV_STARTING,
CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
- CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- CPUHP_AP_PERF_X86_CQM_STARTING,
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
- CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
@@ -179,7 +168,6 @@ enum cpuhp_state {
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
- CPUHP_AP_MARCO_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
@@ -217,9 +205,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
CPUHP_AP_PERF_X86_RAPL_ONLINE,
- CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
- CPUHP_AP_PERF_X86_IDXD_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
@@ -251,9 +237,7 @@ enum cpuhp_state {
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
- CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
- /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
- CPUHP_AP_MM_DEMOTION_ONLINE,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_ACTIVE,
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index d629094fac..875d12598b 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -77,6 +77,7 @@ extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -207,6 +208,11 @@ static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
return false;
}
+static inline bool cpuset_cpu_is_isolated(int cpu)
+{
+ return false;
+}
+
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
return node_possible_map;
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 5126a4fecb..9eaeaafe0c 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -87,12 +87,6 @@ Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len);
void final_note(Elf_Word *buf);
-#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
-#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
-#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
-#endif
-#endif
-
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base,
unsigned long long *low_size, bool *high);
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index 72c92c396b..cd4f420231 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,19 +5,12 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
-extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
-extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
-static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
-{
- return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
-}
-
#endif /* _LINUX_CRC_CCITT_H */
diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h
new file mode 100644
index 0000000000..03fa6d50d4
--- /dev/null
+++ b/include/linux/cxl-event.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Intel Corporation. */
+#ifndef _LINUX_CXL_EVENT_H
+#define _LINUX_CXL_EVENT_H
+
+/*
+ * Common Event Record Format
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_hdr {
+ u8 length;
+ u8 flags[3];
+ __le16 handle;
+ __le16 related_handle;
+ __le64 timestamp;
+ u8 maint_op_class;
+ u8 reserved[15];
+} __packed;
+
+#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
+struct cxl_event_generic {
+ struct cxl_event_record_hdr hdr;
+ u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
+} __packed;
+
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
+struct cxl_event_gen_media {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+ u8 descriptor;
+ u8 type;
+ u8 transaction_type;
+ u8 validity_flags[2];
+ u8 channel;
+ u8 rank;
+ u8 device[3];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 reserved[46];
+} __packed;
+
+/*
+ * DRAM Event Record - DER
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 3-44
+ */
+#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
+struct cxl_event_dram {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+ u8 descriptor;
+ u8 type;
+ u8 transaction_type;
+ u8 validity_flags[2];
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ u8 column[2];
+ u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
+ u8 reserved[0x17];
+} __packed;
+
+/*
+ * Get Health Info Record
+ * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
+ */
+struct cxl_get_health_info {
+ u8 health_status;
+ u8 media_status;
+ u8 add_status;
+ u8 life_used;
+ u8 device_temp[2];
+ u8 dirty_shutdown_cnt[4];
+ u8 cor_vol_err_cnt[4];
+ u8 cor_per_err_cnt[4];
+} __packed;
+
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+struct cxl_event_mem_module {
+ struct cxl_event_record_hdr hdr;
+ u8 event_type;
+ struct cxl_get_health_info info;
+ u8 reserved[0x3d];
+} __packed;
+
+union cxl_event {
+ struct cxl_event_generic generic;
+ struct cxl_event_gen_media gen_media;
+ struct cxl_event_dram dram;
+ struct cxl_event_mem_module mem_module;
+} __packed;
+
+/*
+ * Common Event Record Format; in event logs
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_raw {
+ uuid_t id;
+ union cxl_event event;
+} __packed;
+
+enum cxl_event_type {
+ CXL_CPER_EVENT_GENERIC,
+ CXL_CPER_EVENT_GEN_MEDIA,
+ CXL_CPER_EVENT_DRAM,
+ CXL_CPER_EVENT_MEM_MODULE,
+};
+
+#define CPER_CXL_DEVICE_ID_VALID BIT(0)
+#define CPER_CXL_DEVICE_SN_VALID BIT(1)
+#define CPER_CXL_COMP_EVENT_LOG_VALID BIT(2)
+struct cxl_cper_event_rec {
+ struct {
+ u32 length;
+ u64 validation_bits;
+ struct cper_cxl_event_devid {
+ u16 vendor_id;
+ u16 device_id;
+ u8 func_num;
+ u8 device_num;
+ u8 bus_num;
+ u16 segment_num;
+ u16 slot_num; /* bits 2:0 reserved */
+ u8 reserved;
+ } __packed device_id;
+ struct cper_cxl_event_sn {
+ u32 lower_dw;
+ u32 upper_dw;
+ } __packed dev_serial_num;
+ } __packed hdr;
+
+ union cxl_event event;
+} __packed;
+
+#endif /* _LINUX_CXL_EVENT_H */
diff --git a/include/linux/damon.h b/include/linux/damon.h
index e00ddf1ed3..5881e4ac30 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -2,7 +2,7 @@
/*
* DAMON api
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#ifndef _DAMON_H_
@@ -136,6 +136,9 @@ enum damos_action {
* @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
* @weight_age: Weight of the region's age for prioritization.
*
+ * @get_score: Feedback function for self-tuning quota.
+ * @get_score_arg: Parameter for @get_score
+ *
* To avoid consuming too much CPU time or IO resources for applying the
* &struct damos->action to large memory, DAMON allows users to set time and/or
* size quotas. The quotas can be set by writing non-zero values to &ms and
@@ -153,6 +156,17 @@ enum damos_action {
* You could customize the prioritization logic by setting &weight_sz,
* &weight_nr_accesses, and &weight_age, because monitoring operations are
* encouraged to respect those.
+ *
+ * If the @get_score function pointer is set, DAMON calls it back with
+ * @get_score_arg and reads its return value once every @reset_interval.
+ * Then, DAMON adjusts the effective quota using the return value as a feedback
+ * score to the current quota, using its internal feedback loop algorithm.
+ *
+ * The feedback loop algorithm assumes the quota input and the feedback score
+ * output are in a positive proportional relationship, and the goal of the
+ * tuning is getting the feedback score value of 10,000. If @ms and/or @sz are
+ * set together, those work as a hard limit quota. If neither @ms nor @sz are
+ * set, the mechanism starts from the quota of one byte.
*/
struct damos_quota {
unsigned long ms;
@@ -163,6 +177,9 @@ struct damos_quota {
unsigned int weight_nr_accesses;
unsigned int weight_age;
+ unsigned long (*get_score)(void *arg);
+ void *get_score_arg;
+
/* private: */
/* For throughput estimation */
unsigned long total_charged_sz;
@@ -179,6 +196,9 @@ struct damos_quota {
/* For prioritization */
unsigned long histogram[DAMOS_MAX_SCORE + 1];
unsigned int min_score;
+
+ /* For feedback loop */
+ unsigned long esz_bp;
};
/**
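A hedged sketch of the new @get_score feedback hook: DAMON tunes the effective quota until the returned score converges to 10,000, i.e. until the measured value reaches the target. measure_current_value() and the wiring variables are assumptions, not part of the patch.

	static unsigned long my_quota_score(void *arg)
	{
		unsigned long target = *(unsigned long *)arg;

		/* 10,000 means "at the goal"; assumes target is non-zero. */
		return measure_current_value() * 10000 / target;
	}

	/* Wiring it up on a struct damos_quota instance: */
	quota->get_score = my_quota_score;
	quota->get_score_arg = &my_target;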
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 3da2f0545d..d07cf2f1bb 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -68,12 +68,12 @@ extern const struct qstr dotdot_name;
* large memory footprint increase).
*/
#ifdef CONFIG_64BIT
-# define DNAME_INLINE_LEN 32 /* 192 bytes */
+# define DNAME_INLINE_LEN 40 /* 192 bytes */
#else
# ifdef CONFIG_SMP
-# define DNAME_INLINE_LEN 36 /* 128 bytes */
-# else
# define DNAME_INLINE_LEN 40 /* 128 bytes */
+# else
+# define DNAME_INLINE_LEN 44 /* 128 bytes */
# endif
#endif
@@ -101,8 +101,8 @@ struct dentry {
struct list_head d_lru; /* LRU list */
wait_queue_head_t *d_wait; /* in-lookup ones only */
};
- struct list_head d_child; /* child of parent list */
- struct list_head d_subdirs; /* our children */
+ struct hlist_node d_sib; /* child of parent list */
+ struct hlist_head d_children; /* our children */
/*
* d_alias and d_rcu can share memory
*/
@@ -111,7 +111,7 @@ struct dentry {
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
-} __randomize_layout;
+};
/*
* dentry->d_lock spinlock nesting subclasses:
@@ -151,13 +151,13 @@ struct dentry_operations {
*/
/* d_flags entries */
-#define DCACHE_OP_HASH 0x00000001
-#define DCACHE_OP_COMPARE 0x00000002
-#define DCACHE_OP_REVALIDATE 0x00000004
-#define DCACHE_OP_DELETE 0x00000008
-#define DCACHE_OP_PRUNE 0x00000010
+#define DCACHE_OP_HASH BIT(0)
+#define DCACHE_OP_COMPARE BIT(1)
+#define DCACHE_OP_REVALIDATE BIT(2)
+#define DCACHE_OP_DELETE BIT(3)
+#define DCACHE_OP_PRUNE BIT(4)
-#define DCACHE_DISCONNECTED 0x00000020
+#define DCACHE_DISCONNECTED BIT(5)
/* This dentry is possibly not currently connected to the dcache tree, in
* which case its parent will either be itself, or will have this flag as
* well. nfsd will not use a dentry with this bit set, but will first
@@ -168,50 +168,47 @@ struct dentry_operations {
* dentry into place and return that dentry rather than the passed one,
* typically using d_splice_alias. */
-#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
+#define DCACHE_REFERENCED BIT(6) /* Recently used, don't discard. */
-#define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */
+#define DCACHE_DONTCACHE BIT(7) /* Purge from memory on final dput() */
-#define DCACHE_CANT_MOUNT 0x00000100
-#define DCACHE_GENOCIDE 0x00000200
-#define DCACHE_SHRINK_LIST 0x00000400
+#define DCACHE_CANT_MOUNT BIT(8)
+#define DCACHE_GENOCIDE BIT(9)
+#define DCACHE_SHRINK_LIST BIT(10)
-#define DCACHE_OP_WEAK_REVALIDATE 0x00000800
+#define DCACHE_OP_WEAK_REVALIDATE BIT(11)
-#define DCACHE_NFSFS_RENAMED 0x00001000
+#define DCACHE_NFSFS_RENAMED BIT(12)
/* this dentry has been "silly renamed" and has to be deleted on the last
* dput() */
-#define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000
+#define DCACHE_FSNOTIFY_PARENT_WATCHED BIT(14)
/* Parent inode is watched by some fsnotify listener */
-#define DCACHE_DENTRY_KILLED 0x00008000
+#define DCACHE_DENTRY_KILLED BIT(15)
-#define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */
-#define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */
-#define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */
+#define DCACHE_MOUNTED BIT(16) /* is a mountpoint */
+#define DCACHE_NEED_AUTOMOUNT BIT(17) /* handle automount on this dir */
+#define DCACHE_MANAGE_TRANSIT BIT(18) /* manage transit from this dirent */
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
-#define DCACHE_LRU_LIST 0x00080000
+#define DCACHE_LRU_LIST BIT(19)
-#define DCACHE_ENTRY_TYPE 0x00700000
-#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
-#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */
-#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */
-#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */
-#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */
-#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */
-#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */
+#define DCACHE_ENTRY_TYPE (7 << 20) /* bits 20..22 are for storing type: */
+#define DCACHE_MISS_TYPE (0 << 20) /* Negative dentry */
+#define DCACHE_WHITEOUT_TYPE (1 << 20) /* Whiteout dentry (stop pathwalk) */
+#define DCACHE_DIRECTORY_TYPE (2 << 20) /* Normal directory */
+#define DCACHE_AUTODIR_TYPE (3 << 20) /* Lookupless directory (presumed automount) */
+#define DCACHE_REGULAR_TYPE (4 << 20) /* Regular file type */
+#define DCACHE_SPECIAL_TYPE (5 << 20) /* Other file type */
+#define DCACHE_SYMLINK_TYPE (6 << 20) /* Symlink */
-#define DCACHE_MAY_FREE 0x00800000
-#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
-#define DCACHE_NOKEY_NAME 0x02000000 /* Encrypted name encoded without key */
-#define DCACHE_OP_REAL 0x04000000
+#define DCACHE_NOKEY_NAME BIT(25) /* Encrypted name encoded without key */
+#define DCACHE_OP_REAL BIT(26)
-#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
-#define DCACHE_DENTRY_CURSOR 0x20000000
-#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */
+#define DCACHE_PAR_LOOKUP BIT(28) /* being looked up (with parent locked shared) */
+#define DCACHE_DENTRY_CURSOR BIT(29)
+#define DCACHE_NORCU BIT(30) /* No RCU delay for freeing */
extern seqlock_t rename_lock;
@@ -220,8 +217,6 @@ extern seqlock_t rename_lock;
*/
extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
-extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
-extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
@@ -242,15 +237,11 @@ extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
-extern void shrink_dcache_for_umount(struct super_block *);
extern void d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_make_root(struct inode *);
-/* <clickety>-<click> the ramfs-type tree */
-extern void d_genocide(struct dentry *);
-
extern void d_mark_tmpfile(struct file *, struct inode *);
extern void d_tmpfile(struct file *, struct inode *);
@@ -274,12 +265,8 @@ extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
-/* appendix may either be NULL or be used for transname suffixes */
extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
-extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
-extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
- const struct qstr *name, unsigned *seq);
static inline unsigned d_count(const struct dentry *dentry)
{
@@ -301,20 +288,40 @@ extern char *dentry_path(const struct dentry *, char *, int);
/* Allocation counts.. */
/**
- * dget, dget_dlock - get a reference to a dentry
- * @dentry: dentry to get a reference to
+ * dget_dlock - get a reference to a dentry
+ * @dentry: dentry to get a reference to
*
- * Given a dentry or %NULL pointer increment the reference count
- * if appropriate and return the dentry. A dentry will not be
- * destroyed when it has references.
+ * Given a live dentry, increment the reference count and return the dentry.
+ * Caller must hold @dentry->d_lock. Making sure that the dentry is alive is
+ * the caller's responsibility. There are many conditions sufficient to
+ * guarantee that; e.g. anything with a non-negative refcount is alive, as is
+ * anything hashed, anything positive, anyone's parent, etc.
*/
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
- if (dentry)
- dentry->d_lockref.count++;
+ dentry->d_lockref.count++;
return dentry;
}
+
+/**
+ * dget - get a reference to a dentry
+ * @dentry: dentry to get a reference to
+ *
+ * Given a dentry or %NULL pointer increment the reference count
+ * if appropriate and return the dentry. A dentry will not be
+ * destroyed when it has references. Conversely, a dentry with
+ * no references can disappear for any number of reasons, starting
+ * with memory pressure. In other words, that primitive is
+ * used to clone an existing reference; using it on something with
+ * zero refcount is a bug.
+ *
+ * NOTE: it will spin if @dentry->d_lock is held. From the deadlock
+ * avoidance point of view it is equivalent to spin_lock()/increment
+ * refcount/spin_unlock(), so calling it under @dentry->d_lock is
+ * always a bug; so is calling it under ->d_lock on any of its descendants.
+ *
+ */
static inline struct dentry *dget(struct dentry *dentry)
{
if (dentry)
@@ -325,12 +332,11 @@ static inline struct dentry *dget(struct dentry *dentry)
extern struct dentry *dget_parent(struct dentry *dentry);
/**
- * d_unhashed - is dentry hashed
- * @dentry: entry to check
+ * d_unhashed - is dentry hashed
+ * @dentry: entry to check
*
- * Returns true if the dentry passed is not currently hashed.
+ * Returns true if the dentry passed is not currently hashed.
*/
-
static inline int d_unhashed(const struct dentry *dentry)
{
return hlist_bl_unhashed(&dentry->d_hash);
@@ -490,14 +496,6 @@ static inline int simple_positive(const struct dentry *dentry)
return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
-extern void d_set_fallthru(struct dentry *dentry);
-
-static inline bool d_is_fallthru(const struct dentry *dentry)
-{
- return dentry->d_flags & DCACHE_FALLTHRU;
-}
-
-
extern int sysctl_vfs_cache_pressure;
static inline unsigned long vfs_pressure_ratio(unsigned long val)
@@ -547,21 +545,6 @@ static inline struct inode *d_backing_inode(const struct dentry *upper)
}
/**
- * d_backing_dentry - Get upper or lower dentry we should be using
- * @upper: The upper layer
- *
- * This is the helper that should be used to get the dentry of the inode that
- * will be used if this dentry were opened as a file. It may be the upper
- * dentry or it may be a lower dentry pinned by the upper.
- *
- * Normal filesystems should not use this to access their own dentries.
- */
-static inline struct dentry *d_backing_dentry(struct dentry *upper)
-{
- return upper;
-}
-
-/**
* d_real - Return the real dentry
* @dentry: the dentry to query
* @inode: inode to select the dentry from multiple layers (can be NULL)
@@ -600,4 +583,14 @@ struct name_snapshot {
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
+static inline struct dentry *d_first_child(const struct dentry *dentry)
+{
+ return hlist_entry_safe(dentry->d_children.first, struct dentry, d_sib);
+}
+
+static inline struct dentry *d_next_sibling(const struct dentry *dentry)
+{
+ return hlist_entry_safe(dentry->d_sib.next, struct dentry, d_sib);
+}
+
#endif /* __LINUX_DCACHE_H */
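A hedged sketch of walking a dentry's children with the new helpers (locking against concurrent d_sib changes, e.g. the parent's ->d_lock, is deliberately elided):

	static void for_each_child(struct dentry *parent)
	{
		struct dentry *d;

		/* hlist_entry_safe() in the helpers yields NULL at the end. */
		for (d = d_first_child(parent); d; d = d_next_sibling(d)) {
			/* d is each child in turn, linked through d_sib */
		}
	}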
diff --git a/include/linux/device.h b/include/linux/device.h
index 6c83294395..b9f5464f44 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -42,7 +42,6 @@ struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
-struct iommu_ops;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;
@@ -63,7 +62,7 @@ struct msi_device_data;
*/
struct subsys_interface {
const char *name;
- struct bus_type *subsys;
+ const struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
@@ -72,9 +71,9 @@ struct subsys_interface {
int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);
-int subsys_system_register(struct bus_type *subsys,
+int subsys_system_register(const struct bus_type *subsys,
const struct attribute_group **groups);
-int subsys_virtual_register(struct bus_type *subsys,
+int subsys_virtual_register(const struct bus_type *subsys,
const struct attribute_group **groups);
/*
@@ -662,7 +661,6 @@ struct device_physical_location {
* @id: device instance
* @devres_lock: Spinlock to protect the resource of the device.
* @devres_head: The resources list of the device.
- * @knode_class: The node used to add the device to the class list.
* @class: The class of the device.
* @groups: Optional attribute groups.
* @release: Callback to free the device after all references have
@@ -1073,7 +1071,6 @@ int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
-int device_is_dependent(struct device *dev, void *target);
static inline bool device_supports_offline(struct device *dev)
{
@@ -1250,6 +1247,7 @@ void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
+void device_link_wait_removal(void);
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index ae10c43227..5ef4ec1c36 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -62,9 +62,6 @@ struct fwnode_handle;
* this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
- * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
- * driver implementations to a bus and allow the driver to do
- * bus-specific setup
* @need_parent_lock: When probing or removing a device on this bus, the
* device core should lock the device's parent.
*
@@ -104,8 +101,6 @@ struct bus_type {
const struct dev_pm_ops *pm;
- const struct iommu_ops *iommu_ops;
-
bool need_parent_lock;
};
@@ -232,7 +227,7 @@ bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev)
int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *));
-void bus_sort_breadthfirst(struct bus_type *bus,
+void bus_sort_breadthfirst(const struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b));
/*
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index abf3d3bfb6..c576b49c55 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -40,8 +40,6 @@ struct fwnode_handle;
* for the devices belonging to the class. Usually tied to
* device's namespace.
* @pm: The default device power management operations of this class.
- * @p: The private data of the driver core, no one other than the
- * driver core can touch this.
*
* A class is a higher-level view of a device that abstracts out low-level
* implementation details. Drivers may see a SCSI disk or an ATA disk, but,
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3f31baa329..8ff4add71f 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -343,16 +343,19 @@ struct dma_buf {
/**
* @exp_name:
*
- * Name of the exporter; useful for debugging. See the
- * DMA_BUF_SET_NAME IOCTL.
+ * Name of the exporter; useful for debugging. Must not be NULL.
*/
const char *exp_name;
/**
* @name:
*
- * Userspace-provided name; useful for accounting and debugging,
- * protected by dma_resv_lock() on @resv and @name_lock for read access.
+ * Userspace-provided name. Default value is NULL. If not NULL, the
+ * length cannot exceed DMA_BUF_NAME_LEN, including the NUL
+ * terminator. Useful for accounting and debugging. Read/write
+ * accesses are protected by @name_lock.
+ *
+ * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B
*/
const char *name;
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 18aade1958..3eb3589ff4 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -21,7 +21,6 @@ struct bus_dma_region {
phys_addr_t cpu_start;
dma_addr_t dma_start;
u64 size;
- u64 offset;
};
static inline dma_addr_t translate_phys_to_dma(struct device *dev,
@@ -29,9 +28,12 @@ static inline dma_addr_t translate_phys_to_dma(struct device *dev,
{
const struct bus_dma_region *m;
- for (m = dev->dma_range_map; m->size; m++)
- if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
- return (dma_addr_t)paddr - m->offset;
+ for (m = dev->dma_range_map; m->size; m++) {
+ u64 offset = paddr - m->cpu_start;
+
+ if (paddr >= m->cpu_start && offset < m->size)
+ return m->dma_start + offset;
+ }
/* make sure dma_capable fails when no translation is available */
return DMA_MAPPING_ERROR;
@@ -42,9 +44,12 @@ static inline phys_addr_t translate_dma_to_phys(struct device *dev,
{
const struct bus_dma_region *m;
- for (m = dev->dma_range_map; m->size; m++)
- if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
- return (phys_addr_t)dma_addr + m->offset;
+ for (m = dev->dma_range_map; m->size; m++) {
+ u64 offset = dma_addr - m->dma_start;
+
+ if (dma_addr >= m->dma_start && offset < m->size)
+ return m->cpu_start + offset;
+ }
return (phys_addr_t)-1;
}
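For reference, a hypothetical range map (not part of the patch): with the 'offset' member gone, a translation is now expressed purely as dma = dma_start + (paddr - cpu_start) within a matching entry.

	static const struct bus_dma_region example_map[] = {
		/* CPU 0x80000000..+64M appears at bus address 0. */
		{ .cpu_start = 0x80000000, .dma_start = 0x00000000,
		  .size = 0x4000000 },
		{ }	/* size == 0 terminates the walk */
	};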
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index b3772edca2..c3f9bb6602 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
+#include <linux/timekeeping.h>
struct dma_fence;
struct dma_fence_ops;
@@ -681,4 +682,11 @@ static inline bool dma_fence_is_container(struct dma_fence *fence)
return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
}
+#define DMA_FENCE_WARN(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
+ ##args); \
+ } while (0)
+
#endif /* __LINUX_DMA_FENCE_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index f2fc203fb8..4abc60f042 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -11,6 +11,7 @@
#include <linux/slab.h>
struct cma;
+struct iommu_ops;
/*
* Values for struct dma_map_ops.flags:
@@ -426,10 +427,10 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
+ bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, const struct iommu_ops *iommu, bool coherent)
+ u64 size, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
@@ -443,10 +444,10 @@ static inline void arch_teardown_dma_ops(struct device *dev)
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
#ifdef CONFIG_DMA_API_DEBUG
-void dma_debug_add_bus(struct bus_type *bus);
+void dma_debug_add_bus(const struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
-static inline void dma_debug_add_bus(struct bus_type *bus)
+static inline void dma_debug_add_bus(const struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index 578fc5fa37..e37344f6a2 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -10,6 +10,8 @@
#include <uapi/linux/dpll.h>
#include <linux/device.h>
#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
struct dpll_device;
struct dpll_pin;
@@ -17,9 +19,6 @@ struct dpll_pin;
struct dpll_device_ops {
int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_mode *mode, struct netlink_ext_ack *extack);
- bool (*mode_supported)(const struct dpll_device *dpll, void *dpll_priv,
- const enum dpll_mode mode,
- struct netlink_ext_ack *extack);
int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_lock_status *status,
struct netlink_ext_ack *extack);
@@ -80,6 +79,9 @@ struct dpll_pin_ops {
const struct dpll_device *dpll, void *dpll_priv,
const s32 phase_adjust,
struct netlink_ext_ack *extack);
+ int (*ffo_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack);
};
struct dpll_pin_frequency {
@@ -120,15 +122,24 @@ struct dpll_pin_properties {
};
#if IS_ENABLED(CONFIG_DPLL)
-size_t dpll_msg_pin_handle_size(struct dpll_pin *pin);
-int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin);
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
+void dpll_netdev_pin_clear(struct net_device *dev);
+
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev);
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+ const struct net_device *dev);
#else
-static inline size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
+static inline void
+dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) { }
+static inline void dpll_netdev_pin_clear(struct net_device *dev) { }
+
+static inline size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
{
return 0;
}
-static inline int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+static inline int
+dpll_netdev_add_pin_handle(struct sk_buff *msg, const struct net_device *dev)
{
return 0;
}
diff --git a/include/linux/edac.h b/include/linux/edac.h
index fa4bda2a70..b4ee8961e6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -30,7 +30,7 @@ struct device;
extern int edac_op_state;
-struct bus_type *edac_get_sysfs_subsys(void);
+const struct bus_type *edac_get_sysfs_subsys(void);
static inline void opstate_init(void)
{
@@ -187,6 +187,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_NVDIMM: Non-volatile RAM
* @MEM_WIO2: Wide I/O 2.
* @MEM_HBM2: High bandwidth Memory Gen 2.
+ * @MEM_HBM3: High bandwidth Memory Gen 3.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -218,6 +219,7 @@ enum mem_type {
MEM_NVDIMM,
MEM_WIO2,
MEM_HBM2,
+ MEM_HBM3,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -248,6 +250,7 @@ enum mem_type {
#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
+#define MEM_FLAG_HBM3 BIT(MEM_HBM3)
/**
* enum edac_type - Error Detection and Correction capabilities and mode
@@ -492,7 +495,7 @@ struct edac_raw_error_desc {
*/
struct mem_ctl_info {
struct device dev;
- struct bus_type *bus;
+ const struct bus_type *bus;
struct list_head link; /* for global list of mem_ctl_info structs */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 9cc5bf32f6..c74f47711f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -40,6 +40,7 @@ struct screen_info;
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_ACCESS_DENIED (15 | (1UL << (BITS_PER_LONG-1)))
#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1)))
#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
@@ -1348,4 +1349,15 @@ bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+/*
+ * efivar ops event type
+ */
+#define EFIVAR_OPS_RDONLY 0
+#define EFIVAR_OPS_RDWR 1
+
+extern struct blocking_notifier_head efivar_ops_nh;
+
+void efivars_generic_ops_register(void);
+void efivars_generic_ops_unregister(void);
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index b9caa01dfa..88d91e0874 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -224,7 +224,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util,
unsigned long allowed_cpu_cap)
{
- unsigned long freq, scale_cpu;
+ unsigned long freq, ref_freq, scale_cpu;
struct em_perf_state *ps;
int cpu;
@@ -241,11 +241,10 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
*/
cpu = cpumask_first(to_cpumask(pd->cpus));
scale_cpu = arch_scale_cpu_capacity(cpu);
- ps = &pd->table[pd->nr_perf_states - 1];
+ ref_freq = arch_scale_freq_ref(cpu);
- max_util = map_util_perf(max_util);
max_util = min(max_util, allowed_cpu_cap);
- freq = map_util_freq(max_util, ps->frequency, scale_cpu);
+ freq = map_util_freq(max_util, ref_freq, scale_cpu);
/*
* Find the lowest performance state of the Energy Model above the
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index d95ab85f96..b0fb775a60 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -7,6 +7,11 @@
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
+#include <linux/context_tracking.h>
+#include <linux/livepatch.h>
+#include <linux/resume_user_mode.h>
+#include <linux/tick.h>
+#include <linux/kmsan.h>
#include <asm/entry-common.h>
@@ -98,7 +103,19 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
* done between establishing state and enabling interrupts. The caller must
* enable interrupts before invoking syscall_enter_from_user_mode_work().
*/
-void enter_from_user_mode(struct pt_regs *regs);
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
+{
+ arch_enter_from_user_mode(regs);
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
+ CT_WARN_ON(__ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+
+ instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
/**
* syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
@@ -117,6 +134,9 @@ void enter_from_user_mode(struct pt_regs *regs);
*/
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+long syscall_trace_enter(struct pt_regs *regs, long syscall,
+ unsigned long work);
+
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking
* a syscall
@@ -140,7 +160,15 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
* ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
* 2) Invocation of audit_syscall_entry()
*/
-long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+
+ if (work & SYSCALL_WORK_ENTER)
+ syscall = syscall_trace_enter(regs, syscall, work);
+
+ return syscall;
+}
/**
* syscall_enter_from_user_mode - Establish state and check and handle work
@@ -159,7 +187,19 @@ long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
* Returns: The original or a modified syscall number. See
* syscall_enter_from_user_mode_work() for further explanation.
*/
-long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+{
+ long ret;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ local_irq_enable();
+ ret = syscall_enter_from_user_mode_work(regs, syscall);
+ instrumentation_end();
+
+ return ret;
+}
/**
* local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
@@ -259,6 +299,43 @@ static __always_inline void arch_exit_to_user_mode(void) { }
void arch_do_signal_or_restart(struct pt_regs *regs);
/**
+ * exit_to_user_mode_loop - do any pending work before leaving to user space
+ * @regs: Pointer to pt_regs on entry stack
+ * @ti_work: TIF work flags as read by the caller
+ */
+unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ unsigned long ti_work);
+
+/**
+ * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * 1) check that interrupts are disabled
+ * 2) call tick_nohz_user_enter_prepare()
+ * 3) call exit_to_user_mode_loop() if any flags from
+ * EXIT_TO_USER_MODE_WORK are set
+ * 4) check that interrupts are still disabled
+ */
+static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ unsigned long ti_work;
+
+ lockdep_assert_irqs_disabled();
+
+ /* Flush pending rcuog wakeup before the last need_resched() check */
+ tick_nohz_user_enter_prepare();
+
+ ti_work = read_thread_flags();
+ if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+ arch_exit_to_user_mode_prepare(regs, ti_work);
+
+ /* Ensure that kernel state is sane for a return to userspace */
+ kmap_assert_nomap();
+ lockdep_assert_irqs_disabled();
+ lockdep_sys_exit();
+}
+
+/**
* exit_to_user_mode - Fixup state when exiting to user mode
*
* Syscall/interrupt exit enables interrupts, but the kernel state is
@@ -276,7 +353,17 @@ void arch_do_signal_or_restart(struct pt_regs *regs);
* non-instrumentable.
* The caller has to invoke syscall_exit_to_user_mode_work() before this.
*/
-void exit_to_user_mode(void);
+static __always_inline void exit_to_user_mode(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ user_enter_irqoff();
+ arch_exit_to_user_mode();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
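A hedged arch-side sketch of how these now-inline helpers pair up on a syscall path; the register layout and invoke_syscall() helper are arch-specific assumptions, not part of the patch:

	static void arch_do_syscall(struct pt_regs *regs)
	{
		long nr = syscall_enter_from_user_mode(regs, regs->syscallno);

		invoke_syscall(regs, nr);	/* assumed arch helper */
		syscall_exit_to_user_mode(regs);
	}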
/**
* syscall_exit_to_user_mode_work - Handle work before returning to user mode
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 224645f17c..297231854a 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -608,6 +608,31 @@ static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
}
/**
+ * eth_skb_pkt_type - Assign packet type if destination address does not match
+ * @skb: Assigned a packet type if address does not match @dev address
+ * @dev: Network device used to compare packet address against
+ *
+ * If the destination MAC address of the packet does not match the network
+ * device address, assign an appropriate packet type.
+ */
+static inline void eth_skb_pkt_type(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+
+ if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
+ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else {
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ }
+}
+
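
In-tree callers reach this helper through eth_type_trans(); a driver that parses the Ethernet header itself could call it directly. A minimal, hedged receive-path sketch (the function name is hypothetical):

    static void my_rx_complete(struct net_device *dev, struct sk_buff *skb)
    {
        skb_reset_mac_header(skb);
        /* Leaves the default PACKET_HOST unless the address differs. */
        eth_skb_pkt_type(skb, dev);
        skb->protocol = eth_hdr(skb)->h_proto;
        netif_receive_skb(skb);
    }
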
+/**
* eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 689028257f..325e0778e9 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -95,6 +95,7 @@ struct kernel_ethtool_ringparam {
* @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
* @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
* @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capture for setting tx_push_buf_len
+ * @ETHTOOL_RING_USE_TCP_DATA_SPLIT: capture for setting tcp_data_split
*/
enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
@@ -102,6 +103,7 @@ enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_TX_PUSH = BIT(2),
ETHTOOL_RING_USE_RX_PUSH = BIT(3),
ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4),
+ ETHTOOL_RING_USE_TCP_DATA_SPLIT = BIT(5),
};
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
@@ -409,8 +411,10 @@ struct ethtool_pause_stats {
* not entire FEC data blocks. This is a non-standard statistic.
* Reported to user space as %ETHTOOL_A_FEC_STAT_CORR_BITS.
*
- * @lane: per-lane/PCS-instance counts as defined by the standard
- * @total: error counts for the entire port, for drivers incapable of reporting
+ * For each of the above fields, the two substructure members are:
+ *
+ * - @lanes: per-lane/PCS-instance counts as defined by the standard
+ * - @total: error counts for the entire port, for drivers incapable of reporting
* per-lane stats
*
* Drivers should fill in either only total or per-lane statistics, core
@@ -595,9 +599,46 @@ struct ethtool_mm_stats {
};
/**
+ * struct ethtool_rxfh_param - RXFH (RSS) parameters
+ * @hfunc: Defines the current RSS hash function used by HW (or to be set to).
+ * Valid values are one of the %ETH_RSS_HASH_*.
+ * @indir_size: On SET, the array size of the user buffer for the
+ * indirection table, which may be zero, or
+ * %ETH_RXFH_INDIR_NO_CHANGE. On GET (read from the driver),
+ * the array size of the hardware indirection table.
+ * @indir: The indirection table of size @indir_size entries.
+ * @key_size: On SET, the array size of the user buffer for the hash key,
+ * which may be zero. On GET (read from the driver), the size of the
+ * hardware hash key.
+ * @key: The hash key of size @key_size bytes.
+ * @rss_context: RSS context identifier. Context 0 is the default for normal
+ * traffic; other contexts can be referenced as the destination for RX flow
+ * classification rules. On SET, %ETH_RXFH_CONTEXT_ALLOC is used
+ * to allocate a new RSS context; on return this field will
+ * contain the ID of the newly allocated context.
+ * @rss_delete: Set to non-zero to remove the @rss_context context.
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ */
+struct ethtool_rxfh_param {
+ u8 hfunc;
+ u32 indir_size;
+ u32 *indir;
+ u32 key_size;
+ u8 *key;
+ u32 rss_context;
+ u8 rss_delete;
+ u8 input_xfrm;
+};
+
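
With the parameters consolidated into one struct, a driver callback reads or fills only the fields it supports instead of juggling separate pointers. A hedged sketch of a new-style get_rxfh (the priv fields are hypothetical):

    static int my_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
    {
        struct my_priv *priv = netdev_priv(dev);

        rxfh->hfunc = ETH_RSS_HASH_TOP;
        if (rxfh->indir)
            memcpy(rxfh->indir, priv->rss_indir, priv->indir_size * sizeof(u32));
        if (rxfh->key)
            memcpy(rxfh->key, priv->rss_key, priv->key_size);
        return 0;
    }
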
+/**
* struct ethtool_ops - optional netdev operations
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
+ * @cap_rss_ctx_supported: indicates if the driver supports RSS
+ * contexts.
+ * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor
+ * RSS.
* @supported_coalesce_params: supported types of interrupt coalescing.
* @supported_ring_params: supported ring params.
* @get_drvinfo: Report driver/device information. Modern drivers no
@@ -694,15 +735,6 @@ struct ethtool_mm_stats {
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
- * @get_rxfh_context: Get the contents of the RX flow hash indirection table,
- * hash key, and/or hash function assiciated to the given rss context.
- * Returns a negative error code or zero.
- * @set_rxfh_context: Create, remove and configure RSS contexts. Allows setting
- * the contents of the RX flow hash indirection table, hash key, and/or
- * hash function associated to the given context. Arguments which are set
- * to %NULL or zero will remain unchanged.
- * Returns a negative error code or zero. An error code must be returned
- * if at least one unsupported change was requested.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@@ -785,6 +817,8 @@ struct ethtool_mm_stats {
*/
struct ethtool_ops {
u32 cap_link_lanes_supported:1;
+ u32 cap_rss_ctx_supported:1;
+ u32 cap_rss_sym_xor_supported:1;
u32 supported_coalesce_params;
u32 supported_ring_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
@@ -844,15 +878,9 @@ struct ethtool_ops {
int (*reset)(struct net_device *, u32 *);
u32 (*get_rxfh_key_size)(struct net_device *);
u32 (*get_rxfh_indir_size)(struct net_device *);
- int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
- u8 *hfunc);
- int (*set_rxfh)(struct net_device *, const u32 *indir,
- const u8 *key, const u8 hfunc);
- int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key,
- u8 *hfunc, u32 rss_context);
- int (*set_rxfh_context)(struct net_device *, const u32 *indir,
- const u8 *key, const u8 hfunc,
- u32 *rss_context, bool delete);
+ int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
+ int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
+ struct netlink_ext_ack *extack);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
@@ -1044,6 +1072,14 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
}
/**
+ * ethtool_get_ts_info_by_layer - Obtains time stamping capabilities from the MAC or PHY layer.
+ * @dev: pointer to net_device structure
+ * @info: buffer to hold the result
+ * Returns zero on success, non-zero otherwise.
+ */
+int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info);
+
+/**
* ethtool_sprintf - Write formatted string to ethtool string data
* @data: Pointer to a pointer to the start of string to update
* @fmt: Format of string to write
@@ -1053,6 +1089,19 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
*/
extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+/**
+ * ethtool_puts - Write string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to update
+ * @str: String to write
+ *
+ * Write string to *data without a trailing newline. Update *data
+ * to point at start of next string.
+ *
+ * Prefer this function to ethtool_sprintf() when given only
+ * two arguments or if @fmt is just "%s".
+ */
+extern void ethtool_puts(u8 **data, const char *str);
+
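
This replaces the common ethtool_sprintf(&data, "%s", name) pattern for fixed string tables; a hedged sketch with a hypothetical stats-name array:

    static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
    {
        int i;

        if (sset != ETH_SS_STATS)
            return;
        for (i = 0; i < ARRAY_SIZE(my_stat_names); i++)
            ethtool_puts(&data, my_stat_names[i]);
    }
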
/* Link mode to forced speed capabilities maps */
struct ethtool_forced_speed_map {
u32 speed;
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 5620894315..e32bee4345 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -35,8 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-__u64 eventfd_signal(struct eventfd_ctx *ctx);
-__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask);
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
@@ -58,15 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS);
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx)
+static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
- return -ENOSYS;
-}
-
-static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
- unsigned mask)
-{
- return -ENOSYS;
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
@@ -92,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
#endif
+static inline void eventfd_signal(struct eventfd_ctx *ctx)
+{
+ eventfd_signal_mask(ctx, 0);
+}
+
#endif /* _LINUX_EVENTFD_H */
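
Callers that used to write eventfd_signal(ctx, 1) now call the counter-less wrapper, which always adds one. A hedged completion-path sketch (the driver struct is hypothetical; ctx would come from eventfd_ctx_fdget() on a userspace-supplied fd):

    static void my_work_done(struct my_dev *dev)
    {
        if (dev->done_evt)
            eventfd_signal(dev->done_evt);   /* adds 1 and wakes pollers */
    }
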
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 01fc495a83..36ec884320 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -31,6 +31,7 @@ extern void evm_inode_post_setxattr(struct dentry *dentry,
const char *xattr_name,
const void *xattr_value,
size_t xattr_value_len);
+extern int evm_inode_copy_up_xattr(const char *name);
extern int evm_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *xattr_name);
extern void evm_inode_post_removexattr(struct dentry *dentry,
@@ -117,6 +118,11 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry,
return;
}
+static inline int evm_inode_copy_up_xattr(const char *name)
+{
+ return 0;
+}
+
static inline int evm_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry,
const char *xattr_name)
diff --git a/include/linux/export.h b/include/linux/export.h
index 9911508a96..0bbd02fd35 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -7,15 +7,6 @@
#include <linux/stringify.h>
/*
- * Export symbols from the kernel to modules. Forked from module.h
- * to reduce the amount of pointless cruft we feed to gcc when only
- * exporting a simple symbol or two.
- *
- * Try not to add #includes here. It slows compilation and makes kernel
- * hackers place grumpy comments in header files.
- */
-
-/*
* This comment block is used by fixdep. Please do not remove.
*
* When CONFIG_MODVERSIONS is changed from n to y, all source files having
@@ -23,15 +14,6 @@
* side effect of the *.o build rule.
*/
-#ifndef __ASSEMBLY__
-#ifdef MODULE
-extern struct module __this_module;
-#define THIS_MODULE (&__this_module)
-#else
-#define THIS_MODULE ((struct module *)0)
-#endif
-#endif /* __ASSEMBLY__ */
-
#ifdef CONFIG_64BIT
#define __EXPORT_SYMBOL_REF(sym) \
.balign 8 ASM_NL \
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 1812b7ec84..3f016d3c1e 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -442,7 +442,7 @@ struct f2fs_sit_block {
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8)
-#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
+#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 94e2c44c65..05dc962489 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -536,6 +536,7 @@ extern ssize_t fb_io_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
#define __FB_DEFAULT_IOMEM_OPS_RDWR \
.fb_read = fb_io_read, \
@@ -547,7 +548,7 @@ extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
.fb_imageblit = cfb_imageblit
#define __FB_DEFAULT_IOMEM_OPS_MMAP \
- .fb_mmap = NULL /* default implementation */
+ .fb_mmap = fb_io_mmap
#define FB_DEFAULT_IOMEM_OPS \
__FB_DEFAULT_IOMEM_OPS_RDWR, \
@@ -848,7 +849,10 @@ static inline bool fb_modesetting_disabled(const char *drvname)
}
#endif
-/* Convenience logging macros */
+/*
+ * Convenience logging macros
+ */
+
#define fb_err(fb_info, fmt, ...) \
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_notice(info, fmt, ...) \
@@ -860,4 +864,12 @@ static inline bool fb_modesetting_disabled(const char *drvname)
#define fb_dbg(fb_info, fmt, ...) \
pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_warn_once(fb_info, fmt, ...) \
+ pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
+#define fb_WARN_ONCE(fb_info, condition, fmt, ...) \
+ WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_WARN_ON_ONCE(fb_info, x) \
+ fb_WARN_ONCE(fb_info, (x), "%s", "fb_WARN_ON_ONCE(" __stringify(x) ")")
+
#endif /* _LINUX_FB_H */
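
The new macros follow the existing fb_err()/fb_warn() pattern but fire at most once per boot; a short hedged usage sketch:

    static int my_fb_check(struct fb_info *info)   /* hypothetical */
    {
        if (fb_WARN_ON_ONCE(info, !info->fbops))
            return -EINVAL;
        if (!info->screen_base)
            fb_warn_once(info, "no screen memory mapped\n");
        return 0;
    }
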
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index bc4c3287a6..78c8326d74 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -83,12 +83,17 @@ struct dentry;
static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
-
- if (fd < fdt->max_fds) {
- fd = array_index_nospec(fd, fdt->max_fds);
- return rcu_dereference_raw(fdt->fd[fd]);
- }
- return NULL;
+ unsigned long mask = array_index_mask_nospec(fd, fdt->max_fds);
+ struct file *needs_masking;
+
+ /*
+ * 'mask' is zero for an out-of-bounds fd, all ones for ok.
+ * 'fd&mask' is 'fd' for ok, or 0 for out of bounds.
+ *
+ * Accessing fdt->fd[0] is ok, but needs masking of the result.
+ */
+ needs_masking = rcu_dereference_raw(fdt->fd[fd&mask]);
+ return (struct file *)(mask & (unsigned long)needs_masking);
}
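
The rewrite replaces a conditional branch with unconditional masking, so a mispredicted bounds check can never feed an out-of-bounds pointer into the dependent load. The same pattern in a freestanding, hedged form (this mirrors the generic array_index_mask_nospec(), not a new API):

    static void *load_nospec(void **table, unsigned long idx, unsigned long size)
    {
        /* ~0UL when idx < size, 0UL otherwise - computed without a branch. */
        unsigned long mask = ~(long)(idx | (size - 1 - idx)) >> (BITS_PER_LONG - 1);

        /* Slot 0 may be read speculatively, but the result is masked to NULL. */
        return (void *)(mask & (unsigned long)table[idx & mask]);
    }
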
static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd)
@@ -114,7 +119,7 @@ int iterate_fd(struct files_struct *, unsigned,
extern int close_fd(unsigned int fd);
extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
-extern struct file *close_fd_get_file(unsigned int fd);
+extern struct file *file_close_fd(unsigned int fd);
extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
struct files_struct **new_fdp);
diff --git a/include/linux/file.h b/include/linux/file.h
index 6e9099d293..6834a29338 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -96,18 +96,8 @@ DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
extern void fd_install(unsigned int fd, struct file *file);
-extern int __receive_fd(struct file *file, int __user *ufd,
- unsigned int o_flags);
+int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags);
-extern int receive_fd(struct file *file, unsigned int o_flags);
-
-static inline int receive_fd_user(struct file *file, int __user *ufd,
- unsigned int o_flags)
-{
- if (ufd == NULL)
- return -EFAULT;
- return __receive_fd(file, ufd, o_flags);
-}
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);
extern void flush_delayed_fput(void);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 02e043bd93..f537a394c4 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1070,7 +1070,7 @@ struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
-void bpf_prog_pack_free(struct bpf_binary_header *hdr);
+void bpf_prog_pack_free(void *ptr, u32 size);
static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index bd3fc75d4f..dd9f2d765e 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -75,7 +75,7 @@ void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
-extern struct bus_type fw_bus_type;
+extern const struct bus_type fw_bus_type;
struct fw_card_driver;
struct fw_node;
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index de7fea3bca..0311858b46 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -27,6 +27,7 @@ struct firmware {
* @FW_UPLOAD_ERR_INVALID_SIZE: invalid firmware image size
* @FW_UPLOAD_ERR_RW_ERROR: read or write to HW failed, see kernel log
* @FW_UPLOAD_ERR_WEAROUT: FLASH device is approaching wear-out, wait & retry
+ * @FW_UPLOAD_ERR_FW_INVALID: invalid firmware file
* @FW_UPLOAD_ERR_MAX: Maximum error code marker
*/
enum fw_upload_err {
@@ -38,6 +39,7 @@ enum fw_upload_err {
FW_UPLOAD_ERR_INVALID_SIZE,
FW_UPLOAD_ERR_RW_ERROR,
FW_UPLOAD_ERR_WEAROUT,
+ FW_UPLOAD_ERR_FW_INVALID,
FW_UPLOAD_ERR_MAX
};
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
index 5c28298a98..366243ee96 100644
--- a/include/linux/firmware/qcom/qcom_qseecom.h
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -10,6 +10,7 @@
#define __QCOM_QSEECOM_H
#include <linux/auxiliary_bus.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
@@ -25,11 +26,56 @@ struct qseecom_client {
};
/**
+ * qseecom_scm_dev() - Get the SCM device associated with the QSEECOM client.
+ * @client: The QSEECOM client device.
+ *
+ * Returns the SCM device under which the provided QSEECOM client device
+ * operates. This function is intended to be used for DMA allocations.
+ */
+static inline struct device *qseecom_scm_dev(struct qseecom_client *client)
+{
+ return client->aux_dev.dev.parent->parent;
+}
+
+/**
+ * qseecom_dma_alloc() - Allocate DMA memory for a QSEECOM client.
+ * @client: The QSEECOM client to allocate the memory for.
+ * @size: The number of bytes to allocate.
+ * @dma_handle: Pointer to where the DMA address should be stored.
+ * @gfp: Allocation flags.
+ *
+ * Wrapper function for dma_alloc_coherent(), allocating DMA memory usable for
+ * TZ/QSEECOM communication. Refer to dma_alloc_coherent() for details.
+ */
+static inline void *qseecom_dma_alloc(struct qseecom_client *client, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ return dma_alloc_coherent(qseecom_scm_dev(client), size, dma_handle, gfp);
+}
+
+/**
+ * qseecom_dma_free() - Free QSEECOM DMA memory.
+ * @client: The QSEECOM client for which the memory has been allocated.
+ * @size: The number of bytes allocated.
+ * @cpu_addr: Virtual memory address to free.
+ * @dma_handle: DMA memory address to free.
+ *
+ * Wrapper function for dma_free_coherent(), freeing memory previously
+ * allocated with qseecom_dma_alloc(). Refer to dma_free_coherent() for
+ * details.
+ */
+static inline void qseecom_dma_free(struct qseecom_client *client, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_coherent(qseecom_scm_dev(client), size, cpu_addr, dma_handle);
+}
+
+/**
* qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @client: The QSEECOM client associated with the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given client and reads
@@ -43,8 +89,9 @@ struct qseecom_client {
*
* Return: Zero on success, nonzero on failure.
*/
-static inline int qcom_qseecom_app_send(struct qseecom_client *client, void *req, size_t req_size,
- void *rsp, size_t rsp_size)
+static inline int qcom_qseecom_app_send(struct qseecom_client *client,
+ dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
}
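
Putting the new helpers together: a client allocates coherent buffers against the SCM device and passes bus addresses to the app. A hedged sketch with hypothetical request/response layouts:

    static int my_app_call(struct qseecom_client *client,
                           struct my_req *tmpl, struct my_rsp *out)
    {
        dma_addr_t req_dma, rsp_dma;
        void *req, *rsp;
        int ret;

        req = qseecom_dma_alloc(client, sizeof(*tmpl), &req_dma, GFP_KERNEL);
        if (!req)
            return -ENOMEM;
        rsp = qseecom_dma_alloc(client, sizeof(*out), &rsp_dma, GFP_KERNEL);
        if (!rsp) {
            qseecom_dma_free(client, sizeof(*tmpl), req, req_dma);
            return -ENOMEM;
        }

        memcpy(req, tmpl, sizeof(*tmpl));
        ret = qcom_qseecom_app_send(client, req_dma, sizeof(*tmpl),
                                    rsp_dma, sizeof(*out));
        if (!ret)
            memcpy(out, rsp, sizeof(*out));

        qseecom_dma_free(client, sizeof(*out), rsp, rsp_dma);
        qseecom_dma_free(client, sizeof(*tmpl), req, req_dma);
        return ret;
    }
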
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index ccaf288460..aaa19f93ac 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -118,8 +118,8 @@ bool qcom_scm_lmh_dcvsh_available(void);
#ifdef CONFIG_QCOM_QSEECOM
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size);
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size);
#else /* CONFIG_QCOM_QSEECOM */
@@ -128,9 +128,9 @@ static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
return -EINVAL;
}
-static inline int qcom_scm_qseecom_app_send(u32 app_id, void *req,
- size_t req_size, void *rsp,
- size_t rsp_size)
+static inline int qcom_scm_qseecom_app_send(u32 app_id,
+ dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
return -EINVAL;
}
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index d1ea389856..9a7e527392 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -32,6 +32,7 @@
#define PM_SIP_SVC 0xC2000000
/* PM API versions */
+#define PM_API_VERSION_1 1
#define PM_API_VERSION_2 2
#define PM_PINCTRL_PARAM_SET_VERSION 2
@@ -47,6 +48,9 @@
#define FAMILY_CODE_MASK GENMASK(27, 21)
#define SUB_FAMILY_CODE_MASK GENMASK(20, 19)
+#define API_ID_MASK GENMASK(7, 0)
+#define MODULE_ID_MASK GENMASK(11, 8)
+
/* ATF only commands */
#define TF_A_PM_REGISTER_SGI 0xa04
#define PM_GET_TRUSTZONE_VERSION 0xa03
@@ -91,10 +95,18 @@
/*
* Node IDs for the Error Events.
*/
-#define EVENT_ERROR_PMC_ERR1 (0x28100000U)
-#define EVENT_ERROR_PMC_ERR2 (0x28104000U)
-#define EVENT_ERROR_PSM_ERR1 (0x28108000U)
-#define EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+#define VERSAL_EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define VERSAL_EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR1 (0x28108000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR3 (0x28108000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR1 (0x2810C000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR2 (0x28110000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR3 (0x28114000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR4 (0x28118000U)
/* ZynqMP SD tap delay tuning */
#define SD_ITAPDLY 0xFF180314
@@ -112,6 +124,12 @@
#define XPM_EVENT_ERROR_MASK_NOC_NCR BIT(13)
#define XPM_EVENT_ERROR_MASK_NOC_CR BIT(12)
+enum pm_module_id {
+ PM_MODULE_ID = 0x0,
+ XSEM_MODULE_ID = 0x3,
+ TF_A_MODULE_ID = 0xa,
+};
+
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
PM_ACKNOWLEDGE_CB = 31,
@@ -119,6 +137,7 @@ enum pm_api_cb_id {
};
enum pm_api_id {
+ PM_API_FEATURES = 0,
PM_GET_API_VERSION = 1,
PM_REGISTER_NOTIFIER = 5,
PM_FORCE_POWERDOWN = 8,
@@ -138,7 +157,6 @@ enum pm_api_id {
PM_SECURE_SHA = 26,
PM_PINCTRL_REQUEST = 28,
PM_PINCTRL_RELEASE = 29,
- PM_PINCTRL_GET_FUNCTION = 30,
PM_PINCTRL_SET_FUNCTION = 31,
PM_PINCTRL_CONFIG_PARAM_GET = 32,
PM_PINCTRL_CONFIG_PARAM_SET = 33,
@@ -149,8 +167,6 @@ enum pm_api_id {
PM_CLOCK_GETSTATE = 38,
PM_CLOCK_SETDIVIDER = 39,
PM_CLOCK_GETDIVIDER = 40,
- PM_CLOCK_SETRATE = 41,
- PM_CLOCK_GETRATE = 42,
PM_CLOCK_SETPARENT = 43,
PM_CLOCK_GETPARENT = 44,
PM_FPGA_READ = 46,
@@ -161,7 +177,9 @@ enum pm_api_id {
/* PMU-FW return status codes */
enum pm_ret_status {
XST_PM_SUCCESS = 0,
+ XST_PM_INVALID_VERSION = 4,
XST_PM_NO_FEATURE = 19,
+ XST_PM_INVALID_CRC = 301,
XST_PM_INTERNAL = 2000,
XST_PM_CONFLICT = 2001,
XST_PM_NO_ACCESS = 2002,
@@ -509,20 +527,18 @@ struct zynqmp_pm_query_data {
u32 arg3;
};
-int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 *ret_payload);
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
int zynqmp_pm_get_api_version(u32 *version);
int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
+int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily);
int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
int zynqmp_pm_clock_enable(u32 clock_id);
int zynqmp_pm_clock_disable(u32 clock_id);
int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state);
int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider);
int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider);
-int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate);
-int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate);
int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id);
int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id);
int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode);
@@ -559,7 +575,6 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
int zynqmp_pm_set_boot_health_status(u32 value);
int zynqmp_pm_pinctrl_request(const u32 pin);
int zynqmp_pm_pinctrl_release(const u32 pin);
-int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id);
int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id);
int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
u32 *value);
@@ -596,6 +611,11 @@ static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
return -ENODEV;
}
+static inline int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
u32 *out)
{
@@ -627,16 +647,6 @@ static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
return -ENODEV;
}
-static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
-{
- return -ENODEV;
-}
-
-static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
{
return -ENODEV;
@@ -806,11 +816,6 @@ static inline int zynqmp_pm_pinctrl_release(const u32 pin)
return -ENODEV;
}
-static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
{
return -ENODEV;
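
The variadic form of zynqmp_pm_invoke_fn() makes callers state how many SMC argument words matter instead of padding with zeros. A hedged illustration (PM_CLOCK_ENABLE is assumed from the same enum; it is not shown in this hunk):

    static int demo_clock_enable(u32 clock_id)
    {
        /* Fixed: API id, payload pointer, arg count; then the args. */
        return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, NULL, 1, clock_id);
    }
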
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 79ef6ac4c0..89a6888f2f 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -214,51 +214,6 @@ __kernel_size_t __fortify_strlen(const char * const POS p)
return ret;
}
-/* Defined after fortified strlen() to reuse it. */
-extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
-/**
- * strlcpy - Copy a string into another string buffer
- *
- * @p: pointer to destination of copy
- * @q: pointer to NUL-terminated source string to copy
- * @size: maximum number of bytes to write at @p
- *
- * If strlen(@q) >= @size, the copy of @q will be truncated at
- * @size - 1 bytes. @p will always be NUL-terminated.
- *
- * Do not use this function. While FORTIFY_SOURCE tries to avoid
- * over-reads when calculating strlen(@q), it is still possible.
- * Prefer strscpy(), though note its different return values for
- * detecting truncation.
- *
- * Returns total number of bytes written to @p, including terminating NUL.
- *
- */
-__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
-{
- const size_t p_size = __member_size(p);
- const size_t q_size = __member_size(q);
- size_t q_len; /* Full count of source string length. */
- size_t len; /* Count of characters going into destination. */
-
- if (p_size == SIZE_MAX && q_size == SIZE_MAX)
- return __real_strlcpy(p, q, size);
- q_len = strlen(q);
- len = (q_len >= size) ? size - 1 : q_len;
- if (__builtin_constant_p(size) && __builtin_constant_p(q_len) && size) {
- /* Write size is always larger than destination. */
- if (len >= p_size)
- __write_overflow();
- }
- if (size) {
- if (len >= p_size)
- fortify_panic(__func__);
- __underlying_memcpy(p, q, len);
- p[len] = '\0';
- }
- return q_len;
-}
-
/* Defined after fortified strnlen() to reuse it. */
extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
/**
@@ -272,12 +227,6 @@ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
* @p buffer. The behavior is undefined if the string buffers overlap. The
* destination @p buffer is always NUL terminated, unless it's zero-sized.
*
- * Preferred to strlcpy() since the API doesn't require reading memory
- * from the source @q string beyond the specified @size bytes, and since
- * the return value is easier to error-check than strlcpy()'s.
- * In addition, the implementation is robust to the string changing out
- * from underneath it, unlike the current strlcpy() implementation.
- *
* Preferred to strncpy() since it always returns a valid string, and
* doesn't unnecessarily force the tail of the destination buffer to be
* zero padded. If padding is desired please use strscpy_pad().
diff --git a/include/linux/framer/framer-provider.h b/include/linux/framer/framer-provider.h
new file mode 100644
index 0000000000..782cd5fc83
--- /dev/null
+++ b/include/linux/framer/framer-provider.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic framer provider header file
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#ifndef __DRIVERS_PROVIDER_FRAMER_H
+#define __DRIVERS_PROVIDER_FRAMER_H
+
+#include <linux/export.h>
+#include <linux/framer/framer.h>
+#include <linux/types.h>
+
+#define FRAMER_FLAG_POLL_STATUS BIT(0)
+
+/**
+ * struct framer_ops - set of function pointers for performing framer operations
+ * @init: operation to be performed for initializing the framer
+ * @exit: operation to be performed while exiting
+ * @power_on: powering on the framer
+ * @power_off: powering off the framer
+ * @flags: OR-ed flags (FRAMER_FLAG_*) to ask for core functionality
+ * - @FRAMER_FLAG_POLL_STATUS:
+ * Ask the core to perform a polling to get the framer status and
+ * notify consumers on change.
+ * The framer should call @framer_notify_status_change() when it
+ * detects a status change. This is usually done using interrupts.
+ * If the framer cannot detect this change, it can ask the core for
+ * a status polling. The core will call @get_status() periodically
+ * and, on change detected, it will notify the consumer.
+ * @owner: the module owner containing the ops
+ */
+struct framer_ops {
+ int (*init)(struct framer *framer);
+ void (*exit)(struct framer *framer);
+ int (*power_on)(struct framer *framer);
+ int (*power_off)(struct framer *framer);
+
+ /**
+ * @get_status:
+ *
+ * Optional.
+ *
+ * Used to get the framer status. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*get_status)(struct framer *framer, struct framer_status *status);
+
+ /**
+ * @set_config:
+ *
+ * Optional.
+ *
+ * Used to set the framer configuration. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*set_config)(struct framer *framer, const struct framer_config *config);
+
+ /**
+ * @get_config:
+ *
+ * Optional.
+ *
+ * Used to get the framer configuration. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*get_config)(struct framer *framer, struct framer_config *config);
+
+ u32 flags;
+ struct module *owner;
+};
+
+/**
+ * struct framer_provider - represents the framer provider
+ * @dev: framer provider device
+ * @owner: the module owner having of_xlate
+ * @list: to maintain a linked list of framer providers
+ * @of_xlate: function pointer to obtain a framer instance from phandle arguments
+ */
+struct framer_provider {
+ struct device *dev;
+ struct module *owner;
+ struct list_head list;
+ struct framer * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args);
+};
+
+static inline void framer_set_drvdata(struct framer *framer, void *data)
+{
+ dev_set_drvdata(&framer->dev, data);
+}
+
+static inline void *framer_get_drvdata(struct framer *framer)
+{
+ return dev_get_drvdata(&framer->dev);
+}
+
+#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
+
+/* Create and destroy a framer */
+struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops);
+void framer_destroy(struct framer *framer);
+
+/* devm version */
+struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops);
+
+struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ struct of_phandle_args *args);
+
+struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
+
+void framer_provider_of_unregister(struct framer_provider *framer_provider);
+
+struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
+
+void framer_notify_status_change(struct framer *framer);
+
+#else /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */
+
+static inline struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_destroy(struct framer *framer)
+{
+}
+
+/* devm version */
+static inline struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_provider_of_unregister(struct framer_provider *framer_provider)
+{
+}
+
+static inline struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_notify_status_change(struct framer *framer)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */
+
+#define framer_provider_of_register(dev, xlate) \
+ __framer_provider_of_register((dev), THIS_MODULE, (xlate))
+
+#define devm_framer_provider_of_register(dev, xlate) \
+ __devm_framer_provider_of_register((dev), THIS_MODULE, (xlate))
+
+#endif /* __DRIVERS_PROVIDER_FRAMER_H */
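
A minimal provider built on this API creates the framer with its ops and registers the default of_xlate translator. A hedged skeleton; all my_* names are hypothetical and error handling is abbreviated:

    static const struct framer_ops my_framer_ops = {
        .owner      = THIS_MODULE,
        .flags      = FRAMER_FLAG_POLL_STATUS,   /* no IRQ: let the core poll */
        .power_on   = my_framer_power_on,
        .power_off  = my_framer_power_off,
        .get_status = my_framer_get_status,
    };

    static int my_framer_probe(struct platform_device *pdev)
    {
        struct framer *framer;

        framer = devm_framer_create(&pdev->dev, NULL, &my_framer_ops);
        if (IS_ERR(framer))
            return PTR_ERR(framer);

        return PTR_ERR_OR_ZERO(devm_framer_provider_of_register(&pdev->dev,
                                framer_provider_simple_of_xlate));
    }
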
diff --git a/include/linux/framer/framer.h b/include/linux/framer/framer.h
new file mode 100644
index 0000000000..2b85fe9e7f
--- /dev/null
+++ b/include/linux/framer/framer.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic framer header file
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#ifndef __DRIVERS_FRAMER_H
+#define __DRIVERS_FRAMER_H
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+/**
+ * enum framer_iface - Framer interface
+ * @FRAMER_IFACE_E1: E1 interface
+ * @FRAMER_IFACE_T1: T1 interface
+ */
+enum framer_iface {
+ FRAMER_IFACE_E1,
+ FRAMER_IFACE_T1,
+};
+
+/**
+ * enum framer_clock_type - Framer clock type
+ * @FRAMER_CLOCK_EXT: External clock
+ * @FRAMER_CLOCK_INT: Internal clock
+ */
+enum framer_clock_type {
+ FRAMER_CLOCK_EXT,
+ FRAMER_CLOCK_INT,
+};
+
+/**
+ * struct framer_config - Framer configuration
+ * @iface: Framer line interface
+ * @clock_type: Framer clock type
+ * @line_clock_rate: Framer line clock rate
+ */
+struct framer_config {
+ enum framer_iface iface;
+ enum framer_clock_type clock_type;
+ unsigned long line_clock_rate;
+};
+
+/**
+ * struct framer_status - Framer status
+ * @link_is_on: Framer link state: true if the link is on, false if it is off.
+ */
+struct framer_status {
+ bool link_is_on;
+};
+
+/**
+ * enum framer_event - Event available for notification
+ * @FRAMER_EVENT_STATUS: Event notified on framer_status changes
+ */
+enum framer_event {
+ FRAMER_EVENT_STATUS,
+};
+
+/**
+ * struct framer - represents the framer device
+ * @dev: framer device
+ * @id: id of the framer device
+ * @ops: function pointers for performing framer operations
+ * @mutex: mutex to protect framer_ops
+ * @init_count: used to protect when the framer is used by multiple consumers
+ * @power_count: used to protect when the framer is used by multiple consumers
+ * @pwr: power regulator associated with the framer
+ * @notify_status_work: work structure used for status notifications
+ * @notifier_list: notifier list used for notifications
+ * @polling_work: delayed work structure used for the polling task
+ * @prev_status: previous read status used by the polling task to detect changes
+ */
+struct framer {
+ struct device dev;
+ int id;
+ const struct framer_ops *ops;
+ struct mutex mutex; /* Protect framer */
+ int init_count;
+ int power_count;
+ struct regulator *pwr;
+ struct work_struct notify_status_work;
+ struct blocking_notifier_head notifier_list;
+ struct delayed_work polling_work;
+ struct framer_status prev_status;
+};
+
+#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
+int framer_pm_runtime_get(struct framer *framer);
+int framer_pm_runtime_get_sync(struct framer *framer);
+int framer_pm_runtime_put(struct framer *framer);
+int framer_pm_runtime_put_sync(struct framer *framer);
+int framer_init(struct framer *framer);
+int framer_exit(struct framer *framer);
+int framer_power_on(struct framer *framer);
+int framer_power_off(struct framer *framer);
+int framer_get_status(struct framer *framer, struct framer_status *status);
+int framer_get_config(struct framer *framer, struct framer_config *config);
+int framer_set_config(struct framer *framer, const struct framer_config *config);
+int framer_notifier_register(struct framer *framer, struct notifier_block *nb);
+int framer_notifier_unregister(struct framer *framer, struct notifier_block *nb);
+
+struct framer *framer_get(struct device *dev, const char *con_id);
+void framer_put(struct device *dev, struct framer *framer);
+
+struct framer *devm_framer_get(struct device *dev, const char *con_id);
+struct framer *devm_framer_optional_get(struct device *dev, const char *con_id);
+#else
+static inline int framer_pm_runtime_get(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_get_sync(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_put(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_put_sync(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_init(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_exit(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_power_on(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_power_off(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_get_status(struct framer *framer, struct framer_status *status)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_get_config(struct framer *framer, struct framer_config *config)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_set_config(struct framer *framer, const struct framer_config *config)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_notifier_register(struct framer *framer,
+ struct notifier_block *nb)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_notifier_unregister(struct framer *framer,
+ struct notifier_block *nb)
+{
+ return -ENOSYS;
+}
+
+static inline struct framer *framer_get(struct device *dev, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_put(struct device *dev, struct framer *framer)
+{
+}
+
+static inline struct framer *devm_framer_get(struct device *dev, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer *devm_framer_optional_get(struct device *dev, const char *con_id)
+{
+ return NULL;
+}
+
+#endif
+
+#endif /* __DRIVERS_FRAMER_H */
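
On the consumer side the flow mirrors the generic PHY API: get the framer, init it, power it on, then subscribe to status events. A hedged sketch:

    static int my_consumer_attach(struct device *dev, struct notifier_block *nb)
    {
        struct framer *framer;
        int ret;

        framer = devm_framer_optional_get(dev, NULL);
        if (IS_ERR(framer))
            return PTR_ERR(framer);
        if (!framer)                /* optional and not wired up */
            return 0;

        ret = framer_init(framer);
        if (ret)
            return ret;
        ret = framer_power_on(framer);
        if (ret)
            return ret;

        /* nb->notifier_call will see FRAMER_EVENT_STATUS notifications. */
        return framer_notifier_register(framer, nb);
    }
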
diff --git a/include/linux/framer/pef2256.h b/include/linux/framer/pef2256.h
new file mode 100644
index 0000000000..71d80af58c
--- /dev/null
+++ b/include/linux/framer/pef2256.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PEF2256 consumer API
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+#ifndef __PEF2256_H__
+#define __PEF2256_H__
+
+#include <linux/types.h>
+
+struct pef2256;
+struct regmap;
+
+/* Retrieve the PEF2256 regmap */
+struct regmap *pef2256_get_regmap(struct pef2256 *pef2256);
+
+/* PEF2256 hardware versions */
+enum pef2256_version {
+ PEF2256_VERSION_UNKNOWN,
+ PEF2256_VERSION_1_2,
+ PEF2256_VERSION_2_1,
+ PEF2256_VERSION_2_2,
+};
+
+/* Get the PEF2256 hardware version */
+enum pef2256_version pef2256_get_version(struct pef2256 *pef2256);
+
+#endif /* __PEF2256_H__ */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3226a1d47b..08ecac9d7b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -436,7 +436,7 @@ struct address_space_operations {
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
- int (*error_remove_page)(struct address_space *, struct page *);
+ int (*error_remove_folio)(struct address_space *, struct folio *);
/* swapfile support */
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
@@ -465,9 +465,9 @@ extern const struct address_space_operations empty_aops;
* @a_ops: Methods.
* @flags: Error bits and flags (AS_*).
* @wb_err: The most recent error which has occurred.
- * @private_lock: For use by the owner of the address_space.
- * @private_list: For use by the owner of the address_space.
- * @private_data: For use by the owner of the address_space.
+ * @i_private_lock: For use by the owner of the address_space.
+ * @i_private_list: For use by the owner of the address_space.
+ * @i_private_data: For use by the owner of the address_space.
*/
struct address_space {
struct inode *host;
@@ -486,9 +486,9 @@ struct address_space {
unsigned long flags;
struct rw_semaphore i_mmap_rwsem;
errseq_t wb_err;
- spinlock_t private_lock;
- struct list_head private_list;
- void *private_data;
+ spinlock_t i_private_lock;
+ struct list_head i_private_list;
+ void * i_private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
@@ -993,8 +993,10 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
*/
struct file {
union {
+ /* fput() uses task work when closing and freeing file (default). */
+ struct callback_head f_task_work;
+ /* fput() must use workqueue (most kernel threads). */
struct llist_node f_llist;
- struct rcu_head f_rcuhead;
unsigned int f_iocb_flags;
};
@@ -1166,6 +1168,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_EVM_UNSUPPORTED 0x00000080
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
@@ -1187,7 +1190,8 @@ enum {
struct sb_writers {
unsigned short frozen; /* Is sb frozen? */
- unsigned short freeze_holders; /* Who froze fs? */
+ int freeze_kcount; /* How many kernel freeze requests? */
+ int freeze_ucount; /* How many userspace freeze requests? */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@@ -1647,9 +1651,70 @@ static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
#define __sb_writers_release(sb, lev) \
percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+/**
+ * __sb_write_started - check if sb freeze level is held
+ * @sb: the super we write to
+ * @level: the freeze level
+ *
+ * * > 0 - sb freeze level is held
+ * * 0 - sb freeze level is not held
+ * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN
+ */
+static inline int __sb_write_started(const struct super_block *sb, int level)
+{
+ return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1);
+}
+
+/**
+ * sb_write_started - check if SB_FREEZE_WRITE is held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
static inline bool sb_write_started(const struct super_block *sb)
{
- return lockdep_is_held_type(sb->s_writers.rw_sem + SB_FREEZE_WRITE - 1, 1);
+ return __sb_write_started(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_not_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0;
+}
+
+/**
+ * file_write_started - check if SB_FREEZE_WRITE is held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_started(const struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_started(file_inode(file)->i_sb);
+}
+
+/**
+ * file_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_not_started(const struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_not_started(file_inode(file)->i_sb);
}
/**
@@ -2031,9 +2096,6 @@ extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
-extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- size_t len, unsigned int flags);
int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *len, unsigned int remap_flags,
@@ -2041,9 +2103,6 @@ int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *count, unsigned int remap_flags);
-extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t len, unsigned int remap_flags);
extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
@@ -2053,9 +2112,24 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
+/**
+ * enum freeze_holder - holder of the freeze
+ * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
+ * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
+ * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
+ *
+ * Indicate who the owner of the freeze or thaw request is and whether
+ * the freeze needs to be exclusive or can nest.
+ * Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the
+ * same holder aren't allowed. It is however allowed to hold a single
+ * @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at
+ * the same time. This is relied upon by some filesystems during online
+ * repair or similar.
+ */
enum freeze_holder {
FREEZE_HOLDER_KERNEL = (1U << 0),
FREEZE_HOLDER_USERSPACE = (1U << 1),
+ FREEZE_MAY_NEST = (1U << 2),
};
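
With @FREEZE_MAY_NEST, an in-kernel user such as online repair can hold its own freeze alongside a userspace one. A hedged sketch, assuming the freeze_super()/thaw_super() variants that take a holder mask:

    static int my_repair_frozen(struct super_block *sb)
    {
        int ret;

        ret = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
        if (ret)
            return ret;

        /* ... operate on the quiesced filesystem ... */

        return thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST);
    }
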
struct super_operations {
@@ -2296,7 +2370,7 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
#define I_CREATING (1 << 15)
#define I_DONTCACHE (1 << 16)
#define I_SYNC_QUEUED (1 << 17)
-#define I_PINNING_FSCACHE_WB (1 << 18)
+#define I_PINNING_NETFS_WB (1 << 18)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2519,26 +2593,31 @@ struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
-struct file *backing_file_open(const struct path *user_path, int flags,
- const struct path *real_path,
- const struct cred *cred);
struct path *backing_file_user_path(struct file *f);
/*
- * file_user_path - get the path to display for memory mapped file
- *
* When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
* stored in ->vm_file is a backing file whose f_inode is on the underlying
- * filesystem. When the mapped file path is displayed to user (e.g. via
- * /proc/<pid>/maps), this helper should be used to get the path to display
- * to the user, which is the path of the fd that user has requested to map.
+ * filesystem. When the mapped file path and inode number are displayed to
+ * user (e.g. via /proc/<pid>/maps), these helpers should be used to get the
+ * path and inode number to display to the user, which is the path of the fd
+ * that user has requested to map and the inode number that would be returned
+ * by fstat() on that same fd.
*/
+/* Get the path to display in /proc/<pid>/maps */
static inline const struct path *file_user_path(struct file *f)
{
if (unlikely(f->f_mode & FMODE_BACKING))
return backing_file_user_path(f);
return &f->f_path;
}
+/* Get the inode whose inode number to display in /proc/<pid>/maps */
+static inline const struct inode *file_user_inode(struct file *f)
+{
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return d_inode(backing_file_user_path(f)->dentry);
+ return file_inode(f);
+}
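
A hedged sketch of a /proc-style printer using both helpers, so a mapping of a backing file reports the user-visible path and inode number:

    static void print_map_entry(struct seq_file *m, struct file *f)
    {
        const struct inode *inode = file_user_inode(f);

        seq_printf(m, "%lu ", inode->i_ino);
        seq_path(m, file_user_path(f), "\n");
    }
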
static inline struct file *file_clone_open(struct file *file)
{
@@ -2993,8 +3072,6 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos,
size_t len, unsigned int flags);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
- loff_t *opos, size_t len, unsigned int flags);
extern void
@@ -3123,7 +3200,6 @@ extern int vfs_readlink(struct dentry *, char __user *, int);
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
-extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index a174cedf4d..bdf7f3eddf 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -189,17 +189,20 @@ extern atomic_t fscache_n_write;
extern atomic_t fscache_n_no_write_space;
extern atomic_t fscache_n_no_create_space;
extern atomic_t fscache_n_culled;
+extern atomic_t fscache_n_dio_misfit;
#define fscache_count_read() atomic_inc(&fscache_n_read)
#define fscache_count_write() atomic_inc(&fscache_n_write)
#define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space)
#define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space)
#define fscache_count_culled() atomic_inc(&fscache_n_culled)
+#define fscache_count_dio_misfit() atomic_inc(&fscache_n_dio_misfit)
#else
#define fscache_count_read() do {} while(0)
#define fscache_count_write() do {} while(0)
#define fscache_count_no_write_space() do {} while(0)
#define fscache_count_no_create_space() do {} while(0)
#define fscache_count_culled() do {} while(0)
+#define fscache_count_dio_misfit() do {} while(0)
#endif
#endif /* _LINUX_FSCACHE_CACHE_H */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 8e312c8323..6e8562cbcc 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -437,9 +437,6 @@ const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_r
* indicates the cache resources to which the operation state should be
* attached; @cookie indicates the cache object that will be accessed.
*
- * This is intended to be called from the ->begin_cache_operation() netfs lib
- * operation as implemented by the network filesystem.
- *
* @cres->inval_counter is set from @cookie->inval_counter for comparison at
* the end of the operation. This allows invalidation during the operation to
* be detected by the caller.
@@ -629,48 +626,6 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
}
-#if __fscache_available
-bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
- struct fscache_cookie *cookie);
-#else
-#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \
- filemap_dirty_folio(MAPPING, FOLIO)
-#endif
-
-/**
- * fscache_unpin_writeback - Unpin writeback resources
- * @wbc: The writeback control
- * @cookie: The cookie referring to the cache object
- *
- * Unpin the writeback resources pinned by fscache_dirty_folio(). This is
- * intended to be called by the netfs's ->write_inode() method.
- */
-static inline void fscache_unpin_writeback(struct writeback_control *wbc,
- struct fscache_cookie *cookie)
-{
- if (wbc->unpinned_fscache_wb)
- fscache_unuse_cookie(cookie, NULL, NULL);
-}
-
-/**
- * fscache_clear_inode_writeback - Clear writeback resources pinned by an inode
- * @cookie: The cookie referring to the cache object
- * @inode: The inode to clean up
- * @aux: Auxiliary data to apply to the inode
- *
- * Clear any writeback resources held by an inode when the inode is evicted.
- * This must be called before clear_inode() is called.
- */
-static inline void fscache_clear_inode_writeback(struct fscache_cookie *cookie,
- struct inode *inode,
- const void *aux)
-{
- if (inode->i_state & I_PINNING_FSCACHE_WB) {
- loff_t i_size = i_size_read(inode);
- fscache_unuse_cookie(cookie, aux, &i_size);
- }
-}
-
/**
* fscache_note_page_release - Note that a netfs page got released
* @cookie: The cookie corresponding to the file
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index bcb6609b54..8300a52869 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -100,30 +100,69 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
-/* Simple call site for access decisions */
-static inline int fsnotify_perm(struct file *file, int mask)
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+/*
+ * fsnotify_file_area_perm - permission hook before access to file range
+ */
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
{
- int ret;
- __u32 fsnotify_mask = 0;
+ __u32 fsnotify_mask = FS_ACCESS_PERM;
+
+ /*
+ * filesystem may be modified in the context of permission events
+ * (e.g. by HSM filling a file on access), so sb freeze protection
+ * must not be held.
+ */
+ lockdep_assert_once(file_write_not_started(file));
- if (!(mask & (MAY_READ | MAY_OPEN)))
+ if (!(perm_mask & MAY_READ))
return 0;
- if (mask & MAY_OPEN) {
- fsnotify_mask = FS_OPEN_PERM;
+ return fsnotify_file(file, fsnotify_mask);
+}
- if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+/*
+ * fsnotify_file_perm - permission hook before file access
+ */
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
+}
- if (ret)
- return ret;
- }
- } else if (mask & MAY_READ) {
- fsnotify_mask = FS_ACCESS_PERM;
+/*
+ * fsnotify_open_perm - permission hook before file open
+ */
+static inline int fsnotify_open_perm(struct file *file)
+{
+ int ret;
+
+ if (file->f_flags & __FMODE_EXEC) {
+ ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+ if (ret)
+ return ret;
}
- return fsnotify_file(file, fsnotify_mask);
+ return fsnotify_file(file, FS_OPEN_PERM);
+}
+
+#else
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
+{
+ return 0;
+}
+
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return 0;
+}
+
+static inline int fsnotify_open_perm(struct file *file)
+{
+ return 0;
}
+#endif
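
The split gives each VFS call site a precisely named hook; a hedged sketch of where they would be invoked (simplified relative to the real fs/open.c and fs/read_write.c call sites):

    static int my_open_checks(struct file *file)
    {
        /* FS_OPEN_PERM, plus FS_OPEN_EXEC_PERM for __FMODE_EXEC opens */
        return fsnotify_open_perm(file);
    }

    static int my_read_checks(struct file *file, loff_t *ppos, size_t count)
    {
        /* FS_ACCESS_PERM, scoped to the range being read */
        return fsnotify_file_area_perm(file, MAY_READ, ppos, count);
    }
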
/*
* fsnotify_link_count - inode's link count changed
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index c0892d75ce..7f63be5ca0 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -472,10 +472,8 @@ typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
struct fsnotify_mark_connector {
spinlock_t lock;
unsigned short type; /* Type of object [lock] */
-#define FSNOTIFY_CONN_FLAG_HAS_FSID 0x01
#define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02
unsigned short flags; /* flags [lock] */
- __kernel_fsid_t fsid; /* fsid of filesystem containing object */
union {
/* Object pointer [lock] */
fsnotify_connp_t *obj;
@@ -530,6 +528,8 @@ struct fsnotify_mark {
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100
#define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200
#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400
+#define FSNOTIFY_MARK_FLAG_HAS_FSID 0x0800
+#define FSNOTIFY_MARK_FLAG_WEAK_FSID 0x1000
unsigned int flags; /* flags [mark->lock] */
};
@@ -763,11 +763,10 @@ extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
/* attach the mark to the object */
extern int fsnotify_add_mark(struct fsnotify_mark *mark,
fsnotify_connp_t *connp, unsigned int obj_type,
- int add_flags, __kernel_fsid_t *fsid);
+ int add_flags);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
fsnotify_connp_t *connp,
- unsigned int obj_type, int add_flags,
- __kernel_fsid_t *fsid);
+ unsigned int obj_type, int add_flags);
/* attach the mark to the inode */
static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
@@ -775,15 +774,14 @@ static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
int add_flags)
{
return fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, add_flags, NULL);
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags);
}
static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
struct inode *inode,
int add_flags)
{
return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, add_flags,
- NULL);
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags);
}
/* given a group and a mark, flag mark to be freed when all references are dropped */
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
index ca49947f0a..9542186039 100644
--- a/include/linux/fw_table.h
+++ b/include/linux/fw_table.h
@@ -25,16 +25,35 @@ struct acpi_subtable_proc {
int count;
};
+union fw_table_header {
+ struct acpi_table_header acpi;
+ struct acpi_table_cdat cdat;
+};
+
union acpi_subtable_headers {
struct acpi_subtable_header common;
struct acpi_hmat_structure hmat;
struct acpi_prmt_module_header prmt;
struct acpi_cedt_header cedt;
+ struct acpi_cdat_header cdat;
};
int acpi_parse_entries_array(char *id, unsigned long table_size,
- struct acpi_table_header *table_header,
+ union fw_table_header *table_header,
struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries);
+int cdat_table_parse(enum acpi_cdat_type type,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg,
+ struct acpi_table_cdat *table_header);
+
+/* CXL is the only non-ACPI consumer of the FIRMWARE_TABLE library */
+#if IS_ENABLED(CONFIG_ACPI) && !IS_ENABLED(CONFIG_CXL_BUS)
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_fwtbl_lib __init_or_acpilib
+#else
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, CXL)
+#define __init_or_fwtbl_lib
+#endif
+
#endif
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 6583a58670..1b6053da87 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -162,25 +162,25 @@ typedef unsigned int __bitwise gfp_t;
* %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
* !costly allocations are too essential to fail so they are implicitly
* non-failing by default (with some exceptions like OOM victims might fail so
* the caller still has to check for failures) while costly requests try to be
* not disruptive and back off even without invoking the OOM killer.
* The following three modifiers might be used to override some of these
- * implicit rules
+ * implicit rules.
*
* %__GFP_NORETRY: The VM implementation will try only very lightweight
* memory direct reclaim to get some memory under memory pressure (thus
* it can sleep). It will avoid disruptive actions like OOM killer. The
* caller must handle the failure which is quite likely to happen under
* heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
+ * handled at small cost, such as reduced throughput.
*
* %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
* procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high-level approaches to freeing memory such as
* compaction (which removes fragmentation) and page-out.
* There is still a definite limit to the number of retries, but it is
* a larger limit than with %__GFP_NORETRY.
@@ -230,7 +230,7 @@ typedef unsigned int __bitwise gfp_t;
* is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
* __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
* memory tags at the same time as zeroing memory has minimal additional
- * performace impact.
+ * performance impact.
*
* %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
* Used for userspace and vmalloc pages; the latter are unpoisoned by
@@ -274,7 +274,8 @@ typedef unsigned int __bitwise gfp_t;
* accounted to kmemcg.
*
* %GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
+ * reclaim, start physical IO or use any filesystem callback. It is very
+ * likely to fail to allocate memory, even for very small allocations.
*
* %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
@@ -325,7 +326,7 @@ typedef unsigned int __bitwise gfp_t;
#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)
#define GFP_NOIO (__GFP_RECLAIM)
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
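
With __GFP_NOWARN now folded into GFP_NOWAIT, failed no-wait allocations are both likely and silent, so callers need their own fallback path. A minimal sketch (not from this patch; struct my_item is hypothetical):

/* Sketch: GFP_NOWAIT callers must tolerate silent failure. */
struct my_item *item = kmalloc(sizeof(*item), GFP_NOWAIT);

if (!item)
	return -ENOMEM;	/* expected under memory pressure; nothing is logged */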
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 725e7eca99..7f75c9a518 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -531,19 +531,40 @@ struct gpio_chip {
#endif /* CONFIG_OF_GPIO */
};
-const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned int offset);
+char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset);
+
+
+struct _gpiochip_for_each_data {
+ const char **label;
+ unsigned int *i;
+};
+
+DEFINE_CLASS(_gpiochip_for_each_data,
+ struct _gpiochip_for_each_data,
+ if (*_T.label) kfree(*_T.label),
+ ({
+ struct _gpiochip_for_each_data _data = { label, i };
+ *_data.i = 0;
+ _data;
+ }),
+ const char **label, int *i)
/**
* for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range
- * @chip: the chip to query
- * @i: loop variable
- * @base: first GPIO in the range
- * @size: amount of GPIOs to check starting from @base
- * @label: label of current GPIO
+ * @_chip: the chip to query
+ * @_i: loop variable
+ * @_base: first GPIO in the range
+ * @_size: amount of GPIOs to check starting from @_base
+ * @_label: label of current GPIO
*/
-#define for_each_requested_gpio_in_range(chip, i, base, size, label) \
- for (i = 0; i < size; i++) \
- if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else
+#define for_each_requested_gpio_in_range(_chip, _i, _base, _size, _label) \
+ for (CLASS(_gpiochip_for_each_data, _data)(&_label, &_i); \
+ *_data.i < _size; \
+ (*_data.i)++, kfree(*(_data.label)), *_data.label = NULL) \
+ if ((*_data.label = \
+ gpiochip_dup_line_label(_chip, _base + *_data.i)) == NULL) {} \
+ else if (IS_ERR(*_data.label)) {} \
+ else
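
Unlike the old gpiochip_is_requested() flavour, the rewritten iterator hands the loop body a freshly duplicated label and frees it again through the scope-based class, so the pointer must not escape an iteration. A hypothetical usage sketch:

/* Sketch: each label copy is valid for a single loop iteration only. */
static void dump_requested_lines(struct gpio_chip *gc)
{
	const char *label;
	unsigned int i;

	for_each_requested_gpio(gc, i, label)
		pr_info("line %u requested as '%s'\n", i, label);
	/* no kfree() needed here: the iterator frees each duplicate */
}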
/* Iterates over all requested GPIO of the given @chip */
#define for_each_requested_gpio(chip, i, label) \
@@ -701,7 +722,6 @@ int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
#else
#include <asm/bug.h>
-#include <asm/errno.h>
static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
struct irq_domain *domain)
@@ -786,11 +806,10 @@ struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc);
/* struct gpio_device getters */
int gpio_device_get_base(struct gpio_device *gdev);
+const char *gpio_device_get_label(struct gpio_device *gdev);
#else /* CONFIG_GPIOLIB */
-#include <linux/err.h>
-
#include <asm/bug.h>
static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
@@ -800,16 +819,22 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
return ERR_PTR(-ENODEV);
}
+static inline struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
+{
+ WARN_ON(1);
+ return ERR_PTR(-ENODEV);
+}
+
static inline int gpio_device_get_base(struct gpio_device *gdev)
{
WARN_ON(1);
return -ENODEV;
}
-static inline struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
+static inline const char *gpio_device_get_label(struct gpio_device *gdev)
{
WARN_ON(1);
- return ERR_PTR(-ENODEV);
+ return NULL;
}
static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h
index 6c75c8bd44..1a14e23922 100644
--- a/include/linux/gpio/property.h
+++ b/include/linux/gpio/property.h
@@ -2,7 +2,6 @@
#ifndef __LINUX_GPIO_PROPERTY_H
#define __LINUX_GPIO_PROPERTY_H
-#include <dt-bindings/gpio/gpio.h> /* for GPIO_* flags */
#include <linux/property.h>
#define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 3f84aeb81e..80fa930b04 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -21,6 +21,7 @@ struct device;
* disable button via sysfs
* @value: axis value for %EV_ABS
* @irq: Irq number in case of interrupt keys
+ * @wakeirq: Optional dedicated wake-up interrupt
*/
struct gpio_keys_button {
unsigned int code;
@@ -34,6 +35,7 @@ struct gpio_keys_button {
bool can_disable;
int value;
unsigned int irq;
+ unsigned int wakeirq;
};
/**
diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h
index 86ea7c63a0..f316c8d0f3 100644
--- a/include/linux/habanalabs/cpucp_if.h
+++ b/include/linux/habanalabs/cpucp_if.h
@@ -659,6 +659,12 @@ enum pq_init_status {
* number (nonce) provided by the host to prevent replay attacks.
* public key and certificate also provided as part of the FW response.
*
+ * CPUCP_PACKET_INFO_SIGNED_GET -
+ * Get the device information signed by the Trusted Platform device.
+ * Device info data is also hashed with a unique number (nonce) provided
+ * by the host to prevent replay attacks. The public key and certificate
+ * are also provided as part of the FW response.
+ *
* CPUCP_PACKET_MONITOR_DUMP_GET -
* Get monitors registers dump from the CpuCP kernel.
* The CPU will put the registers dump in the a buffer allocated by the driver
@@ -733,7 +739,7 @@ enum cpucp_packet_id {
CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
CPUCP_PACKET_RESERVED2, /* not used */
CPUCP_PACKET_SEC_ATTEST_GET, /* internal */
- CPUCP_PACKET_RESERVED3, /* not used */
+ CPUCP_PACKET_INFO_SIGNED_GET, /* internal */
CPUCP_PACKET_RESERVED4, /* not used */
CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
CPUCP_PACKET_RESERVED5, /* not used */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index bf43f3ff66..7c26db874f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -912,7 +912,7 @@ extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
-extern struct bus_type hid_bus_type;
+extern const struct bus_type hid_bus_type;
extern int __must_check __hid_register_driver(struct hid_driver *,
struct module *, const char *mod_name);
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index e9afb61e6e..7118ac28d4 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -77,17 +77,6 @@ enum hid_bpf_attach_flags {
int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
-/* Following functions are kfunc that we export to BPF programs */
-/* available everywhere in HID-BPF */
-__u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t __sz);
-
-/* only available in syscall */
-int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags);
-int hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
- enum hid_report_type rtype, enum hid_class_request reqtype);
-struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id);
-void hid_bpf_release_context(struct hid_bpf_ctx *ctx);
-
/*
* Below is HID internal
*/
@@ -115,7 +104,7 @@ struct hid_bpf_ops {
size_t len, enum hid_report_type rtype,
enum hid_class_request reqtype);
struct module *owner;
- struct bus_type *bus_type;
+ const struct bus_type *bus_type;
};
extern struct hid_bpf_ops *hid_bpf_ops;
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index be20cff4ba..451c1dff0e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -484,6 +484,82 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset,
}
/**
+ * folio_zero_tail - Zero the tail of a folio.
+ * @folio: The folio to zero.
+ * @offset: The byte offset in the folio to start zeroing at.
+ * @kaddr: The address the folio is currently mapped to.
+ *
+ * If you have already used kmap_local_folio() to map a folio, written
+ * some data to it and now need to zero the end of the folio (and flush
+ * the dcache), you can use this function. If you do not have the
+ * folio kmapped (eg the folio has been partially populated by DMA),
+ * use folio_zero_range() or folio_zero_segment() instead.
+ *
+ * Return: An address which can be passed to kunmap_local().
+ */
+static inline __must_check void *folio_zero_tail(struct folio *folio,
+ size_t offset, void *kaddr)
+{
+ size_t len = folio_size(folio) - offset;
+
+ if (folio_test_highmem(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memset(kaddr, 0, max);
+ kunmap_local(kaddr);
+ len -= max;
+ offset += max;
+ max = PAGE_SIZE;
+ kaddr = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memset(kaddr, 0, len);
+ flush_dcache_folio(folio);
+
+ return kaddr;
+}
+
+/**
+ * folio_fill_tail - Copy some data to a folio and pad with zeroes.
+ * @folio: The destination folio.
+ * @offset: The offset into @folio at which to start copying.
+ * @from: The data to copy.
+ * @len: How many bytes of data to copy.
+ *
+ * This function is most useful for filesystems which support inline data.
+ * When they want to copy data from the inode into the page cache, this
+ * function does everything for them. It supports large folios even on
+ * HIGHMEM configurations.
+ */
+static inline void folio_fill_tail(struct folio *folio, size_t offset,
+ const char *from, size_t len)
+{
+ char *to = kmap_local_folio(folio, offset);
+
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ if (folio_test_highmem(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memcpy(to, from, max);
+ kunmap_local(to);
+ len -= max;
+ from += max;
+ offset += max;
+ max = PAGE_SIZE;
+ to = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memcpy(to, from, len);
+ to = folio_zero_tail(folio, offset + len, to + len);
+ kunmap_local(to);
+}
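
As the kernel-doc above notes, an inline-data filesystem can serve a whole folio with one call: copy the inline bytes, then let the helper zero the tail and flush the dcache. A hedged sketch of a hypothetical ->read_folio() (all myfs names are invented):

static int myfs_read_inline_folio(struct inode *inode, struct folio *folio)
{
	struct myfs_inode *mi = MYFS_I(inode);	/* hypothetical accessor */
	size_t len = i_size_read(inode);

	folio_fill_tail(folio, 0, mi->inline_data, len);
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}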
+
+/**
* memcpy_from_file_folio - Copy some bytes from a file folio.
* @to: The destination buffer.
* @folio: The folio to copy from.
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 4c5611d99c..5f4c74facf 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -108,17 +108,13 @@ enum qm_stop_reason {
};
enum qm_state {
- QM_INIT = 0,
- QM_START,
- QM_CLOSE,
+ QM_WORK = 0,
QM_STOP,
};
enum qp_state {
- QP_INIT = 1,
- QP_START,
+ QP_START = 1,
QP_STOP,
- QP_CLOSE,
};
enum qm_hw_ver {
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 254d4a8981..641c4567cf 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -13,13 +13,13 @@
#define _LINUX_HRTIMER_H
#include <linux/hrtimer_defs.h>
-#include <linux/rbtree.h>
+#include <linux/hrtimer_types.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/percpu.h>
+#include <linux/percpu-defs.h>
+#include <linux/rbtree.h>
#include <linux/seqlock.h>
#include <linux/timer.h>
-#include <linux/timerqueue.h>
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
@@ -60,14 +60,6 @@ enum hrtimer_mode {
};
/*
- * Return values for the callback function
- */
-enum hrtimer_restart {
- HRTIMER_NORESTART, /* Timer is not restarted */
- HRTIMER_RESTART, /* Timer must be restarted */
-};
-
-/*
* Values to track state of the timer
*
* Possible states:
@@ -95,38 +87,6 @@ enum hrtimer_restart {
#define HRTIMER_STATE_ENQUEUED 0x01
/**
- * struct hrtimer - the basic hrtimer structure
- * @node: timerqueue node, which also manages node.expires,
- * the absolute expiry time in the hrtimers internal
- * representation. The time is related to the clock on
- * which the timer is based. Is setup by adding
- * slack to the _softexpires value. For non range timers
- * identical to _softexpires.
- * @_softexpires: the absolute earliest expiry time of the hrtimer.
- * The time which was given as expiry time when the timer
- * was armed.
- * @function: timer expiry callback function
- * @base: pointer to the timer base (per cpu and per clock)
- * @state: state information (See bit values above)
- * @is_rel: Set if the timer was armed relative
- * @is_soft: Set if hrtimer will be expired in soft interrupt context.
- * @is_hard: Set if hrtimer will be expired in hard interrupt context
- * even on RT.
- *
- * The hrtimer structure must be initialized by hrtimer_init()
- */
-struct hrtimer {
- struct timerqueue_node node;
- ktime_t _softexpires;
- enum hrtimer_restart (*function)(struct hrtimer *);
- struct hrtimer_clock_base *base;
- u8 state;
- u8 is_rel;
- u8 is_soft;
- u8 is_hard;
-};
-
-/**
* struct hrtimer_sleeper - simple sleeper structure
* @timer: embedded timer structure
* @task: task to wake up
diff --git a/include/linux/hrtimer_types.h b/include/linux/hrtimer_types.h
new file mode 100644
index 0000000000..ad66a30817
--- /dev/null
+++ b/include/linux/hrtimer_types.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HRTIMER_TYPES_H
+#define _LINUX_HRTIMER_TYPES_H
+
+#include <linux/types.h>
+#include <linux/timerqueue_types.h>
+
+struct hrtimer_clock_base;
+
+/*
+ * Return values for the callback function
+ */
+enum hrtimer_restart {
+ HRTIMER_NORESTART, /* Timer is not restarted */
+ HRTIMER_RESTART, /* Timer must be restarted */
+};
+
+/**
+ * struct hrtimer - the basic hrtimer structure
+ * @node: timerqueue node, which also manages node.expires,
+ * the absolute expiry time in the hrtimers internal
+ * representation. The time is related to the clock on
+ * which the timer is based. Is setup by adding
+ * slack to the _softexpires value. For non range timers
+ * identical to _softexpires.
+ * @_softexpires: the absolute earliest expiry time of the hrtimer.
+ * The time which was given as expiry time when the timer
+ * was armed.
+ * @function: timer expiry callback function
+ * @base: pointer to the timer base (per cpu and per clock)
+ * @state: state information (See bit values above)
+ * @is_rel: Set if the timer was armed relative
+ * @is_soft: Set if hrtimer will be expired in soft interrupt context.
+ * @is_hard: Set if hrtimer will be expired in hard interrupt context
+ * even on RT.
+ *
+ * The hrtimer structure must be initialized by hrtimer_init()
+ */
+struct hrtimer {
+ struct timerqueue_node node;
+ ktime_t _softexpires;
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ u8 state;
+ u8 is_rel;
+ u8 is_soft;
+ u8 is_hard;
+};
+
+#endif /* _LINUX_HRTIMER_TYPES_H */
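
The enum and struct move here unchanged, so the callback contract is the familiar one. A minimal sketch of a periodic callback built on these types (not part of this patch):

/* Sketch: re-arm from the callback and tell the core to restart the timer. */
static enum hrtimer_restart my_tick(struct hrtimer *t)
{
	/* ... periodic work ... */
	hrtimer_forward_now(t, ms_to_ktime(100));
	return HRTIMER_RESTART;
}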
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index fa0350b081..5adb86af35 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -67,6 +67,26 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+/*
+ * Mask of all large folio orders supported for anonymous THP; all orders up to
+ * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
+ * (which is a limitation of the THP implementation).
+ */
+#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
+
+/*
+ * Mask of all large folio orders supported for file THP.
+ */
+#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+
+/*
+ * Mask of all large folio orders supported for THP.
+ */
+#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
+
+#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
@@ -77,45 +97,105 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
extern unsigned long transparent_hugepage_flags;
+extern unsigned long huge_anon_orders_always;
+extern unsigned long huge_anon_orders_madvise;
+extern unsigned long huge_anon_orders_inherit;
-#define hugepage_flags_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define hugepage_flags_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
+static inline bool hugepage_global_enabled(void)
+{
+ return transparent_hugepage_flags &
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) |
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
+}
+
+static inline bool hugepage_global_always(void)
+{
+ return transparent_hugepage_flags &
+ (1<<TRANSPARENT_HUGEPAGE_FLAG);
+}
+
+static inline bool hugepage_flags_enabled(void)
+{
+ /*
+ * We cover both the anon and the file-backed case here; we must return
+ * true if globally enabled, even when all anon sizes are set to never.
+ * So we don't need to look at huge_anon_orders_inherit.
+ */
+ return hugepage_global_enabled() ||
+ huge_anon_orders_always ||
+ huge_anon_orders_madvise;
+}
+
+static inline int highest_order(unsigned long orders)
+{
+ return fls_long(orders) - 1;
+}
+
+static inline int next_order(unsigned long *orders, int prev)
+{
+ *orders &= ~BIT(prev);
+ return highest_order(*orders);
+}
/*
* Do the below checks:
* - For file vma, check if the linear page offset of vma is
- * HPAGE_PMD_NR aligned within the file. The hugepage is
- * guaranteed to be hugepage-aligned within the file, but we must
- * check that the PMD-aligned addresses in the VMA map to
- * PMD-aligned offsets within the file, else the hugepage will
- * not be PMD-mappable.
- * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
+ * order-aligned within the file. The hugepage is
+ * guaranteed to be order-aligned within the file, but we must
+ * check that the order-aligned addresses in the VMA map to
+ * order-aligned offsets within the file, else the hugepage will
+ * not be mappable.
+ * - For all vmas, check if the haddr is in an aligned hugepage
* area.
*/
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long addr)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
+ unsigned long hpage_size = PAGE_SIZE << order;
unsigned long haddr;
/* Don't have to check pgoff for anonymous vma */
if (!vma_is_anonymous(vma)) {
if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
- HPAGE_PMD_NR))
+ hpage_size >> PAGE_SHIFT))
return false;
}
- haddr = addr & HPAGE_PMD_MASK;
+ haddr = ALIGN_DOWN(addr, hpage_size);
- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+ if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
return false;
return true;
}
+/*
+ * Filter the bitfield of input orders to the ones suitable for use in the vma.
+ * See thp_vma_suitable_order().
+ * All orders that pass the checks are returned as a bitfield.
+ */
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
+{
+ int order;
+
+ /*
+ * Iterate over orders, highest to lowest, removing orders that don't
+ * meet alignment requirements from the set. Exit loop at first order
+ * that meets requirements, since all lower orders must also meet
+ * requirements.
+ */
+
+ order = highest_order(orders);
+
+ while (orders) {
+ if (thp_vma_suitable_order(vma, addr, order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ return orders;
+}
+
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
struct inode *inode;
@@ -126,12 +206,55 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
inode = vma->vm_file->f_inode;
return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
- (vma->vm_flags & VM_EXEC) &&
!inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
-bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
- bool smaps, bool in_pf, bool enforce_sysfs);
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders);
+
+/**
+ * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
+ * @vma: the vm area to check
+ * @vm_flags: use these vm_flags instead of vma->vm_flags
+ * @smaps: whether answer will be used for smaps file
+ * @in_pf: whether answer will be used by page fault handler
+ * @enforce_sysfs: whether sysfs config should be taken into account
+ * @orders: bitfield of all orders to consider
+ *
+ * Calculates the intersection of the requested hugepage orders and the allowed
+ * hugepage orders for the provided vma. Permitted orders are encoded as a set
+ * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
+ * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
+ *
+ * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
+ * orders are allowed.
+ */
+static inline
+unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ /* Optimization to check if required orders are enabled early. */
+ if (enforce_sysfs && vma_is_anonymous(vma)) {
+ unsigned long mask = READ_ONCE(huge_anon_orders_always);
+
+ if (vm_flags & VM_HUGEPAGE)
+ mask |= READ_ONCE(huge_anon_orders_madvise);
+ if (hugepage_global_always() ||
+ ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
+ mask |= READ_ONCE(huge_anon_orders_inherit);
+
+ orders &= mask;
+ if (!orders)
+ return 0;
+ }
+
+ return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
+ enforce_sysfs, orders);
+}
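
Together with highest_order()/next_order(), the two filters let a fault path walk candidate orders from largest to smallest. A sketch loosely modeled on how an anonymous-fault caller might chain them (my_try_alloc_folio() is hypothetical):

unsigned long orders;
int order;

orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
				  THP_ORDERS_ALL_ANON);
orders = thp_vma_suitable_orders(vma, addr, orders);

order = highest_order(orders);
while (orders) {
	if (my_try_alloc_folio(vma, addr, order))	/* hypothetical */
		break;
	order = next_order(&orders, order);
}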
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
@@ -267,17 +390,24 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
return false;
}
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long addr)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
return false;
}
-static inline bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags, bool smaps,
- bool in_pf, bool enforce_sysfs)
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
{
- return false;
+ return 0;
+}
+
+static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ return 0;
}
static inline void folio_prep_large_rmappable(struct folio *folio) {}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 236ec7b63c..c1ee640d87 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -829,7 +829,7 @@ static inline unsigned huge_page_shift(struct hstate *h)
static inline bool hstate_is_gigantic(struct hstate *h)
{
- return huge_page_order(h) > MAX_ORDER;
+ return huge_page_order(h) > MAX_PAGE_ORDER;
}
static inline unsigned int pages_per_huge_page(const struct hstate *h)
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 0dae9db275..652ecb7abe 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -23,7 +23,7 @@
#include <linux/swab.h> /* for swab16 */
#include <uapi/linux/i2c.h>
-extern struct bus_type i2c_bus_type;
+extern const struct bus_type i2c_bus_type;
extern struct device_type i2c_adapter_type;
extern struct device_type i2c_client_type;
@@ -746,6 +746,8 @@ struct i2c_adapter {
struct irq_domain *host_notify_domain;
struct regulator *bus_regulator;
+
+ struct dentry *debugfs;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -850,7 +852,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
-#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* Memory modules */
/* Warn users that the adapter doesn't support classes anymore */
#define I2C_CLASS_DEPRECATED (1<<8)
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 84ed77c049..e119f11948 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -54,6 +54,7 @@ enum i3c_hdr_mode {
* struct i3c_priv_xfer - I3C SDR private transfer
* @rnw: encodes the transfer direction. true for a read, false for a write
* @len: transfer length in bytes of the transfer
+ * @actual_len: actual number of bytes transferred by the controller
* @data: input/output buffer
* @data.in: input buffer. Must point to a DMA-able buffer
* @data.out: output buffer. Must point to a DMA-able buffer
@@ -62,6 +63,7 @@ enum i3c_hdr_mode {
struct i3c_priv_xfer {
u8 rnw;
u16 len;
+ u16 actual_len;
union {
void *in;
const void *out;
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 24c1863b86..0ca27dd869 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -76,7 +76,6 @@ struct i2c_dev_boardinfo {
/**
* struct i2c_dev_desc - I2C device descriptor
* @common: common part of the I2C device descriptor
- * @boardinfo: pointer to the boardinfo attached to this I2C device
* @dev: I2C device object registered to the I2C framework
* @addr: I2C device address
* @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
@@ -434,6 +433,8 @@ struct i3c_bus {
* for a future IBI
* This method is mandatory only if ->request_ibi is not
* NULL.
+ * @enable_hotjoin: enable hot-join event detection.
+ * @disable_hotjoin: disable hot-join event detection.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -460,6 +461,8 @@ struct i3c_master_controller_ops {
int (*disable_ibi)(struct i3c_dev_desc *dev);
void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot);
+ int (*enable_hotjoin)(struct i3c_master_controller *master);
+ int (*disable_hotjoin)(struct i3c_master_controller *master);
};
/**
@@ -473,6 +476,7 @@ struct i3c_master_controller_ops {
* @ops: master operations. See &struct i3c_master_controller_ops
* @secondary: true if the master is a secondary master
* @init_done: true when the bus initialization is done
+ * @hotjoin: true if the master supports hot-join
* @boardinfo.i3c: list of I3C boardinfo objects
* @boardinfo.i2c: list of I2C boardinfo objects
* @boardinfo: board-level information attached to devices connected on the bus
@@ -495,6 +499,7 @@ struct i3c_master_controller {
const struct i3c_master_controller_ops *ops;
unsigned int secondary : 1;
unsigned int init_done : 1;
+ unsigned int hotjoin: 1;
struct {
struct list_head i3c;
struct list_head i2c;
@@ -551,6 +556,8 @@ int i3c_master_register(struct i3c_master_controller *master,
const struct i3c_master_controller_ops *ops,
bool secondary);
void i3c_master_unregister(struct i3c_master_controller *master);
+int i3c_master_enable_hotjoin(struct i3c_master_controller *master);
+int i3c_master_disable_hotjoin(struct i3c_master_controller *master);
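
A controller opts in by filling the two new ops; the exported helpers then flip the hotjoin bit on behalf of the core. A hypothetical controller wiring (all mydrv names are invented):

static int mydrv_enable_hotjoin(struct i3c_master_controller *m)
{
	/* program the controller to detect Hot-Join events */
	return 0;
}

static int mydrv_disable_hotjoin(struct i3c_master_controller *m)
{
	/* mask Hot-Join detection again */
	return 0;
}

static const struct i3c_master_controller_ops mydrv_ops = {
	/* ... mandatory ops elided ... */
	.enable_hotjoin	= mydrv_enable_hotjoin,
	.disable_hotjoin = mydrv_disable_hotjoin,
};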
/**
* i3c_dev_get_master_data() - get master private data attached to an I3C
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 2b5e500bf0..83c4d060a5 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -172,11 +172,11 @@
#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1)
-/* PV1 Layout 11ah 9.8.3.1 */
+/* PV1 Layout IEEE 802.11-2020 9.8.3.1 */
#define IEEE80211_PV1_FCTL_VERS 0x0003
#define IEEE80211_PV1_FCTL_FTYPE 0x001c
#define IEEE80211_PV1_FCTL_STYPE 0x00e0
-#define IEEE80211_PV1_FCTL_TODS 0x0100
+#define IEEE80211_PV1_FCTL_FROMDS 0x0100
#define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200
#define IEEE80211_PV1_FCTL_PM 0x0400
#define IEEE80211_PV1_FCTL_MOREDATA 0x0800
@@ -2720,6 +2720,7 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
/**
* struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 3028af8771..c1645c86ee 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -540,7 +540,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
if (!eth_type_vlan(veth->h_vlan_proto))
- return -EINVAL;
+ return -ENODATA;
*vlan_tci = ntohs(veth->h_vlan_TCI);
return 0;
@@ -561,7 +561,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
return 0;
} else {
*vlan_tci = 0;
- return -EINVAL;
+ return -ENODATA;
}
}
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 6564bdcdac..18d3702fa9 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -19,14 +19,12 @@ struct device;
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
- * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
* @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
* @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
* @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
* @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
*/
enum iio_block_state {
- IIO_BLOCK_STATE_DEQUEUED,
IIO_BLOCK_STATE_QUEUED,
IIO_BLOCK_STATE_ACTIVE,
IIO_BLOCK_STATE_DONE,
@@ -73,12 +71,15 @@ struct iio_dma_buffer_block {
* @active_block: Block being used in read()
* @pos: Read offset in the active block
* @block_size: Size of each block
+ * @next_dequeue: index of next block that will be dequeued
*/
struct iio_dma_buffer_queue_fileio {
struct iio_dma_buffer_block *blocks[2];
struct iio_dma_buffer_block *active_block;
size_t pos;
size_t block_size;
+
+ unsigned int next_dequeue;
};
/**
@@ -93,7 +94,6 @@ struct iio_dma_buffer_queue_fileio {
* list and typically also a list of active blocks in the part that handles
* the DMA controller
* @incoming: List of buffers on the incoming queue
- * @outgoing: List of buffers on the outgoing queue
* @active: Whether the buffer is currently active
* @fileio: FileIO state
*/
@@ -105,7 +105,6 @@ struct iio_dma_buffer_queue {
struct mutex lock;
spinlock_t list_lock;
struct list_head incoming;
- struct list_head outgoing;
bool active;
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index d0ce3b7110..c5b36d2c1e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -434,13 +434,7 @@ struct iio_trigger; /* forward declaration */
* @update_scan_mode: function to configure device and scan buffer when
* channels have changed
* @debugfs_reg_access: function to read or write register value of device
- * @of_xlate: function pointer to obtain channel specifier index.
- * When #iio-cells is greater than '0', the driver could
- * provide a custom of_xlate function that reads the
- * *args* and returns the appropriate index in registered
- * IIO channels array.
* @fwnode_xlate: fwnode based function pointer to obtain channel specifier index.
- * Functionally the same as @of_xlate.
* @hwfifo_set_watermark: function pointer to set the current hardware
* fifo watermark level; see hwfifo_* entries in
* Documentation/ABI/testing/sysfs-bus-iio for details on
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 117bde7d6a..d89982c983 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -68,6 +68,7 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
IIO_CHAN_INFO_CALIBAMBIENT,
IIO_CHAN_INFO_ZEROPOINT,
+ IIO_CHAN_INFO_TROUGH,
};
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
index c1c76a70a6..adb83a42a6 100644
--- a/include/linux/indirect_call_wrapper.h
+++ b/include/linux/indirect_call_wrapper.h
@@ -11,7 +11,7 @@
* @__VA_ARGS__: arguments for @f
*
* Avoid retpoline overhead for known builtin, checking @f vs each of them and
- * eventually invoking directly the builtin function. The functions are check
+ * eventually invoking directly the builtin function. The functions are checked
* in the given order. Fallback to the indirect call.
*/
#define INDIRECT_CALL_1(f, f1, ...) \
diff --git a/include/linux/init.h b/include/linux/init.h
index 01b52c9c75..3fa3f62413 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -179,6 +179,13 @@ extern void (*late_time_init)(void);
extern bool initcall_debug;
+#ifdef MODULE
+extern struct module __this_module;
+#define THIS_MODULE (&__this_module)
+#else
+#define THIS_MODULE ((struct module *)0)
+#endif
+
#endif
#ifndef MODULE
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 40fc5813cf..bccb3f1f62 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -37,13 +37,6 @@ extern struct cred init_cred;
#define INIT_TASK_COMM "swapper"
-/* Attach to the init_task data structure for proper alignment */
-#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
-#define __init_task_data __section(".data..init_task")
-#else
-#define __init_task_data /**/
-#endif
-
/* Attach to the thread_info data structure for proper alignment */
#define __init_thread_info __section(".data..init_thread_info")
diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h
index 5fba52a56c..5705d5de3a 100644
--- a/include/linux/input/as5011.h
+++ b/include/linux/input/as5011.h
@@ -7,7 +7,6 @@
*/
struct as5011_platform_data {
- unsigned int button_gpio;
unsigned int axis_irq; /* irq number */
unsigned long axis_irqflags;
char xp, xn; /* threshold for x axis */
diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h
index d464ffb4db..5192ae3f5e 100644
--- a/include/linux/input/navpoint.h
+++ b/include/linux/input/navpoint.h
@@ -5,5 +5,4 @@
struct navpoint_platform_data {
int port; /* PXA SSP port for pxa_ssp_request() */
- int gpio; /* GPIO for power on/off */
};
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index f45f13304a..7716226502 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -94,6 +94,9 @@ int ishtp_cl_link(struct ishtp_cl *cl);
void ishtp_cl_unlink(struct ishtp_cl *cl);
int ishtp_cl_disconnect(struct ishtp_cl *cl);
int ishtp_cl_connect(struct ishtp_cl *cl);
+int ishtp_cl_establish_connection(struct ishtp_cl *cl, const guid_t *uuid,
+ int tx_size, int rx_size, bool reset);
+void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset);
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
int ishtp_cl_flush_queues(struct ishtp_cl *cl);
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
index ee07393445..a3529b962b 100644
--- a/include/linux/intel_tpmi.h
+++ b/include/linux/intel_tpmi.h
@@ -12,6 +12,19 @@
#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val)
#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val)
+/*
+ * List of supported TPMI IDs.
+ * Some TPMI IDs are not used by Linux, so the numbers are not consecutive.
+ */
+enum intel_tpmi_id {
+ TPMI_ID_RAPL = 0, /* Running Average Power Limit */
+ TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
+ TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
+ TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
+ TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
+};
+
/**
* struct intel_tpmi_plat_info - Platform information for a TPMI device instance
* @package_id: CPU Package id
@@ -32,7 +45,6 @@ struct intel_tpmi_plat_info {
struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev);
struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index);
int tpmi_get_resource_count(struct auxiliary_device *auxdev);
-
-int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, int *locked,
- int *disabled);
+int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked,
+ bool *write_blocked);
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 76121c2bb4..5c9bdd3ffc 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -67,6 +67,8 @@
* later.
* IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
* depends on IRQF_PERCPU.
+ * IRQF_COND_ONESHOT - Agree to do IRQF_ONESHOT if already set for a shared
+ * interrupt.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -82,6 +84,7 @@
#define IRQF_COND_SUSPEND 0x00040000
#define IRQF_NO_AUTOEN 0x00080000
#define IRQF_NO_DEBUG 0x00100000
+#define IRQF_COND_ONESHOT 0x00200000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
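
The new flag lets a driver share a line whose first owner demanded IRQF_ONESHOT without forcing oneshot behaviour when it is alone on the line. A hedged usage sketch (my_handler and dev are hypothetical):

ret = request_irq(irq, my_handler, IRQF_SHARED | IRQF_COND_ONESHOT,
		  "mydev", dev);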
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 25142a0e2f..86cf1f7ae3 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -100,6 +100,30 @@ struct io_pgtable_cfg {
const struct iommu_flush_ops *tlb;
struct device *iommu_dev;
+ /**
+ * @alloc: Custom page allocator.
+ *
+ * Optional hook used to allocate page tables. If this function is NULL,
+ * @free must be NULL too.
+ *
+ * Memory returned should be zeroed and suitable for dma_map_single() and
+ * virt_to_phys().
+ *
+ * Not all formats support custom page allocators. Before considering
+ * passing a non-NULL value, make sure the chosen page format supports
+ * this feature.
+ */
+ void *(*alloc)(void *cookie, size_t size, gfp_t gfp);
+
+ /**
+ * @free: Custom page de-allocator.
+ *
+ * Optional hook used to free page tables allocated with the @alloc
+ * hook. Must be non-NULL if @alloc is not NULL, must be NULL
+ * otherwise.
+ */
+ void (*free)(void *cookie, void *pages, size_t size);
+
/* Low-level data specific to the table format */
union {
struct {
@@ -242,15 +266,25 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop,
}
/**
+ * enum io_pgtable_caps - IO page table backend capabilities.
+ */
+enum io_pgtable_caps {
+ /** @IO_PGTABLE_CAP_CUSTOM_ALLOCATOR: Backend accepts custom page table allocators. */
+ IO_PGTABLE_CAP_CUSTOM_ALLOCATOR = BIT(0),
+};
+
+/**
* struct io_pgtable_init_fns - Alloc/free a set of page tables for a
* particular format.
*
* @alloc: Allocate a set of page tables described by cfg.
* @free: Free the page tables associated with iop.
+ * @caps: Combination of @io_pgtable_caps flags encoding the backend capabilities.
*/
struct io_pgtable_init_fns {
struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
void (*free)(struct io_pgtable *iop);
+ u32 caps;
};
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
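
A backend advertising IO_PGTABLE_CAP_CUSTOM_ALLOCATOR accepts cfg->alloc/cfg->free pairs such as the following sketch, which meets the zeroed, virt_to_phys()-compatible requirement stated above (names hypothetical):

static void *my_pt_alloc(void *cookie, size_t size, gfp_t gfp)
{
	/* zeroed, physically contiguous, virt_to_phys()-friendly */
	return (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
}

static void my_pt_free(void *cookie, void *pages, size_t size)
{
	free_pages((unsigned long)pages, get_order(size));
}

/* before io_pgtable_alloc(): cfg.alloc = my_pt_alloc; cfg.free = my_pt_free; */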
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index b3a32687f5..68ed6697fe 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -6,71 +6,13 @@
#include <linux/xarray.h>
#include <uapi/linux/io_uring.h>
-enum io_uring_cmd_flags {
- IO_URING_F_COMPLETE_DEFER = 1,
- IO_URING_F_UNLOCKED = 2,
- /* the request is executed from poll, it should not be freed */
- IO_URING_F_MULTISHOT = 4,
- /* executed by io-wq */
- IO_URING_F_IOWQ = 8,
- /* int's last bit, sign checks are usually faster than a bit test */
- IO_URING_F_NONBLOCK = INT_MIN,
-
- /* ctx state flags, for URING_CMD */
- IO_URING_F_SQE128 = (1 << 8),
- IO_URING_F_CQE32 = (1 << 9),
- IO_URING_F_IOPOLL = (1 << 10),
-
- /* set when uring wants to cancel a previously issued command */
- IO_URING_F_CANCEL = (1 << 11),
- IO_URING_F_COMPAT = (1 << 12),
-};
-
-/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
-#define IORING_URING_CMD_CANCELABLE (1U << 30)
-#define IORING_URING_CMD_POLLED (1U << 31)
-
-struct io_uring_cmd {
- struct file *file;
- const struct io_uring_sqe *sqe;
- union {
- /* callback to defer completions to task context */
- void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
- /* used for polled completion */
- void *cookie;
- };
- u32 cmd_op;
- u32 flags;
- u8 pdu[32]; /* available inline for free use */
-};
-
-static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
-{
- return sqe->cmd;
-}
-
#if defined(CONFIG_IO_URING)
-int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd);
-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
- unsigned issue_flags);
-struct sock *io_uring_get_socket(struct file *file);
void __io_uring_cancel(bool cancel_all);
void __io_uring_free(struct task_struct *tsk);
void io_uring_unreg_ringfd(void);
const char *io_uring_get_opcode(u8 opcode);
-void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned),
- unsigned flags);
-/* users should follow semantics of IOU_F_TWQ_LAZY_WAKE */
-void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned));
-
-static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned))
-{
- __io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
-}
+int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+bool io_is_uring_fops(struct file *file);
static inline void io_uring_files_cancel(void)
{
@@ -89,29 +31,7 @@ static inline void io_uring_free(struct task_struct *tsk)
if (tsk->io_uring)
__io_uring_free(tsk);
}
-int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
-void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
- unsigned int issue_flags);
-struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd);
-bool io_is_uring_fops(struct file *file);
#else
-static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd)
-{
- return -EOPNOTSUPP;
-}
-static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
- ssize_t ret2, unsigned issue_flags)
-{
-}
-static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned))
-{
-}
-static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned))
-{
-}
static inline void io_uring_task_cancel(void)
{
}
@@ -130,14 +50,6 @@ static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
{
return -EOPNOTSUPP;
}
-static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
-{
-}
-static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
-{
- return NULL;
-}
static inline bool io_is_uring_fops(struct file *file)
{
return false;
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
new file mode 100644
index 0000000000..e453a997c0
--- /dev/null
+++ b/include/linux/io_uring/cmd.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_CMD_H
+#define _LINUX_IO_URING_CMD_H
+
+#include <uapi/linux/io_uring.h>
+#include <linux/io_uring_types.h>
+
+/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
+#define IORING_URING_CMD_CANCELABLE (1U << 30)
+
+struct io_uring_cmd {
+ struct file *file;
+ const struct io_uring_sqe *sqe;
+ /* callback to defer completions to task context */
+ void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
+ u32 cmd_op;
+ u32 flags;
+ u8 pdu[32]; /* available inline for free use */
+};
+
+static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
+{
+ return sqe->cmd;
+}
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd);
+void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
+ unsigned issue_flags);
+void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned),
+ unsigned flags);
+
+void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags);
+
+#else
+static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd)
+{
+ return -EOPNOTSUPP;
+}
+static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
+ ssize_t ret2, unsigned issue_flags)
+{
+}
+static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned),
+ unsigned flags)
+{
+}
+static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+}
+#endif
+
+/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
+static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+{
+ __io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
+}
+
+static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+{
+ __io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
+}
+
+static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->task;
+}
+
+#endif /* _LINUX_IO_URING_CMD_H */
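
Users of the relocated helpers keep the usual shape: return -EIOCBQUEUED from ->uring_cmd() and complete later from task context. A hypothetical driver sketch:

/* Sketch: defer completion to task context with the lazy-wake hint. */
static void mydrv_cmd_tw(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	io_uring_cmd_done(cmd, 0, 0, issue_flags);
}

static int mydrv_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	/* kick off the hardware operation, then complete asynchronously */
	io_uring_cmd_do_in_task_lazy(cmd, mydrv_cmd_tw);
	return -EIOCBQUEUED;
}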
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 335eca49dc..73de1f6f0a 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -7,6 +7,37 @@
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>
+enum {
+ /*
+ * A hint to not wake right away but delay until enough task-work items
+ * are queued to match the number of CQEs the task is waiting for.
+ *
+ * Must not be used with requests generating more than one CQE.
+ * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
+ */
+ IOU_F_TWQ_LAZY_WAKE = 1,
+};
+
+enum io_uring_cmd_flags {
+ IO_URING_F_COMPLETE_DEFER = 1,
+ IO_URING_F_UNLOCKED = 2,
+ /* the request is executed from poll, it should not be freed */
+ IO_URING_F_MULTISHOT = 4,
+ /* executed by io-wq */
+ IO_URING_F_IOWQ = 8,
+ /* int's last bit, sign checks are usually faster than a bit test */
+ IO_URING_F_NONBLOCK = INT_MIN,
+
+ /* ctx state flags, for URING_CMD */
+ IO_URING_F_SQE128 = (1 << 8),
+ IO_URING_F_CQE32 = (1 << 9),
+ IO_URING_F_IOPOLL = (1 << 10),
+
+ /* set when uring wants to cancel a previously issued command */
+ IO_URING_F_CANCEL = (1 << 11),
+ IO_URING_F_COMPAT = (1 << 12),
+};
+
struct io_wq_work_node {
struct io_wq_work_node *next;
};
@@ -250,7 +281,6 @@ struct io_ring_ctx {
struct io_submit_state submit_state;
- struct io_buffer_list *io_bl;
struct xarray io_bl_xa;
struct io_hash_table cancel_table_locked;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 81553770e4..c948289f64 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -106,7 +106,7 @@ struct iommu_domain {
unsigned type;
const struct iommu_domain_ops *ops;
const struct iommu_dirty_ops *dirty_ops;
-
+ const struct iommu_ops *owner; /* Whose domain_alloc we came from */
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
struct iommu_domain_geometry geometry;
struct iommu_dma_cookie *iova_cookie;
@@ -121,6 +121,11 @@ struct iommu_domain {
struct { /* IOMMU_DOMAIN_SVA */
struct mm_struct *mm;
int users;
+ /*
+ * Next iommu_domain in mm->iommu_mm->sva-domains list
+ * protected by iommu_sva_lock.
+ */
+ struct list_head next;
};
};
};
@@ -285,6 +290,23 @@ struct iommu_user_data {
};
/**
+ * struct iommu_user_data_array - iommu driver specific user space data array
+ * @type: The data type of all the entries in the user buffer array
+ * @uptr: Pointer to the user buffer array
+ * @entry_len: The fixed-width length of an entry in the array, in bytes
+ * @entry_num: The number of total entries in the array
+ *
+ * The user buffer includes an array of requests with format defined in
+ * include/uapi/linux/iommufd.h
+ */
+struct iommu_user_data_array {
+ unsigned int type;
+ void __user *uptr;
+ size_t entry_len;
+ u32 entry_num;
+};
+
+/**
* __iommu_copy_struct_from_user - Copy iommu driver specific user space data
* @dst_data: Pointer to an iommu driver specific user data that is defined in
* include/uapi/linux/iommufd.h
@@ -325,6 +347,57 @@ static inline int __iommu_copy_struct_from_user(
offsetofend(typeof(*kdst), min_last))
/**
+ * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ * data from an iommu_user_data_array
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @src_array: Pointer to a struct iommu_user_data_array for a user space array
+ * @data_type: The data type of the @dst_data. Must match with @src_array.type
+ * @index: Index to the location in the array to copy user data from
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user_array(
+ void *dst_data, const struct iommu_user_data_array *src_array,
+ unsigned int data_type, unsigned int index, size_t data_len,
+ size_t min_len)
+{
+ struct iommu_user_data src_data;
+
+ if (WARN_ON(!src_array || index >= src_array->entry_num))
+ return -EINVAL;
+ if (!src_array->entry_num)
+ return -EINVAL;
+ src_data.uptr = src_array->uptr + src_array->entry_len * index;
+ src_data.len = src_array->entry_len;
+ src_data.type = src_array->type;
+
+ return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
+ data_len, min_len);
+}
+
+/**
+ * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ * data from an iommu_user_data_array
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @user_array: Pointer to a struct iommu_user_data_array for a user space
+ * array
+ * @data_type: The data type of the @kdst. Must match with @user_array->type
+ * @index: Index to the location in the array to copy user data from
+ * @min_last: The last member of the data structure @kdst points in the
+ * initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
+ min_last) \
+ __iommu_copy_struct_from_user_array( \
+ kdst, user_array, data_type, index, sizeof(*(kdst)), \
+ offsetofend(typeof(*(kdst)), min_last))
+
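A driver's ->cache_invalidate_user() is the intended consumer: copy one entry per iteration, stop on the first error, and report back how many entries were handled. A sketch with invented driver types (struct mydrv_inv, its final member addr, and MYDRV_INV_TYPE):

static int mydrv_cache_invalidate_user(struct iommu_domain *domain,
				       struct iommu_user_data_array *array)
{
	struct mydrv_inv inv;
	int ret = 0;
	u32 i;

	for (i = 0; i < array->entry_num; i++) {
		ret = iommu_copy_struct_from_user_array(&inv, array,
							MYDRV_INV_TYPE, i, addr);
		if (ret)
			break;
		/* ... queue the invalidation described by 'inv' ... */
	}
	array->entry_num = i;	/* report the number of handled requests */
	return ret;
}
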
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
* @hw_info: report iommu hardware information. The data buffer returned by this
@@ -414,6 +487,7 @@ struct iommu_ops {
struct module *owner;
struct iommu_domain *identity_domain;
struct iommu_domain *blocked_domain;
+ struct iommu_domain *release_domain;
struct iommu_domain *default_domain;
};
@@ -440,6 +514,13 @@ struct iommu_ops {
* @iotlb_sync_map: Sync mappings created recently using @map to the hardware
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
+ * @cache_invalidate_user: Flush hardware cache for user space IO page table.
+ * The @domain must be IOMMU_DOMAIN_NESTED. The @array
+ * passes in the cache invalidation requests, in the form
+ * of a driver data structure. The driver must update
+ * array->entry_num to report the number of handled
+ * invalidation requests. The driver data structure
+ * must be defined in include/uapi/linux/iommufd.h
* @iova_to_phys: translate iova to physical address
* @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
* including no-snoop TLPs on PCIe or other platform
@@ -465,6 +546,8 @@ struct iommu_domain_ops {
size_t size);
void (*iotlb_sync)(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
+ int (*cache_invalidate_user)(struct iommu_domain *domain,
+ struct iommu_user_data_array *array);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
dma_addr_t iova);
@@ -810,6 +893,14 @@ struct iommu_fwspec {
struct iommu_sva {
struct device *dev;
struct iommu_domain *domain;
+ struct list_head handle_item;
+ refcount_t users;
+};
+
+struct iommu_mm_data {
+ u32 pasid;
+ struct list_head sva_domains;
+ struct list_head sva_handles;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
@@ -840,10 +931,7 @@ static inline void *dev_iommu_priv_get(struct device *dev)
return NULL;
}
-static inline void dev_iommu_priv_set(struct device *dev, void *priv)
-{
- dev->iommu->priv = priv;
-}
+void dev_iommu_priv_set(struct device *dev, void *priv);
extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);
@@ -1337,19 +1425,31 @@ static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream
return false;
}
-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
- mm->pasid = IOMMU_PASID_INVALID;
+ /*
+ * During dup_mm(), a new mm will be memcpy'd from an old one and that makes
+ * the new mm and the old one point to a same iommu_mm instance. When either
+ * one of the two mms gets released, the iommu_mm instance is freed, leaving
+ * the other mm running into a use-after-free/double-free problem. To avoid
+ * the problem, zeroing the iommu_mm pointer of a new mm is needed here.
+ */
+ mm->iommu_mm = NULL;
}
+
static inline bool mm_valid_pasid(struct mm_struct *mm)
{
- return mm->pasid != IOMMU_PASID_INVALID;
+ return READ_ONCE(mm->iommu_mm);
}
static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
- return mm->pasid;
+ struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);
+
+ if (!iommu_mm)
+ return IOMMU_PASID_INVALID;
+ return iommu_mm->pasid;
}
void mm_pasid_drop(struct mm_struct *mm);
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 14f5cfabbb..db7fe25f33 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -331,6 +331,9 @@ extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *));
extern int
+walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 7578d4f6a9..db1249cd96 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -47,7 +47,30 @@ static inline int task_nice_ioclass(struct task_struct *task)
}
#ifdef CONFIG_BLOCK
-int __get_task_ioprio(struct task_struct *p);
+/*
+ * If the task has set an I/O priority, use that. Otherwise, return
+ * the default I/O priority.
+ *
+ * Expected to be called for current task or with task_lock() held to keep
+ * io_context stable.
+ */
+static inline int __get_task_ioprio(struct task_struct *p)
+{
+ struct io_context *ioc = p->io_context;
+ int prio;
+
+ if (!ioc)
+ return IOPRIO_DEFAULT;
+
+ if (p != current)
+ lockdep_assert_held(&p->alloc_lock);
+
+ prio = ioc->ioprio;
+ if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
+ prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+ task_nice_ioprio(p));
+ return prio;
+}
#else
static inline int __get_task_ioprio(struct task_struct *p)
{
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
index 1b06d074ad..e3649a6563 100644
--- a/include/linux/iosys-map.h
+++ b/include/linux/iosys-map.h
@@ -168,9 +168,9 @@ struct iosys_map {
* about the use of uninitialized variable.
*/
#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \
- struct iosys_map copy = *map_; \
- iosys_map_incr(&copy, offset_); \
- copy; \
+ struct iosys_map copy_ = *map_; \
+ iosys_map_incr(&copy_, offset_); \
+ copy_; \
})
/**
@@ -391,14 +391,14 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
* Returns:
* The value read from the mapping.
*/
-#define iosys_map_rd(map__, offset__, type__) ({ \
- type__ val; \
- if ((map__)->is_iomem) { \
- __iosys_map_rd_io(val, (map__)->vaddr_iomem + (offset__), type__);\
- } else { \
- __iosys_map_rd_sys(val, (map__)->vaddr + (offset__), type__); \
- } \
- val; \
+#define iosys_map_rd(map__, offset__, type__) ({ \
+ type__ val_; \
+ if ((map__)->is_iomem) { \
+ __iosys_map_rd_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+ } else { \
+ __iosys_map_rd_sys(val_, (map__)->vaddr + (offset__), type__); \
+ } \
+ val_; \
})
/**
@@ -413,13 +413,13 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
* or if pointer may be unaligned (and problematic for the architecture
* supported), use iosys_map_memcpy_to()
*/
-#define iosys_map_wr(map__, offset__, type__, val__) ({ \
- type__ val = (val__); \
- if ((map__)->is_iomem) { \
- __iosys_map_wr_io(val, (map__)->vaddr_iomem + (offset__), type__);\
- } else { \
- __iosys_map_wr_sys(val, (map__)->vaddr + (offset__), type__); \
- } \
+#define iosys_map_wr(map__, offset__, type__, val__) ({ \
+ type__ val_ = (val__); \
+ if ((map__)->is_iomem) { \
+ __iosys_map_wr_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+ } else { \
+ __iosys_map_wr_sys(val_, (map__)->vaddr + (offset__), type__); \
+ } \
})
/**
@@ -485,9 +485,9 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
* The value read from the mapping.
*/
#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \
- struct_type__ *s; \
+ struct_type__ *s_; \
iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \
- typeof(s->field__)); \
+ typeof(s_->field__)); \
})
/**
@@ -508,9 +508,9 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
* usage and memory layout.
*/
#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \
- struct_type__ *s; \
+ struct_type__ *s_; \
iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \
- typeof(s->field__), val__); \
+ typeof(s_->field__), val__); \
})
#endif /* __IOSYS_MAP_H__ */
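
The trailing underscores added throughout this file are not cosmetic:
with the old spelling, a caller identifier named "val", "copy" or "s"
was silently captured by the macro's internal local. A small sketch of
caller code that used to misbehave (read_first_word() is illustrative):

static u32 read_first_word(void *buf)
{
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf);
	size_t val = 4;				/* caller's own "val" */

	/*
	 * With the old macro, "val" inside the offset expression bound to
	 * the macro's internal "type__ val" (not yet initialized), not to
	 * this variable. The val_ rename removes that capture.
	 */
	return iosys_map_rd(&map, val, u32);
}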
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index e1c9eea601..9b1434247a 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -2,7 +2,7 @@
#ifndef _LINUX_IPC_H
#define _LINUX_IPC_H
-#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
#include <linux/uidgid.h>
#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 2b665c32f5..3f003d5fde 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -12,6 +12,7 @@
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H
+#include <linux/irqflags_types.h>
#include <linux/typecheck.h>
#include <linux/cleanup.h>
#include <asm/irqflags.h>
@@ -34,19 +35,6 @@
#ifdef CONFIG_TRACE_IRQFLAGS
-/* Per-task IRQ trace events information. */
-struct irqtrace_events {
- unsigned int irq_events;
- unsigned long hardirq_enable_ip;
- unsigned long hardirq_disable_ip;
- unsigned int hardirq_enable_event;
- unsigned int hardirq_disable_event;
- unsigned long softirq_disable_ip;
- unsigned long softirq_enable_ip;
- unsigned int softirq_disable_event;
- unsigned int softirq_enable_event;
-};
-
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
@@ -126,7 +114,7 @@ do { \
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer) false
-# define lockdep_hrtimer_exit(__context) do { } while (0)
+# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0)
# define lockdep_posixtimer_enter() do { } while (0)
# define lockdep_posixtimer_exit() do { } while (0)
# define lockdep_irq_work_enter(__work) do { } while (0)
diff --git a/include/linux/irqflags_types.h b/include/linux/irqflags_types.h
new file mode 100644
index 0000000000..c13f0d9150
--- /dev/null
+++ b/include/linux/irqflags_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQFLAGS_TYPES_H
+#define _LINUX_IRQFLAGS_TYPES_H
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ unsigned long softirq_disable_ip;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+};
+
+#endif
+
+#endif /* _LINUX_IRQFLAGS_TYPES_H */
diff --git a/include/linux/ism.h b/include/linux/ism.h
index 9a4c204df3..5428edd909 100644
--- a/include/linux/ism.h
+++ b/include/linux/ism.h
@@ -86,7 +86,6 @@ int ism_register_dmb(struct ism_dev *dev, struct ism_dmb *dmb,
int ism_unregister_dmb(struct ism_dev *dev, struct ism_dmb *dmb);
int ism_move(struct ism_dev *dev, u64 dmb_tok, unsigned int idx, bool sf,
unsigned int offset, void *data, unsigned int size);
-u8 *ism_get_seid(void);
const struct smcd_ops *ism_get_smcd_ops(void);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index beb30719ee..971f3e826e 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -756,11 +756,6 @@ struct journal_s
unsigned long j_flags;
/**
- * @j_atomic_flags: Atomic journaling state flags.
- */
- unsigned long j_atomic_flags;
-
- /**
* @j_errno:
*
* Is there an outstanding uncleared error on the journal (from a prior
@@ -999,6 +994,13 @@ struct journal_s
struct block_device *j_fs_dev;
/**
+ * @j_fs_dev_wb_err:
+ *
+ * Records the errseq of the client fs's backing block device.
+ */
+ errseq_t j_fs_dev_wb_err;
+
+ /**
* @j_total_len: Total maximum capacity of the journal region on disk.
*/
unsigned int j_total_len;
@@ -1400,12 +1402,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
JBD2_JOURNAL_FLUSH_ZEROOUT)
/*
- * Journal atomic flag definitions
- */
-#define JBD2_CHECKPOINT_IO_ERROR 0x001 /* Detect io error while writing
- * buffer back to disk */
-
-/*
* Function declarations for the journaling transaction and buffer
* management
*/
@@ -1698,6 +1694,25 @@ static inline void jbd2_journal_abort_handle(handle_t *handle)
handle->h_aborted = 1;
}
+static inline void jbd2_init_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+
+ /*
+ * Save the original wb_err value of client fs's bdev mapping which
+ * could be used to detect the client fs's metadata async write error.
+ */
+ errseq_check_and_advance(&mapping->wb_err, &journal->j_fs_dev_wb_err);
+}
+
+static inline int jbd2_check_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+
+ return errseq_check(&mapping->wb_err,
+ READ_ONCE(journal->j_fs_dev_wb_err));
+}
+
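
An illustrative call pattern for the pair above (the my_fs_* callers are
assumptions, not jbd2 API): snapshot the client device's errseq once at
journal load, then test for newly surfaced writeback errors before
trusting later checkpoints or commits.

static int my_fs_load_journal(journal_t *journal)
{
	/* Baseline the fs device's wb_err before any journaled writes. */
	jbd2_init_fs_dev_write_error(journal);
	return jbd2_journal_load(journal);
}

static int my_fs_commit_check(journal_t *journal)
{
	/* Has the backing device seen a writeback error since the baseline? */
	if (jbd2_check_fs_dev_write_error(journal))
		return -EIO;
	return 0;
}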
#endif /* __KERNEL__ */
/* Comparison functions for transaction IDs: perform comparisons using
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 72cb693b07..dbb06d789e 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -4,6 +4,7 @@
#include <linux/bug.h>
#include <linux/kasan-enabled.h>
+#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>
@@ -129,20 +130,39 @@ static __always_inline void kasan_poison_slab(struct slab *slab)
__kasan_poison_slab(slab);
}
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
+ */
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_unpoison_object_data(cache, object);
+ __kasan_unpoison_new_object(cache, object);
}
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_poison_object_data(cache, object);
+ __kasan_poison_new_object(cache, object);
}
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
@@ -172,13 +192,6 @@ static __always_inline void kasan_kfree_large(void *ptr)
__kasan_kfree_large(ptr, _RET_IP_);
}
-void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr)
-{
- if (kasan_enabled())
- __kasan_slab_free_mempool(ptr, _RET_IP_);
-}
-
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
@@ -219,6 +232,113 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
return (void *)object;
}
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function is similar to kasan_mempool_poison_object() but operates on
+ * page allocations.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_pages().
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_pages(page, order, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function unpoisons a page allocation that was previously poisoned by
+ * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
+ * the tag-based modes, this function assigns a new tag to the allocation.
+ */
+static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_pages(page, order, _RET_IP_);
+}
+
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+/**
+ * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function poisons a slab allocation and saves a free stack trace for it
+ * without initializing the allocation's memory and without putting it into the
+ * quarantine (for the Generic mode).
+ *
+ * This function also performs checks to detect double-free and invalid-free
+ * bugs and reports them. The caller can use the return value of this function
+ * to find out if the allocation is buggy.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_object().
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_SIZE).
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_object(void *ptr)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_object(ptr, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
+/**
+ * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ * @size: Size to be unpoisoned.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function unpoisons a slab allocation that was previously poisoned via
+ * kasan_mempool_poison_object() and saves an alloc stack trace for it without
+ * initializing the allocation's memory. For the tag-based modes, this function
+ * does not assign a new tag to the allocation and instead restores the
+ * original tags based on the pointer value.
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_SIZE).
+ */
+static __always_inline void kasan_mempool_unpoison_object(void *ptr,
+ size_t size)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
+}
+
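
A sketch of how a subsystem-private object cache might use the
poison/unpoison pair above. struct my_pool, my_pool_push() and
my_pool_pop() are hypothetical, and the double-free handling is one
possible policy, not mandated by the API:

static void my_pool_put(struct my_pool *pool, void *elem)
{
	/* Poison and run the double-free/invalid-free checks. */
	if (!kasan_mempool_poison_object(elem))
		return;			/* buggy element: do not cache it */
	my_pool_push(pool, elem);	/* hypothetical cache insertion */
}

static void *my_pool_get(struct my_pool *pool)
{
	void *elem = my_pool_pop(pool);	/* hypothetical cache removal */

	if (elem)
		kasan_mempool_unpoison_object(elem, pool->elem_size);
	return elem;
}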
/*
* Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
* the hardware tag-based mode that doesn't rely on compiler instrumentation.
@@ -242,9 +362,9 @@ static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
@@ -256,7 +376,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init
return false;
}
static inline void kasan_kfree_large(void *ptr) {}
-static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags, bool init)
{
@@ -276,6 +395,17 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
return (void *)object;
}
+static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
+static inline bool kasan_mempool_poison_object(void *ptr)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}
+
static inline bool kasan_check_byte(const void *address)
{
return true;
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 99aaa050cc..e857a150ba 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -223,6 +223,8 @@ struct kernfs_node {
unsigned short flags;
umode_t mode;
struct kernfs_iattrs *iattr;
+
+ struct rcu_head rcu;
};
/*
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 8227455192..400cb6c021 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -403,7 +403,7 @@ bool kexec_load_permitted(int kexec_image_type);
/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
- KEXEC_FILE_NO_INITRAMFS)
+ KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG)
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
@@ -500,6 +500,13 @@ static inline int crash_hotplug_memory_support(void) { return 0; }
static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; }
#endif
+extern bool kexec_file_dbg_print;
+
+#define kexec_dprintk(fmt, ...) \
+ printk("%s" fmt, \
+ kexec_file_dbg_print ? KERN_INFO : KERN_DEBUG, \
+ ##__VA_ARGS__)
+
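
The macro selects the printk level at run time, so a single call site
serves both modes. A usage sketch (the message text is illustrative):

	/* Promoted from KERN_DEBUG to KERN_INFO when kexec_file_dbg_print
	 * is set, e.g. via the new KEXEC_FILE_DEBUG load flag. */
	kexec_dprintk("kexec_file: kernel loaded at %#lx\n", image->start);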
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 401af47575..88100cc9ca 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -223,6 +223,8 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
#else /* CONFIG_KFENCE */
+#define kfence_sample_interval (0)
+
static inline bool is_kfence_address(const void *addr) { return false; }
static inline void kfence_alloc_pool_and_metadata(void) { }
static inline void kfence_init(void) { }
diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h
index 8bfa6c9817..929287981a 100644
--- a/include/linux/kmsan_types.h
+++ b/include/linux/kmsan_types.h
@@ -9,6 +9,8 @@
#ifndef _LINUX_KMSAN_TYPES_H
#define _LINUX_KMSAN_TYPES_H
+#include <linux/types.h>
+
/* These constants are defined in the MSan LLVM instrumentation pass. */
#define KMSAN_RETVAL_SIZE 800
#define KMSAN_PARAM_SIZE 800
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index c2dd786a30..401348e9f9 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -76,8 +76,8 @@ static inline void ksm_exit(struct mm_struct *mm)
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
-struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr);
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
@@ -129,10 +129,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
return 0;
}
-static inline struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr)
{
- return page;
+ return folio;
}
static inline void rmap_walk_ksm(struct folio *folio,
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 73f20deb49..3a4e723eae 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -21,12 +21,10 @@
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H
-#include <linux/time.h>
-#include <linux/jiffies.h>
#include <asm/bug.h>
-
-/* Nanosecond scalar representation for kernel time values */
-typedef s64 ktime_t;
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/types.h>
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4944136efa..179df96b20 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -80,8 +80,8 @@
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
-#ifndef KVM_ADDRESS_SPACE_NUM
-#define KVM_ADDRESS_SPACE_NUM 1
+#ifndef KVM_MAX_NR_ADDRESS_SPACES
+#define KVM_MAX_NR_ADDRESS_SPACES 1
#endif
/*
@@ -253,9 +253,10 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
-#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
union kvm_mmu_notifier_arg {
pte_t pte;
+ unsigned long attributes;
};
struct kvm_gfn_range {
@@ -588,8 +589,20 @@ struct kvm_memory_slot {
u32 flags;
short id;
u16 as_id;
+
+#ifdef CONFIG_KVM_PRIVATE_MEM
+ struct {
+ struct file __rcu *file;
+ pgoff_t pgoff;
+ } gmem;
+#endif
};
+static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot)
+{
+ return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
+}
+
static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
{
return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
@@ -677,13 +690,29 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
-#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+#if KVM_MAX_NR_ADDRESS_SPACES == 1
+static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
+{
+ return KVM_MAX_NR_ADDRESS_SPACES;
+}
+
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
return 0;
}
#endif
+/*
+ * Arch code must define kvm_arch_has_private_mem if support for private memory
+ * is enabled.
+ */
+#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM)
+static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
struct kvm_memslots {
u64 generation;
atomic_long_t last_used_slot;
@@ -721,9 +750,9 @@ struct kvm {
struct mm_struct *mm; /* userspace tied to this vm */
unsigned long nr_memslot_pages;
/* The two memslot sets - active and inactive (per address space) */
- struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
+ struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
/* The current active memslot set for each address space */
- struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
+ struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
struct xarray vcpu_array;
/*
* Protected by slots_lock, but can be read outside if an
@@ -753,7 +782,7 @@ struct kvm {
struct list_head vm_list;
struct mutex lock;
struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
-#ifdef CONFIG_HAVE_KVM_EVENTFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
struct {
spinlock_t lock;
struct list_head items;
@@ -761,8 +790,8 @@ struct kvm {
struct list_head resampler_list;
struct mutex resampler_lock;
} irqfds;
- struct list_head ioeventfds;
#endif
+ struct list_head ioeventfds;
struct kvm_vm_stat stat;
struct kvm_arch arch;
refcount_t users_count;
@@ -778,17 +807,16 @@ struct kvm {
* Update side is protected by irq_lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
-#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+
struct hlist_head irq_ack_notifier_list;
#endif
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
struct mmu_notifier mmu_notifier;
unsigned long mmu_invalidate_seq;
long mmu_invalidate_in_progress;
- unsigned long mmu_invalidate_range_start;
- unsigned long mmu_invalidate_range_end;
+ gfn_t mmu_invalidate_range_start;
+ gfn_t mmu_invalidate_range_end;
#endif
struct list_head devices;
u64 manual_dirty_log_protect;
@@ -807,6 +835,10 @@ struct kvm {
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
struct notifier_block pm_notifier;
#endif
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+ /* Protected by slots_locks (for writes) and RCU (for reads) */
+ struct xarray mem_attr_array;
+#endif
char stats_id[KVM_STATS_NAME_SIZE];
};
@@ -965,7 +997,7 @@ static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
}
#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
@@ -989,7 +1021,7 @@ void kvm_put_kvm_no_destroy(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
- as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
+ as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
lockdep_is_held(&kvm->slots_lock) ||
!refcount_read(&kvm->users_count));
@@ -1146,9 +1178,9 @@ enum kvm_mr_change {
};
int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region2 *mem);
int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region2 *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -1392,10 +1424,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif
-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
- unsigned long end);
-void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
- unsigned long end);
+void kvm_mmu_invalidate_begin(struct kvm *kvm);
+void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
+void kvm_mmu_invalidate_end(struct kvm *kvm);
+bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -1947,7 +1979,7 @@ extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
if (unlikely(kvm->mmu_invalidate_in_progress))
@@ -1970,9 +2002,9 @@ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
return 0;
}
-static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
unsigned long mmu_seq,
- unsigned long hva)
+ gfn_t gfn)
{
lockdep_assert_held(&kvm->mmu_lock);
/*
@@ -1981,14 +2013,50 @@ static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
* that might be being invalidated. Note that it may include some false
 * positives, due to shortcuts when handling concurrent invalidations.
*/
- if (unlikely(kvm->mmu_invalidate_in_progress) &&
- hva >= kvm->mmu_invalidate_range_start &&
- hva < kvm->mmu_invalidate_range_end)
- return 1;
+ if (unlikely(kvm->mmu_invalidate_in_progress)) {
+ /*
+ * Dropping mmu_lock after bumping mmu_invalidate_in_progress
+ * but before updating the range is a KVM bug.
+ */
+ if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
+ kvm->mmu_invalidate_range_end == INVALID_GPA))
+ return 1;
+
+ if (gfn >= kvm->mmu_invalidate_range_start &&
+ gfn < kvm->mmu_invalidate_range_end)
+ return 1;
+ }
+
if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
+
+/*
+ * This lockless version of the range-based retry check *must* be paired with a
+ * call to the locked version after acquiring mmu_lock, i.e. this is safe to
+ * use only as a pre-check to avoid contending mmu_lock. This version *will*
+ * get false negatives and false positives.
+ */
+static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
+ unsigned long mmu_seq,
+ gfn_t gfn)
+{
+ /*
+ * Use READ_ONCE() to ensure the in-progress flag and sequence counter
+ * are always read from memory, e.g. so that checking for retry in a
+ * loop won't result in an infinite retry loop. Don't force loads for
+ * start+end, as the key to avoiding infinite retry loops is observing
+ * the 1=>0 transition of in-progress, i.e. getting false negatives
+ * due to stale start+end values is acceptable.
+ */
+ if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
+ gfn >= kvm->mmu_invalidate_range_start &&
+ gfn < kvm->mmu_invalidate_range_end)
+ return true;
+
+ return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
+}
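
A sketch of the intended pairing, shaped like a page-fault handler and
assuming an architecture where mmu_lock is an rwlock; my_map_gfn() and
its return codes are illustrative:

static int my_map_gfn(struct kvm_vcpu *vcpu, struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq = kvm->mmu_invalidate_seq;

	/* Pairs with the invalidate side bumping the sequence counter. */
	smp_rmb();

	/* ... resolve the pfn outside mmu_lock ... */

	if (mmu_invalidate_retry_gfn_unsafe(kvm, mmu_seq, gfn))
		return -EAGAIN;		/* cheap, lock-free pre-check */

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
		write_unlock(&kvm->mmu_lock);
		return -EAGAIN;		/* authoritative re-check under the lock */
	}
	/* ... install the mapping ... */
	write_unlock(&kvm->mmu_lock);
	return 0;
}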
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
@@ -2013,12 +2081,10 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
-#ifdef CONFIG_HAVE_KVM_EVENTFD
-
void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
bool kvm_notify_irqfd_resampler(struct kvm *kvm,
@@ -2039,31 +2105,7 @@ static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
{
return false;
}
-#endif
-
-#else
-
-static inline void kvm_eventfd_init(struct kvm *kvm) {}
-
-static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
-{
- return -EINVAL;
-}
-
-static inline void kvm_irqfd_release(struct kvm *kvm) {}
-
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
-static inline void kvm_irq_routing_update(struct kvm *kvm)
-{
-}
-#endif
-
-static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_HAVE_KVM_EVENTFD */
+#endif /* CONFIG_HAVE_KVM_IRQCHIP */
void kvm_arch_irq_routing_update(struct kvm *kvm);
@@ -2318,4 +2360,57 @@ static inline void kvm_account_pgtable_pages(void *virt, int nr)
/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536
+static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
+ gpa_t gpa, gpa_t size,
+ bool is_write, bool is_exec,
+ bool is_private)
+{
+ vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
+ vcpu->run->memory_fault.gpa = gpa;
+ vcpu->run->memory_fault.size = size;
+
+ /* RWX flags are not (yet) defined or communicated to userspace. */
+ vcpu->run->memory_fault.flags = 0;
+ if (is_private)
+ vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
+}
+
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
+{
+ return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
+}
+
+bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+ unsigned long attrs);
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+ return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) &&
+ kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
+}
+#else
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+
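
How the two pieces above combine: if a fault's shared/private view
disagrees with the gfn's attributes, the handler exits to userspace to
request a conversion. A shape-only sketch where struct my_fault and its
fields are assumptions, not common code:

static int my_handle_fault(struct kvm_vcpu *vcpu, struct my_fault *fault)
{
	struct kvm *kvm = vcpu->kvm;

	if (fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) {
		kvm_prepare_memory_fault_exit(vcpu, fault->gpa, PAGE_SIZE,
					      fault->write, fault->exec,
					      fault->is_private);
		return -EFAULT;		/* userspace converts and re-enters */
	}
	return 0;
}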
+#ifdef CONFIG_KVM_PRIVATE_MEM
+int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+#else
+static inline int kvm_gmem_get_pfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ kvm_pfn_t *pfn, int *max_order)
+{
+ KVM_BUG_ON(1, kvm);
+ return -EIO;
+}
+#endif /* CONFIG_KVM_PRIVATE_MEM */
+
#endif
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 6f4737d504..9d1f7835d8 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -6,6 +6,7 @@
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
+struct kvm_gfn_range;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index aa16dc2a82..4754b02d3a 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -527,23 +527,6 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
return led_cdev->trigger_data;
}
-/**
- * led_trigger_rename_static - rename a trigger
- * @name: the new trigger name
- * @trig: the LED trigger to rename
- *
- * Change a LED trigger name by copying the string passed in
- * name into current trigger name, which MUST be large
- * enough for the new string.
- *
- * Note that name must NOT point to the same string used
- * during LED registration, as that could lead to races.
- *
- * This is meant to be used on triggers with statically
- * allocated name.
- */
-void led_trigger_rename_static(const char *name, struct led_trigger *trig);
-
#define module_led_trigger(__led_trigger) \
module_driver(__led_trigger, led_trigger_register, \
led_trigger_unregister)
@@ -588,6 +571,9 @@ enum led_trigger_netdev_modes {
TRIGGER_NETDEV_LINK_10,
TRIGGER_NETDEV_LINK_100,
TRIGGER_NETDEV_LINK_1000,
+ TRIGGER_NETDEV_LINK_2500,
+ TRIGGER_NETDEV_LINK_5000,
+ TRIGGER_NETDEV_LINK_10000,
TRIGGER_NETDEV_HALF_DUPLEX,
TRIGGER_NETDEV_FULL_DUPLEX,
TRIGGER_NETDEV_TX,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 1d2afcd475..324d792e7c 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -472,7 +472,7 @@ enum ata_completion_errors {
/*
* Link power management policy: If you alter this, you also need to
- * alter libata-scsi.c (for the ascii descriptions)
+ * alter libata-sata.c (for the ascii descriptions)
*/
enum ata_lpm_policy {
ATA_LPM_UNKNOWN,
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index 7303b4bc2c..287f590ed5 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -10,6 +10,11 @@ static inline void linkmode_zero(unsigned long *dst)
bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static inline void linkmode_fill(unsigned long *dst)
+{
+ bitmap_fill(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
{
bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
diff --git a/include/linux/list.h b/include/linux/list.h
index 1837caedf7..059aa1fff4 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -1119,6 +1119,26 @@ static inline void hlist_move_list(struct hlist_head *old,
old->first = NULL;
}
+/**
+ * hlist_splice_init() - move all entries from one list to another
+ * @from: hlist_head from which entries will be moved
+ * @last: last entry on the @from list
+ * @to: hlist_head to which entries will be moved
+ *
+ * @to can be empty, @from must contain at least @last.
+ */
+static inline void hlist_splice_init(struct hlist_head *from,
+ struct hlist_node *last,
+ struct hlist_head *to)
+{
+ if (to->first)
+ to->first->pprev = &last->next;
+ last->next = to->first;
+ to->first = from->first;
+ from->first->pprev = &to->first;
+ from->first = NULL;
+}
+
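
A usage sketch for the splice above, draining a producer list into a
local one for batch processing; drain_pending() and the list names are
hypothetical. Note the documented precondition: @from must be non-empty
and @last must be its final entry.

static void drain_pending(struct hlist_head *pending)
{
	struct hlist_node *pos, *last = NULL;
	HLIST_HEAD(todo);

	if (hlist_empty(pending))
		return;
	hlist_for_each(pos, pending)	/* locate the current tail */
		last = pos;
	hlist_splice_init(pending, last, &todo);
	/* ... process entries on "todo" outside the producer's lock ... */
}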
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index b35968ee9f..7675a48a07 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -73,8 +73,10 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
/**
* list_lru_add: add an element to the lru list's tail
- * @list_lru: the lru pointer
+ * @lru: the lru pointer
* @item: the item to be added.
+ * @nid: the node id of the sublist to add the item to.
+ * @memcg: the cgroup of the sublist to add the item to.
*
* If the element is already part of a list, this function returns doing
* nothing. Therefore the caller does not need to keep state about whether or
@@ -83,24 +85,54 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
* the caller organize itself in a way that elements can be in more than
* one type of list, it is up to the caller to fully remove the item from
* the previous list (with list_lru_del() for instance) before moving it
- * to @list_lru
+ * to @lru.
+ *
+ * Return: true if the list was updated, false otherwise
+ */
+bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_add_obj: add an element to the lru list's tail
+ * @lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * This function is similar to list_lru_add(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This
+ * assumption is valid for slab object LRUs such as dentries, inodes, etc.
*
* Return value: true if the list was updated, false otherwise
*/
-bool list_lru_add(struct list_lru *lru, struct list_head *item);
+bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
/**
- * list_lru_del: delete an element to the lru list
- * @list_lru: the lru pointer
+ * list_lru_del: delete an element from the lru list
+ * @lru: the lru pointer
* @item: the item to be deleted.
+ * @nid: the node id of the sublist to delete the item from.
+ * @memcg: the cgroup of the sublist to delete the item from.
*
- * This function works analogously as list_lru_add in terms of list
+ * This function works analogously as list_lru_add() in terms of list
* manipulation. The comments about an element already pertaining to
- * a list are also valid for list_lru_del.
+ * a list are also valid for list_lru_del().
*
- * Return value: true if the list was updated, false otherwise
+ * Return: true if the list was updated, false otherwise
*/
-bool list_lru_del(struct list_lru *lru, struct list_head *item);
+bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_del_obj: delete an element from the lru list
+ * @lru: the lru pointer
+ * @item: the item to be deleted.
+ *
+ * This function is similar to list_lru_del(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This
+ * assumption is valid for slab object LRUs such as dentries, inodes, etc.
+ *
+ * Return: true if the list was updated, false otherwise.
+ */
+bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
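
In practice the _obj variants let slab-object LRUs drop the explicit
nid/memcg arguments and derive both from the object itself. A sketch in
the style of a dcache-like caller (the exact field names are assumed
here, not prescribed by this header):

	/* on insertion */
	list_lru_add_obj(&sb->s_dentry_lru, &dentry->d_lru);
	/* on removal */
	list_lru_del_obj(&sb->s_dentry_lru, &dentry->d_lru);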
/**
* list_lru_count_one: return the number of objects currently held by @lru
@@ -108,9 +140,11 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
* @nid: the node id to count from.
* @memcg: the cgroup to count from.
*
- * Always return a non-negative number, 0 for empty lists. There is no
- * guarantee that the list is not updated while the count is being computed.
- * Callers that want such a guarantee need to provide an outer lock.
+ * There is no guarantee that the list is not updated while the count is being
+ * computed. Callers that want such a guarantee need to provide an outer lock.
+ *
+ * Return: 0 for empty lists, otherwise the number of objects
+ * currently held by @lru.
*/
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg);
@@ -136,12 +170,28 @@ static inline unsigned long list_lru_count(struct list_lru *lru)
void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head);
+/**
+ * list_lru_putback: undo list_lru_isolate
+ * @lru: the lru pointer.
+ * @item: the item to put back.
+ * @nid: the node id of the sublist to put the item back to.
+ * @memcg: the cgroup of the sublist to put the item back to.
+ *
+ * Put back an isolated item into its original LRU. Note that unlike
+ * list_lru_add, this does not increment the node LRU count (as
+ * list_lru_isolate does not originally decrement this count).
+ *
+ * Since we might have dropped the LRU lock in between, recompute list_lru_one
+ * from the node's id and memcg.
+ */
+void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
/**
- * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
@@ -150,24 +200,24 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * This function will scan all elements in a particular list_lru, calling the
+ * This function will scan all elements in a particular @lru, calling the
* @isolate callback for each of those items, along with the current list
* spinlock and a caller-provided opaque. The @isolate callback can choose to
* drop the lock internally, but *must* return with the lock held. The callback
- * will return an enum lru_status telling the list_lru infrastructure what to
+ * will return an enum lru_status telling the @lru infrastructure what to
* do with the object being scanned.
*
- * Please note that nr_to_walk does not mean how many objects will be freed,
+ * Please note that @nr_to_walk does not mean how many objects will be freed,
* just how many objects will be scanned.
*
- * Return value: the number of objects effectively removed from the LRU.
+ * Return: the number of objects effectively removed from the LRU.
*/
unsigned long list_lru_walk_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
/**
- * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one_irq: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
@@ -176,7 +226,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * Same as @list_lru_walk_one except that the spinlock is acquired with
+ * Same as list_lru_walk_one() except that the spinlock is acquired with
* spin_lock_irq().
*/
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index dc2844b071..08b0d1d9d7 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -82,63 +82,6 @@ struct lock_chain {
u64 chain_key;
};
-#define MAX_LOCKDEP_KEYS_BITS 13
-#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
-#define INITIAL_CHAIN_KEY -1
-
-struct held_lock {
- /*
- * One-way hash of the dependency chain up to this point. We
- * hash the hashes step by step as the dependency chain grows.
- *
- * We use it for dependency-caching and we skip detection
- * passes and dependency-updates if there is a cache-hit, so
- * it is absolutely critical for 100% coverage of the validator
- * to have a unique key value for every unique dependency path
- * that can occur in the system, to make a unique hash value
- * as likely as possible - hence the 64-bit width.
- *
- * The task struct holds the current hash value (initialized
- * with zero), here we store the previous hash value:
- */
- u64 prev_chain_key;
- unsigned long acquire_ip;
- struct lockdep_map *instance;
- struct lockdep_map *nest_lock;
-#ifdef CONFIG_LOCK_STAT
- u64 waittime_stamp;
- u64 holdtime_stamp;
-#endif
- /*
- * class_idx is zero-indexed; it points to the element in
- * lock_classes this held lock instance belongs to. class_idx is in
- * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
- */
- unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
- /*
- * The lock-stack is unified in that the lock chains of interrupt
- * contexts nest ontop of process context chains, but we 'separate'
- * the hashes by starting with 0 if we cross into an interrupt
- * context, and we also keep do not add cross-context lock
- * dependencies - the lock usage graph walking covers that area
- * anyway, and we'd just unnecessarily increase the number of
- * dependencies otherwise. [Note: hardirq and softirq contexts
- * are separated from each other too.]
- *
- * The following field is used to detect when we cross into an
- * interrupt context:
- */
- unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
- unsigned int trylock:1; /* 16 bits */
-
- unsigned int read:2; /* see lock_acquire() comment */
- unsigned int check:1; /* see lock_acquire() comment */
- unsigned int hardirqs_off:1;
- unsigned int sync:1;
- unsigned int references:11; /* 32 bits */
- unsigned int pin_count;
-};
-
/*
* Initialization, self-test and debugging-output methods:
*/
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 2ebc323d34..70d30d40ea 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -127,12 +127,12 @@ struct lock_class {
unsigned long usage_mask;
const struct lock_trace *usage_traces[LOCK_TRACE_STATES];
+ const char *name;
/*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
int name_version;
- const char *name;
u8 wait_type_inner;
u8 wait_type_outer;
@@ -198,6 +198,63 @@ struct lockdep_map {
struct pin_cookie { unsigned int val; };
+#define MAX_LOCKDEP_KEYS_BITS 13
+#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY -1
+
+struct held_lock {
+ /*
+ * One-way hash of the dependency chain up to this point. We
+ * hash the hashes step by step as the dependency chain grows.
+ *
+ * We use it for dependency-caching and we skip detection
+ * passes and dependency-updates if there is a cache-hit, so
+ * it is absolutely critical for 100% coverage of the validator
+ * to have a unique key value for every unique dependency path
+ * that can occur in the system, to make a unique hash value
+ * as likely as possible - hence the 64-bit width.
+ *
+ * The task struct holds the current hash value (initialized
+ * with zero), here we store the previous hash value:
+ */
+ u64 prev_chain_key;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+ struct lockdep_map *nest_lock;
+#ifdef CONFIG_LOCK_STAT
+ u64 waittime_stamp;
+ u64 holdtime_stamp;
+#endif
+ /*
+ * class_idx is zero-indexed; it points to the element in
+ * lock_classes this held lock instance belongs to. class_idx is in
+ * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+ */
+ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
+ /*
+ * The lock-stack is unified in that the lock chains of interrupt
+ * contexts nest ontop of process context chains, but we 'separate'
+ * the hashes by starting with 0 if we cross into an interrupt
+ * context, and we also keep do not add cross-context lock
+ * dependencies - the lock usage graph walking covers that area
+ * anyway, and we'd just unnecessarily increase the number of
+ * dependencies otherwise. [Note: hardirq and softirq contexts
+ * are separated from each other too.]
+ *
+ * The following field is used to detect when we cross into an
+ * interrupt context:
+ */
+ unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+ unsigned int trylock:1; /* 16 bits */
+
+ unsigned int read:2; /* see lock_acquire() comment */
+ unsigned int check:1; /* see lock_acquire() comment */
+ unsigned int hardirqs_off:1;
+ unsigned int sync:1;
+ unsigned int references:11; /* 32 bits */
+ unsigned int pin_count;
+};
+
#else /* !CONFIG_LOCKDEP */
/*
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index ed3d517460..f9b5baf1b5 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -264,6 +264,10 @@ LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
struct inode *inode)
+LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr,
+ struct lsm_ctx __user *ctx, u32 *size, u32 flags)
+LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr,
+ struct lsm_ctx *ctx, u32 size, u32 flags)
LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
char **value)
LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index dcb5e5b5eb..a2ade0ffe9 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -25,6 +25,7 @@
#ifndef __LINUX_LSM_HOOKS_H
#define __LINUX_LSM_HOOKS_H
+#include <uapi/linux/lsm.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/rculist.h>
@@ -42,6 +43,18 @@ struct security_hook_heads {
#undef LSM_HOOK
} __randomize_layout;
+/**
+ * struct lsm_id - Identify a Linux Security Module.
+ * @lsm: name of the LSM, must be approved by the LSM maintainers
+ * @id: LSM ID number from uapi/linux/lsm.h
+ *
+ * Contains the information that identifies the LSM.
+ */
+struct lsm_id {
+ const char *name;
+ u64 id;
+};
+
/*
* Security module hook list structure.
* For use with generic list macros for common operations.
@@ -50,7 +63,7 @@ struct security_hook_list {
struct hlist_node list;
struct hlist_head *head;
union security_list_options hook;
- const char *lsm;
+ const struct lsm_id *lsmid;
} __randomize_layout;
/*
@@ -104,7 +117,7 @@ extern struct security_hook_heads security_hook_heads;
extern char *lsm_names;
extern void security_add_hooks(struct security_hook_list *hooks, int count,
- const char *lsm);
+ const struct lsm_id *lsmid);
#define LSM_FLAG_LEGACY_MAJOR BIT(0)
#define LSM_FLAG_EXCLUSIVE BIT(1)
diff --git a/include/linux/maple.h b/include/linux/maple.h
index 9b140272ee..9aae44efcf 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -5,7 +5,6 @@
#include <mach/maple.h>
struct device;
-extern struct bus_type maple_bus_type;
/* Maple Bus command and response codes */
enum maple_code {
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index d01e850b57..b3d63123b9 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -256,6 +256,8 @@ struct maple_tree {
struct maple_tree name = MTREE_INIT(name, 0)
#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_lock_nested(mt, subclass) \
+		spin_lock_nested((&(mt)->ma_lock), subclass)
#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
/*
@@ -327,6 +329,9 @@ int mtree_store(struct maple_tree *mt, unsigned long index,
void *entry, gfp_t gfp);
void *mtree_erase(struct maple_tree *mt, unsigned long index);
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+
void mtree_destroy(struct maple_tree *mt);
void __mt_destroy(struct maple_tree *mt);
@@ -345,6 +350,36 @@ static inline bool mtree_empty(const struct maple_tree *mt)
/* Advanced API */
/*
+ * Maple State Status
+ * ma_active means the maple state is pointing to a node and offset and can
+ * continue operating on the tree.
+ * ma_start means we have not searched the tree.
+ * ma_root means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * ma_none means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * ma_pause means the data within the maple state may be stale, restart the
+ * operation
+ * ma_overflow means the search has reached the upper limit of the search range
+ * ma_underflow means the search has reached the lower limit of the search range
+ * ma_error means there was an error, check the node for the error number.
+ */
+enum maple_status {
+ ma_active,
+ ma_start,
+ ma_root,
+ ma_none,
+ ma_pause,
+ ma_overflow,
+ ma_underflow,
+ ma_error,
+};
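
With the status split out of ma_state.node, callers test mas.status (or
the mas_is_*() helpers) instead of comparing the node against MAS_*
sentinels. A small sketch, with the tree and the debug message purely
illustrative:

	MA_STATE(mas, &tree, 0, 0);
	void *entry;

	rcu_read_lock();
	entry = mas_find(&mas, ULONG_MAX);
	rcu_read_unlock();

	if (!entry && mas.status == ma_none)
		pr_debug("no entries in the tree\n");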
+
+/*
* The maple state is defined in the struct ma_state and is used to keep track
* of information during operations, and even between operations when using the
* advanced API.
@@ -376,6 +411,13 @@ static inline bool mtree_empty(const struct maple_tree *mt)
* When returning a value the maple state index and last respectively contain
* the start and end of the range for the entry. Ranges are inclusive in the
* Maple Tree.
+ *
+ * The status of the state is used to determine how the next action should treat
+ * the state. For instance, if the status is ma_start then the next action
+ * should start at the root of the tree and walk down. If the status is
+ * ma_pause then the node may be stale data and should be discarded. If the
+ * status is ma_overflow, then the last action hit the upper limit.
+ *
*/
struct ma_state {
struct maple_tree *tree; /* The tree we're operating in */
@@ -385,9 +427,11 @@ struct ma_state {
unsigned long min; /* The minimum index of this node - implied pivot min */
unsigned long max; /* The maximum index of this node - implied pivot max */
struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ enum maple_status status; /* The status of the state (active, start, none, etc) */
unsigned char depth; /* depth of tree descent during write */
unsigned char offset;
unsigned char mas_flags;
+ unsigned char end; /* The end of the node */
};
struct ma_wr_state {
@@ -397,7 +441,6 @@ struct ma_wr_state {
unsigned long r_max; /* range max */
enum maple_type type; /* mas->node type */
unsigned char offset_end; /* The offset where the write ends */
- unsigned char node_end; /* mas->node end */
unsigned long *pivots; /* mas->node->pivots pointer */
unsigned long end_piv; /* The pivot at the offset end */
void __rcu **slots; /* mas->node->slots pointer */
@@ -406,30 +449,16 @@ struct ma_wr_state {
};
#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_lock_nested(mas, subclass) \
+ spin_lock_nested(&((mas)->tree->ma_lock), subclass)
#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
-
/*
* Special values for ma_state.node.
- * MAS_START means we have not searched the tree.
- * MAS_ROOT means we have searched the tree and the entry we found lives in
- * the root of the tree (ie it has index 0, length 1 and is the only entry in
- * the tree).
- * MAS_NONE means we have searched the tree and there is no node in the
- * tree for this entry. For example, we searched for index 1 in an empty
- * tree. Or we have a tree which points to a full leaf node and we
- * searched for an entry which is larger than can be contained in that
- * leaf node.
* MA_ERROR represents an errno. After dropping the lock and attempting
* to resolve the error, the walk would have to be restarted from the
* top of the tree as the tree may have been modified.
*/
-#define MAS_START ((struct maple_enode *)1UL)
-#define MAS_ROOT ((struct maple_enode *)5UL)
-#define MAS_NONE ((struct maple_enode *)9UL)
-#define MAS_PAUSE ((struct maple_enode *)17UL)
-#define MAS_OVERFLOW ((struct maple_enode *)33UL)
-#define MAS_UNDERFLOW ((struct maple_enode *)65UL)
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
@@ -438,7 +467,8 @@ struct ma_wr_state {
.tree = mt, \
.index = first, \
.last = end, \
- .node = MAS_START, \
+ .node = NULL, \
+ .status = ma_start, \
.min = 0, \
.max = ULONG_MAX, \
.alloc = NULL, \
@@ -469,7 +499,6 @@ void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
-bool mas_is_err(struct ma_state *mas);
bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
@@ -498,28 +527,18 @@ static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
mas->tree = tree;
mas->index = mas->last = addr;
mas->max = ULONG_MAX;
- mas->node = MAS_START;
-}
-
-/* Checks if a mas has not found anything */
-static inline bool mas_is_none(const struct ma_state *mas)
-{
- return mas->node == MAS_NONE;
+ mas->status = ma_start;
+ mas->node = NULL;
}
-/* Checks if a mas has been paused */
-static inline bool mas_is_paused(const struct ma_state *mas)
+static inline bool mas_is_active(struct ma_state *mas)
{
- return mas->node == MAS_PAUSE;
+ return mas->status == ma_active;
}
-/* Check if the mas is pointing to a node or not */
-static inline bool mas_is_active(struct ma_state *mas)
+static inline bool mas_is_err(struct ma_state *mas)
{
- if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE)
- return true;
-
- return false;
+ return mas->status == ma_error;
}
/**
@@ -532,9 +551,10 @@ static inline bool mas_is_active(struct ma_state *mas)
*
* Context: Any context.
*/
-static inline void mas_reset(struct ma_state *mas)
+static __always_inline void mas_reset(struct ma_state *mas)
{
- mas->node = MAS_START;
+ mas->status = ma_start;
+ mas->node = NULL;
}
/**
@@ -550,6 +570,131 @@ static inline void mas_reset(struct ma_state *mas)
*/
#define mas_for_each(__mas, __entry, __max) \
while (((__entry) = mas_find((__mas), (__max))) != NULL)
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+enum mt_dump_format {
+ mt_dump_dec,
+ mt_dump_hex,
+};
+
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
+void mas_dump(const struct ma_state *mas);
+void mas_wr_dump(const struct ma_wr_state *wr_mas);
+void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_BUG_ON(__mas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_WR_BUG_ON(__wrmas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MT_WARN_ON(__tree, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WARN_ON(__mas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
+#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
+#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
+
/**
* __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
* current location.
@@ -563,6 +708,9 @@ static inline void mas_reset(struct ma_state *mas)
static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
unsigned long last)
{
+ /* Ensure the range starts within the current slot */
+ MAS_WARN_ON(mas, mas_is_active(mas) &&
+ (mas->index > start || mas->last < start));
mas->index = start;
mas->last = last;
}
@@ -580,8 +728,8 @@ static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
+ mas_reset(mas);
__mas_set_range(mas, start, last);
- mas->node = MAS_START;
}
/**
@@ -706,129 +854,4 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
for (__entry = mt_find(__tree, &(__index), __max); \
__entry; __entry = mt_find_after(__tree, &(__index), __max))
-
-#ifdef CONFIG_DEBUG_MAPLE_TREE
-enum mt_dump_format {
- mt_dump_dec,
- mt_dump_hex,
-};
-
-extern atomic_t maple_tree_tests_run;
-extern atomic_t maple_tree_tests_passed;
-
-void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
-void mas_dump(const struct ma_state *mas);
-void mas_wr_dump(const struct ma_wr_state *wr_mas);
-void mt_validate(struct maple_tree *mt);
-void mt_cache_shrink(void);
-#define MT_BUG_ON(__tree, __x) do { \
- atomic_inc(&maple_tree_tests_run); \
- if (__x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mt_dump(__tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
-} while (0)
-
-#define MAS_BUG_ON(__mas, __x) do { \
- atomic_inc(&maple_tree_tests_run); \
- if (__x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_dump(__mas); \
- mt_dump((__mas)->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
-} while (0)
-
-#define MAS_WR_BUG_ON(__wrmas, __x) do { \
- atomic_inc(&maple_tree_tests_run); \
- if (__x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_wr_dump(__wrmas); \
- mas_dump((__wrmas)->mas); \
- mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
-} while (0)
-
-#define MT_WARN_ON(__tree, __x) ({ \
- int ret = !!(__x); \
- atomic_inc(&maple_tree_tests_run); \
- if (ret) { \
- pr_info("WARN at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mt_dump(__tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
- unlikely(ret); \
-})
-
-#define MAS_WARN_ON(__mas, __x) ({ \
- int ret = !!(__x); \
- atomic_inc(&maple_tree_tests_run); \
- if (ret) { \
- pr_info("WARN at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_dump(__mas); \
- mt_dump((__mas)->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
- unlikely(ret); \
-})
-
-#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
- int ret = !!(__x); \
- atomic_inc(&maple_tree_tests_run); \
- if (ret) { \
- pr_info("WARN at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mas_wr_dump(__wrmas); \
- mas_dump((__wrmas)->mas); \
- mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
- unlikely(ret); \
-})
-#else
-#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
-#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
-#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
-#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
-#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
-#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
-#endif /* CONFIG_DEBUG_MAPLE_TREE */
-
#endif /*_LINUX_MAPLE_TREE_H */
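The advanced API now tracks progress in mas->status instead of encoding it in sentinel mas->node values, and mtree_dup()/__mt_dup() provide wholesale tree duplication. A minimal usage sketch under those assumptions; walk_and_dup() and its body are illustrative, not part of this patch:

#include <linux/maple_tree.h>

static void walk_and_dup(struct maple_tree *mt, struct maple_tree *new)
{
	MA_STATE(mas, mt, 0, 0);	/* initializes .status = ma_start */
	void *entry;

	mas_lock(&mas);
	mas_for_each(&mas, entry, ULONG_MAX) {
		/* mas_is_active() now tests mas.status == ma_active */
		pr_debug("entry spans [%lu, %lu]\n", mas.index, mas.last);
	}
	mas_unlock(&mas);

	/* mtree_dup() duplicates mt into new; returns 0 or -ENOMEM */
	if (mtree_dup(mt, new, GFP_KERNEL))
		pr_warn("maple tree duplication failed\n");
}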
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 007fd9c3e4..79ceee3c86 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -38,6 +38,7 @@ struct mdio_device {
/* Bus address of the MDIO device (0-31) */
int addr;
int flags;
+ int reset_state;
struct gpio_desc *reset_gpio;
struct reset_control *reset_ctrl;
unsigned int reset_assert_delay;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ccf0176ba3..e208224058 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -125,6 +125,7 @@ unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
phys_addr_t base2, phys_addr_t size2);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
+bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7bdcf3020d..20ff87f8e0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -219,6 +219,12 @@ struct mem_cgroup {
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
unsigned long zswap_max;
+
+ /*
+ * Prevent pages from this memcg from being written back from zswap to
+ * swap, and from being swapped out on zswap store failures.
+ */
+ bool zswap_writeback;
#endif
unsigned long soft_limit;
@@ -324,7 +330,7 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
-#ifdef CONFIG_LRU_GEN
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* per-memcg mm_struct list */
struct lru_gen_mm_list mm_list;
#endif
@@ -821,6 +827,11 @@ static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
return !memcg || css_tryget(&memcg->css);
}
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget_online(&memcg->css);
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
@@ -1046,8 +1057,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return x;
}
-void mem_cgroup_flush_stats(void);
-void mem_cgroup_flush_stats_ratelimited(void);
+void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
+void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
@@ -1187,6 +1198,11 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+ return NULL;
+}
+
static inline bool folio_memcg_kmem(struct folio *folio)
{
return false;
@@ -1349,6 +1365,11 @@ static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
return true;
}
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
@@ -1548,11 +1569,11 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void mem_cgroup_flush_stats(void)
+static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}
-static inline void mem_cgroup_flush_stats_ratelimited(void)
+static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}
@@ -1926,6 +1947,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
@@ -1939,6 +1961,11 @@ static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
size_t size)
{
}
+static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
+{
+ /* if zswap is disabled, do not block pages going to the swapping device */
+ return true;
+}
#endif
#endif /* _LINUX_MEMCONTROL_H */
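mem_cgroup_flush_stats() and its ratelimited variant now take the memcg whose subtree should be flushed, and zswap writeback becomes controllable per cgroup. A hedged sketch of a caller; report_memcg() is a hypothetical name:

#include <linux/memcontrol.h>

static void report_memcg(struct mem_cgroup *memcg)
{
	/* Flush only this subtree rather than the entire hierarchy */
	mem_cgroup_flush_stats(memcg);

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		pr_debug("zswap writeback disabled for this memcg\n");
}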
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 1e39d27bee..69e7819000 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -33,7 +33,7 @@ struct memory_dev_type {
struct kref kref;
};
-struct node_hmem_attrs;
+struct access_coordinate;
#ifdef CONFIG_NUMA
extern bool numa_demotion_enabled;
@@ -45,9 +45,9 @@ void clear_node_memory_type(int node, struct memory_dev_type *memtype);
int register_mt_adistance_algorithm(struct notifier_block *nb);
int unregister_mt_adistance_algorithm(struct notifier_block *nb);
int mt_calc_adistance(int node, int *adist);
-int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
+int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
const char *source);
-int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist);
+int mt_perf_to_adistance(struct access_coordinate *perf, int *adist);
#ifdef CONFIG_MIGRATION
int next_demotion_node(int node);
void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
@@ -126,13 +126,13 @@ static inline int mt_calc_adistance(int node, int *adist)
return NOTIFY_DONE;
}
-static inline int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
+static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
const char *source)
{
return -EIO;
}
-static inline int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist)
+static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
{
return -EIO;
}
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 4aae6c06c5..7be1e32e6d 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -51,6 +51,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
+extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc;
extern void mempool_free(void *element, mempool_t *pool);
/*
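mempool_alloc_preallocated() hands out only elements already resident in the pool; it never invokes alloc_fn, so it does not sleep, but it may return NULL. A hedged sketch; grab_from_pool() is illustrative:

#include <linux/mempool.h>

static void *grab_from_pool(mempool_t *pool)
{
	/* No gfp_mask: never allocates, may return NULL immediately */
	void *elem = mempool_alloc_preallocated(pool);

	if (!elem)
		pr_debug("pool exhausted, caller must fall back\n");
	return elem;
}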
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 1314d9c5f0..744c830f4b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -196,8 +196,6 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
unsigned long memremap_compat_align(void);
#else
static inline void *devm_memremap_pages(struct device *dev,
@@ -228,16 +226,6 @@ static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
return false;
}
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
-{
- return 0;
-}
-
-static inline void vmem_altmap_free(struct vmem_altmap *altmap,
- unsigned long nr_pfns)
-{
-}
-
/* when memremap_pages() is disabled all archs can remap a single page */
static inline unsigned long memremap_compat_align(void)
{
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 311f7d3d23..54444ff2a5 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -405,7 +405,7 @@ enum max77693_haptic_reg {
MAX77693_HAPTIC_REG_END,
};
-/* max77693-pmic LSCNFG configuraton register */
+/* max77693-pmic LSCNFG configuration register */
#define MAX77693_PMIC_LOW_SYS_MASK 0x80
#define MAX77693_PMIC_LOW_SYS_SHIFT 7
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index 0bc7454c4d..2fb4db67f1 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -198,7 +198,7 @@ enum max77843_irq_muic {
#define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT)
#define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT)
-/* Max77843 charger insterrupts */
+/* Max77843 charger interrupts */
#define MAX77843_CHG_BYP_I BIT(0)
#define MAX77843_CHG_BATP_I BIT(2)
#define MAX77843_CHG_BAT_I BIT(3)
diff --git a/include/linux/mfd/si476x-platform.h b/include/linux/mfd/si476x-platform.h
index 18363b773d..cb99e16ca9 100644
--- a/include/linux/mfd/si476x-platform.h
+++ b/include/linux/mfd/si476x-platform.h
@@ -10,7 +10,7 @@
#ifndef __SI476X_PLATFORM_H__
#define __SI476X_PLATFORM_H__
-/* It is possible to select one of the four adresses using pins A0
+/* It is possible to select one of the four addresses using pins A0
* and A1 on SI476x */
#define SI476X_I2C_ADDR_1 0x60
#define SI476X_I2C_ADDR_2 0x61
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 701925db75..f67ef0a4e0 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -749,7 +749,7 @@
#define VDDCTRL_ST_SHIFT 0
-/*Register VDDCTRL_OP (0x28) bit definitios */
+/*Register VDDCTRL_OP (0x28) bit definitions */
#define VDDCTRL_OP_CMD_MASK 0x80
#define VDDCTRL_OP_CMD_SHIFT 7
#define VDDCTRL_OP_SEL_MASK 0x7F
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 039943ec4d..d0f9b522f3 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -266,6 +266,7 @@ struct mhi_event_config {
* struct mhi_controller_config - Root MHI controller configuration
* @max_channels: Maximum number of channels supported
* @timeout_ms: Timeout value for operations. 0 means use default
+ * @ready_timeout_ms: Timeout value for waiting for the device to be ready (optional)
* @buf_len: Size of automatically allocated buffers. 0 means use default
* @num_channels: Number of channels defined in @ch_cfg
* @ch_cfg: Array of defined channels
@@ -277,6 +278,7 @@ struct mhi_event_config {
struct mhi_controller_config {
u32 max_channels;
u32 timeout_ms;
+ u32 ready_timeout_ms;
u32 buf_len;
u32 num_channels;
const struct mhi_channel_config *ch_cfg;
@@ -330,6 +332,7 @@ struct mhi_controller_config {
* @pm_mutex: Mutex for suspend/resume operation
* @pm_lock: Lock for protecting MHI power management state
* @timeout_ms: Timeout in ms for state transitions
+ * @ready_timeout_ms: Timeout in ms for waiting for the device to be ready (optional)
* @pm_state: MHI power management state
* @db_access: DB access states
* @ee: MHI device execution environment
@@ -419,6 +422,7 @@ struct mhi_controller {
struct mutex pm_mutex;
rwlock_t pm_lock;
u32 timeout_ms;
+ u32 ready_timeout_ms;
u32 pm_state;
u32 db_access;
enum mhi_ee_type ee;
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index 96f3a13354..11bf3212f7 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -51,14 +51,23 @@ struct mhi_ep_db_info {
/**
* struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
+ * @mhi_dev: MHI device associated with this buffer
* @dev_addr: Address of the buffer in endpoint
 * @host_addr: Address of the buffer in the host
* @size: Size of the buffer
+ * @code: Transfer completion code
+ * @cb: Callback to be executed by controller drivers after transfer completion (async)
+ * @cb_buf: Opaque buffer to be passed to the callback
*/
struct mhi_ep_buf_info {
+ struct mhi_ep_device *mhi_dev;
void *dev_addr;
u64 host_addr;
size_t size;
+ int code;
+
+ void (*cb)(struct mhi_ep_buf_info *buf_info);
+ void *cb_buf;
};
/**
@@ -94,8 +103,10 @@ struct mhi_ep_buf_info {
* @raise_irq: CB function for raising IRQ to the host
* @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
* @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
- * @read_from_host: CB function for reading from host memory from endpoint
- * @write_to_host: CB function for writing to host memory from endpoint
+ * @read_sync: CB function for reading from host memory synchronously
+ * @write_sync: CB function for writing to host memory synchronously
+ * @read_async: CB function for reading from host memory asynchronously
+ * @write_async: CB function for writing to host memory asynchronously
* @mhi_state: MHI Endpoint state
* @max_chan: Maximum channels supported by the endpoint controller
* @mru: MRU (Maximum Receive Unit) value of the endpoint controller
@@ -149,8 +160,10 @@ struct mhi_ep_cntrl {
void __iomem **virt, size_t size);
void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
void __iomem *virt, size_t size);
- int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
- int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
enum mhi_state mhi_state;
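With the sync/async split, a controller driver supplies read_async()/write_async() implementations that complete out of line via the new cb/code fields of mhi_ep_buf_info. A rough sketch under that assumption; my_dma_done() and my_read_async() are invented names:

#include <linux/mhi_ep.h>

static void my_dma_done(void *param)
{
	struct mhi_ep_buf_info *buf_info = param;

	buf_info->code = 0;		/* transfer completion code */
	if (buf_info->cb)
		buf_info->cb(buf_info);	/* notify the MHI EP stack */
}

static int my_read_async(struct mhi_ep_cntrl *mhi_cntrl,
			 struct mhi_ep_buf_info *buf_info)
{
	/* Queue a DMA from buf_info->host_addr to buf_info->dev_addr of
	 * buf_info->size bytes, arranging for my_dma_done() to run on
	 * completion; return immediately without waiting.
	 */
	return 0;
}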
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
index fa940bbaf8..26b04f73f2 100644
--- a/include/linux/mii_timestamper.h
+++ b/include/linux/mii_timestamper.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
+#include <linux/net_tstamp.h>
struct phy_device;
@@ -51,7 +52,8 @@ struct mii_timestamper {
struct sk_buff *skb, int type);
int (*hwtstamp)(struct mii_timestamper *mii_ts,
- struct ifreq *ifreq);
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack);
void (*link_state)(struct mii_timestamper *mii_ts,
struct phy_device *phydev);
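The hwtstamp op now receives an already-parsed kernel-space config plus a netlink extack instead of a raw ifreq. A hedged sketch of an implementation; my_hwtstamp() is hypothetical:

#include <linux/mii_timestamper.h>
#include <linux/net_tstamp.h>

static int my_hwtstamp(struct mii_timestamper *mii_ts,
		       struct kernel_hwtstamp_config *cfg,
		       struct netlink_ext_ack *extack)
{
	if (cfg->tx_type != HWTSTAMP_TX_ON && cfg->tx_type != HWTSTAMP_TX_OFF) {
		NL_SET_ERR_MSG(extack, "unsupported tx_type");
		return -ERANGE;
	}
	/* program the timestamping hardware from cfg->rx_filter etc. */
	return 0;
}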
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 820bca965f..01275c6e84 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -918,7 +918,7 @@ static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}
-static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
+static inline bool cqe_has_vlan(const struct mlx5_cqe64 *cqe)
{
return cqe->l4_l3_hdr_type & 0x1;
}
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index d2b8d4a74a..41f03b3524 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -150,6 +150,7 @@ enum {
MLX5_REG_MTPPSE = 0x9054,
MLX5_REG_MTUTC = 0x9055,
MLX5_REG_MPEGC = 0x9056,
+ MLX5_REG_MPIR = 0x9059,
MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
MLX5_REG_MCC = 0x9062,
@@ -678,6 +679,9 @@ struct mlx5e_resources {
struct mlx5_td td;
u32 mkey;
struct mlx5_sq_bfreg bfreg;
+#define MLX5_MAX_NUM_TC 8
+ u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
+ bool tisn_valid;
} hw_objs;
struct net_device *uplink_netdev;
struct mutex uplink_netdev_lock;
@@ -688,6 +692,7 @@ enum mlx5_sw_icm_type {
MLX5_SW_ICM_TYPE_STEERING,
MLX5_SW_ICM_TYPE_HEADER_MODIFY,
MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN,
+ MLX5_SW_ICM_TYPE_SW_ENCAP,
};
#define MLX5_MAX_RESERVED_GIDS 8
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 950d2431a5..df73a2ccc9 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -7,6 +7,7 @@
#define _MLX5_ESWITCH_
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include <net/devlink.h>
#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
@@ -210,4 +211,11 @@ static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
}
+/* The returned number is valid only when the dev is eswitch manager. */
+static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_ecpf_esw_manager(dev) ?
+ MLX5_VPORT_ECPF : MLX5_VPORT_PF;
+}
+
#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index bfc8320fb4..486b749205 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -435,7 +435,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 flow_table_modify[0x1];
u8 reformat[0x1];
u8 decap[0x1];
- u8 reserved_at_9[0x1];
+ u8 reset_root_to_default[0x1];
u8 pop_vlan[0x1];
u8 push_vlan[0x1];
u8 reserved_at_c[0x1];
@@ -1193,7 +1193,8 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 log_sw_icm_alloc_granularity[0x6];
u8 log_steering_sw_icm_size[0x8];
- u8 reserved_at_120[0x18];
+ u8 log_indirect_encap_sw_icm_size[0x8];
+ u8 reserved_at_128[0x10];
u8 log_header_modify_pattern_sw_icm_size[0x8];
u8 header_modify_sw_icm_start_address[0x40];
@@ -1204,7 +1205,11 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 memic_operations[0x20];
- u8 reserved_at_220[0x5e0];
+ u8 reserved_at_220[0x20];
+
+ u8 indirect_encap_sw_icm_start_address[0x40];
+
+ u8 reserved_at_280[0x580];
};
struct mlx5_ifc_device_event_cap_bits {
@@ -1236,7 +1241,8 @@ struct mlx5_ifc_virtio_emulation_cap_bits {
u8 reserved_at_c0[0x13];
u8 desc_group_mkey_supported[0x1];
- u8 reserved_at_d4[0xc];
+ u8 freeze_to_rdy_supported[0x1];
+ u8 reserved_at_d5[0xb];
u8 reserved_at_e0[0x20];
@@ -1801,7 +1807,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 disable_local_lb_uc[0x1];
u8 disable_local_lb_mc[0x1];
u8 log_min_hairpin_wq_data_sz[0x5];
- u8 reserved_at_3e8[0x2];
+ u8 reserved_at_3e8[0x1];
+ u8 silent_mode[0x1];
u8 vhca_state[0x1];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
@@ -1818,7 +1825,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_460[0x1];
u8 ats[0x1];
- u8 reserved_at_462[0x1];
+ u8 cross_vhca_rqt[0x1];
u8 log_max_uctx[0x5];
u8 reserved_at_468[0x1];
u8 crypto[0x1];
@@ -1943,6 +1950,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
enum {
MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000,
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE = (1ULL << 20),
};
enum {
@@ -1992,7 +2000,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_260[0x120];
u8 reserved_at_380[0x10];
u8 ec_vf_vport_base[0x10];
- u8 reserved_at_3a0[0x460];
+
+ u8 reserved_at_3a0[0x10];
+ u8 max_rqt_vhca_id[0x10];
+
+ u8 reserved_at_3c0[0x440];
};
enum mlx5_ifc_flow_destination_type {
@@ -2151,6 +2163,13 @@ struct mlx5_ifc_rq_num_bits {
u8 rq_num[0x18];
};
+struct mlx5_ifc_rq_vhca_bits {
+ u8 reserved_at_0[0x8];
+ u8 rq_num[0x18];
+ u8 reserved_at_20[0x10];
+ u8 rq_vhca_id[0x10];
+};
+
struct mlx5_ifc_mac_address_layout_bits {
u8 reserved_at_0[0x10];
u8 mac_addr_47_32[0x10];
@@ -3901,7 +3920,10 @@ struct mlx5_ifc_rqtc_bits {
u8 reserved_at_e0[0x6a0];
- struct mlx5_ifc_rq_num_bits rq_num[];
+ union {
+ DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_num_bits, rq_num);
+ DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_vhca_bits, rq_vhca);
+ };
};
enum {
@@ -4014,8 +4036,13 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 affiliation_criteria[0x4];
u8 affiliated_vhca_id[0x10];
- u8 reserved_at_60[0xd0];
+ u8 reserved_at_60[0xa0];
+
+ u8 reserved_at_100[0x1];
+ u8 sd_group[0x3];
+ u8 reserved_at_104[0x1c];
+ u8 reserved_at_120[0x10];
u8 mtu[0x10];
u8 system_image_guid[0x40];
@@ -4744,7 +4771,10 @@ struct mlx5_ifc_set_l2_table_entry_in_bits {
u8 reserved_at_c0[0x20];
- u8 reserved_at_e0[0x13];
+ u8 reserved_at_e0[0x10];
+ u8 silent_mode_valid[0x1];
+ u8 silent_mode[0x1];
+ u8 reserved_at_f2[0x1];
u8 vlan_valid[0x1];
u8 vlan[0xc];
@@ -10089,6 +10119,19 @@ struct mlx5_ifc_mpegc_reg_bits {
u8 reserved_at_60[0x100];
};
+struct mlx5_ifc_mpir_reg_bits {
+ u8 sdm[0x1];
+ u8 reserved_at_1[0x1b];
+ u8 host_buses[0x4];
+
+ u8 reserved_at_20[0x20];
+
+ u8 local_port[0x8];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
enum {
MLX5_MTUTC_FREQ_ADJ_UNITS_PPB = 0x0,
MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM = 0x1,
@@ -10103,7 +10146,10 @@ enum {
struct mlx5_ifc_mtutc_reg_bits {
u8 reserved_at_0[0x5];
u8 freq_adj_units[0x3];
- u8 reserved_at_8[0x14];
+ u8 reserved_at_8[0x3];
+ u8 log_max_freq_adjustment[0x5];
+
+ u8 reserved_at_10[0xc];
u8 operation[0x4];
u8 freq_adjustment[0x20];
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
index b86d51a855..40371c916c 100644
--- a/include/linux/mlx5/mlx5_ifc_vdpa.h
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -145,6 +145,10 @@ enum {
MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX = (u64)1 << 7,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX = (u64)1 << 8,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY = (u64)1 << 11,
MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14,
};
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index fbb9bf4478..c36cc6d829 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -72,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u16 vport, u64 node_guid);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index da5219b48d..adbab01e2e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -994,6 +994,17 @@ static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
return mas_expected_entries(&vmi->mas, count);
}
+static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
+ unsigned long start, unsigned long end, gfp_t gfp)
+{
+ __mas_set_range(&vmi->mas, start, end - 1);
+ mas_store_gfp(&vmi->mas, NULL, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
/* Free any unused preallocations */
static inline void vma_iter_free(struct vma_iterator *vmi)
{
@@ -1193,14 +1204,16 @@ static inline void page_mapcount_reset(struct page *page)
* a large folio, it includes the number of times this page is mapped
* as part of that folio.
*
- * The result is undefined for pages which cannot be mapped into userspace.
- * For example SLAB or special types of pages. See function page_has_type().
- * They use this field in struct page differently.
+ * Will report 0 for pages which cannot be mapped into userspace, e.g.
+ * slab, page tables, and similar.
*/
static inline int page_mapcount(struct page *page)
{
int mapcount = atomic_read(&page->_mapcount) + 1;
+ /* Handle page_has_type() pages */
+ if (mapcount < 0)
+ mapcount = 0;
if (unlikely(PageCompound(page)))
mapcount += folio_entire_mapcount(page_folio(page));
@@ -1804,7 +1817,7 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
static inline u8 page_kasan_tag(const struct page *page)
{
- u8 tag = 0xff;
+ u8 tag = KASAN_TAG_KERNEL;
if (kasan_enabled()) {
tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
@@ -1833,7 +1846,7 @@ static inline void page_kasan_tag_set(struct page *page, u8 tag)
static inline void page_kasan_tag_reset(struct page *page)
{
if (kasan_enabled())
- page_kasan_tag_set(page, 0xff);
+ page_kasan_tag_set(page, KASAN_TAG_KERNEL);
}
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
@@ -1953,15 +1966,15 @@ static inline bool page_maybe_dma_pinned(struct page *page)
*
 * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
*/
-static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
- struct page *page)
+static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
+ struct folio *folio)
{
VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
return false;
- return page_maybe_dma_pinned(page);
+ return folio_maybe_dma_pinned(folio);
}
/**
@@ -2373,7 +2386,8 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int generic_error_remove_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_folio(struct address_space *mapping,
+ struct folio *folio);
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
unsigned long address, struct pt_regs *regs);
@@ -3859,6 +3873,32 @@ void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap);
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ /* number of pfns from base where pfn_to_page() is valid */
+ if (altmap)
+ return altmap->reserve + altmap->free;
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+ altmap->alloc -= nr_pfns;
+}
+#else
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+}
+#endif
+
#define VMEMMAP_RESERVE_NR 2
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
@@ -3904,6 +3944,7 @@ enum mf_flags {
MF_UNPOISON = 1 << 4,
MF_SW_SIMULATED = 1 << 5,
MF_NO_RETRY = 1 << 6,
+ MF_MEM_PRE_REMOVE = 1 << 7,
};
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
unsigned long count, int mf_flags);
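vmem_altmap_offset() and vmem_altmap_free() move here from memremap.h as inline helpers keyed on CONFIG_SPARSEMEM_VMEMMAP. A small sketch of what the offset means; first_usable_pfn() is illustrative:

#include <linux/mm.h>

static unsigned long first_usable_pfn(unsigned long base_pfn,
				      struct vmem_altmap *altmap)
{
	/* reserve + free pfns at the base back the memmap itself */
	return base_pfn + vmem_altmap_offset(altmap);
}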
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 950df415d7..8b611e1315 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -125,7 +125,7 @@ struct page {
struct page_pool *pp;
unsigned long _pp_mapping_pad;
unsigned long dma_addr;
- atomic_long_t pp_frag_count;
+ atomic_long_t pp_ref_count;
};
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
@@ -401,11 +401,11 @@ FOLIO_MATCH(compound_head, _head_2a);
* @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
* @__page_mapping: Aliases with page->mapping. Unused for page tables.
* @pt_mm: Used for x86 pgds.
- * @pt_frag_refcount: For fragmented page table tracking. Powerpc and s390 only.
+ * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
* @_pt_pad_2: Padding to ensure proper alignment.
* @ptl: Lock for the page table.
* @__page_type: Same as page->page_type. Unused for page tables.
- * @_refcount: Same as page refcount. Used for s390 page tables.
+ * @__page_refcount: Same as page refcount.
* @pt_memcg_data: Memcg data. Tracked for page tables here.
*
* This struct overlays struct page for now. Do not modify without a good
@@ -438,7 +438,7 @@ struct ptdesc {
#endif
};
unsigned int __page_type;
- atomic_t _refcount;
+ atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
unsigned long pt_memcg_data;
#endif
@@ -452,7 +452,7 @@ TABLE_MATCH(compound_head, _pt_pad_1);
TABLE_MATCH(mapping, __page_mapping);
TABLE_MATCH(rcu_head, pt_rcu_head);
TABLE_MATCH(page_type, __page_type);
-TABLE_MATCH(_refcount, _refcount);
+TABLE_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
TABLE_MATCH(memcg_data, pt_memcg_data);
#endif
@@ -730,6 +730,7 @@ struct mm_cid {
#endif
struct kioctx_table;
+struct iommu_mm_data;
struct mm_struct {
struct {
/*
@@ -941,8 +942,8 @@ struct mm_struct {
#endif
struct work_struct async_put_work;
-#ifdef CONFIG_IOMMU_SVA
- u32 pasid;
+#ifdef CONFIG_IOMMU_MM_DATA
+ struct iommu_mm_data *iommu_mm;
#endif
#ifdef CONFIG_KSM
/*
@@ -961,7 +962,7 @@ struct mm_struct {
*/
unsigned long ksm_zero_pages;
#endif /* CONFIG_KSM */
-#ifdef CONFIG_LRU_GEN
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
struct {
/* this mm_struct is on lru_gen_mm_list */
struct list_head list;
@@ -976,7 +977,7 @@ struct mm_struct {
struct mem_cgroup *memcg;
#endif
} lru_gen;
-#endif /* CONFIG_LRU_GEN */
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
} __randomize_layout;
/*
@@ -1014,11 +1015,13 @@ struct lru_gen_mm_list {
spinlock_t lock;
};
+#endif /* CONFIG_LRU_GEN */
+
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+
void lru_gen_add_mm(struct mm_struct *mm);
void lru_gen_del_mm(struct mm_struct *mm);
-#ifdef CONFIG_MEMCG
void lru_gen_migrate_mm(struct mm_struct *mm);
-#endif
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
@@ -1039,7 +1042,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
WRITE_ONCE(mm->lru_gen.bitmap, -1);
}
-#else /* !CONFIG_LRU_GEN */
+#else /* !CONFIG_LRU_GEN_WALKS_MMU */
static inline void lru_gen_add_mm(struct mm_struct *mm)
{
@@ -1049,11 +1052,9 @@ static inline void lru_gen_del_mm(struct mm_struct *mm)
{
}
-#ifdef CONFIG_MEMCG
static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
}
-#endif
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
@@ -1063,7 +1064,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
{
}
-#endif /* CONFIG_LRU_GEN */
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
struct vma_iterator {
struct ma_state mas;
@@ -1074,7 +1075,8 @@ struct vma_iterator {
.mas = { \
.tree = &(__mm)->mm_mt, \
.index = __addr, \
- .node = MAS_START, \
+ .node = NULL, \
+ .status = ma_start, \
}, \
}
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index aa44fff8bb..a2f6179b67 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -9,9 +9,6 @@
*/
#include <linux/types.h>
-#include <linux/threads.h>
-#include <linux/atomic.h>
-#include <linux/cpumask.h>
#include <asm/page.h>
@@ -36,6 +33,8 @@ enum {
NR_MM_COUNTERS
};
+struct page;
+
struct page_frag {
struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 7b12eebc55..f34407cc27 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -32,6 +32,7 @@ struct mmc_csd {
unsigned int r2w_factor;
unsigned int max_dtr;
unsigned int erase_size; /* In sectors */
+ unsigned int wp_grp_size;
unsigned int read_blkbits;
unsigned int write_blkbits;
unsigned int capacity;
@@ -52,9 +53,6 @@ struct mmc_ext_csd {
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
- u8 max_packed_writes;
- u8 max_packed_reads;
- u8 packed_event_en;
unsigned int part_time; /* Units: ms */
unsigned int sa_timeout; /* Units: 100ns */
unsigned int generic_cmd6_time; /* Units: 10ms */
@@ -306,6 +304,7 @@ struct mmc_card {
unsigned int eg_boundary; /* don't cross erase-group boundaries */
unsigned int erase_arg; /* erase / trim / discard */
u8 erased_byte; /* value of erased bytes */
+ unsigned int wp_grp_size; /* write group size in sectors */
u32 raw_cid[4]; /* raw card CID */
u32 raw_csd[4]; /* raw card CSD */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 6efec0b982..2c7928a509 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -27,7 +27,6 @@ struct mmc_command {
u32 opcode;
u32 arg;
#define MMC_CMD23_ARG_REL_WR (1 << 31)
-#define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30))
#define MMC_CMD23_ARG_TAG_REQ (1 << 29)
u32 resp[4];
unsigned int flags; /* expected response type */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 6f7993803e..cf2bcb5da3 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -257,8 +257,6 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
-#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
-#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
@@ -321,8 +319,6 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
-#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
-#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
#define EXT_CSD_BKOPS_SUPPORT 502 /* RO */
#define EXT_CSD_HPI_FEATURES 503 /* RO */
@@ -402,18 +398,12 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_PWR_CL_8BIT_SHIFT 4
#define EXT_CSD_PWR_CL_4BIT_SHIFT 0
-#define EXT_CSD_PACKED_EVENT_EN BIT(3)
-
/*
* EXCEPTION_EVENT_STATUS field
*/
#define EXT_CSD_URGENT_BKOPS BIT(0)
#define EXT_CSD_DYNCAP_NEEDED BIT(1)
#define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2)
-#define EXT_CSD_PACKED_FAILURE BIT(3)
-
-#define EXT_CSD_PACKED_GENERIC_ERROR BIT(0)
-#define EXT_CSD_PACKED_INDEXED_ERROR BIT(1)
/*
* BKOPS status level
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2c34e9325c..a497f189d9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -22,18 +22,21 @@
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
+#include <linux/zswap.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
-#define MAX_ORDER 10
+#define MAX_PAGE_ORDER 10
#else
-#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
+#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
-#define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
+#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
+#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
+
/*
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
* costly to service. That is between allocation orders which should
@@ -95,7 +98,7 @@ static inline bool migratetype_is_mergeable(int mt)
}
#define for_each_migratetype_order(order, type) \
- for (order = 0; order <= MAX_ORDER; order++) \
+ for (order = 0; order < NR_PAGE_ORDERS; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
@@ -207,6 +210,10 @@ enum node_stat_item {
PGPROMOTE_SUCCESS, /* promote successfully */
PGPROMOTE_CANDIDATE, /* candidate pages to promote */
#endif
+ /* PGDEMOTE_*: pages demoted */
+ PGDEMOTE_KSWAPD,
+ PGDEMOTE_DIRECT,
+ PGDEMOTE_KHUGEPAGED,
NR_VM_NODE_STAT_ITEMS
};
@@ -435,14 +442,12 @@ struct lru_gen_folio {
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* whether the multi-gen LRU is enabled */
bool enabled;
-#ifdef CONFIG_MEMCG
/* the memcg generation this lru_gen_folio belongs to */
u8 gen;
/* the list segment this lru_gen_folio belongs to */
u8 seg;
/* per-node lru_gen_folio list for global reclaim */
struct hlist_nulls_node list;
-#endif
};
enum {
@@ -488,11 +493,6 @@ struct lru_gen_mm_walk {
bool force_scan;
};
-void lru_gen_init_lruvec(struct lruvec *lruvec);
-void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
-
-#ifdef CONFIG_MEMCG
-
/*
* For each node, memcgs are divided into two generations: the old and the
* young. For each generation, memcgs are randomly sharded into multiple bins
@@ -550,6 +550,8 @@ struct lru_gen_memcg {
};
void lru_gen_init_pgdat(struct pglist_data *pgdat);
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
@@ -558,19 +560,6 @@ void lru_gen_offline_memcg(struct mem_cgroup *memcg);
void lru_gen_release_memcg(struct mem_cgroup *memcg);
void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
-#else /* !CONFIG_MEMCG */
-
-#define MEMCG_NR_GENS 1
-
-struct lru_gen_memcg {
-};
-
-static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
-{
-}
-
-#endif /* CONFIG_MEMCG */
-
#else /* !CONFIG_LRU_GEN */
static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
@@ -585,8 +574,6 @@ static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
}
-#ifdef CONFIG_MEMCG
-
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}
@@ -611,8 +598,6 @@ static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
{
}
-#endif /* CONFIG_MEMCG */
-
#endif /* CONFIG_LRU_GEN */
struct lruvec {
@@ -635,12 +620,15 @@ struct lruvec {
#ifdef CONFIG_LRU_GEN
/* evictable pages divided into generations */
struct lru_gen_folio lrugen;
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* to concurrently iterate lru_gen_mm_list */
struct lru_gen_mm_state mm_state;
#endif
+#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
+ struct zswap_lruvec_state zswap_lruvec_state;
};
/* Isolate for asynchronous migration */
@@ -947,10 +935,10 @@ struct zone {
CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
- struct free_area free_area[MAX_ORDER + 1];
+ struct free_area free_area[NR_PAGE_ORDERS];
#ifdef CONFIG_UNACCEPTED_MEMORY
- /* Pages to be accepted. All pages on the list are MAX_ORDER */
+ /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
struct list_head unaccepted_pages;
#endif
@@ -1760,8 +1748,8 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
#define SECTION_BLOCKFLAGS_BITS \
((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
-#if (MAX_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
-#error Allocator MAX_ORDER exceeds SECTION_SIZE
+#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
+#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
#endif
static inline unsigned long pfn_to_section_nr(unsigned long pfn)
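With MAX_ORDER renamed to MAX_PAGE_ORDER and NR_PAGE_ORDERS defined as MAX_PAGE_ORDER + 1, order loops become exclusive, matching for_each_migratetype_order() above. A minimal sketch; count_free_areas() is illustrative:

#include <linux/mmzone.h>

static void count_free_areas(struct zone *zone)
{
	unsigned int order;

	/* Exclusive bound replaces the old "order <= MAX_ORDER" idiom */
	for (order = 0; order < NR_PAGE_ORDERS; order++)
		pr_debug("order %u: %lu free\n", order,
			 zone->free_area[order].nr_free);
}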
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index b8da2db4ec..cd4d5c8781 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -244,7 +244,4 @@ static inline kgid_t mapped_fsgid(struct mnt_idmap *idmap,
return from_vfsgid(idmap, fs_userns, VFSGIDT_INIT(current_fsgid()));
}
-bool check_fsmapping(const struct mnt_idmap *idmap,
- const struct super_block *sb);
-
#endif /* _LINUX_MNT_IDMAPPING_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index a98e188cf3..96bc462872 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -540,6 +540,8 @@ struct module {
struct static_call_site *static_call_sites;
#endif
#if IS_ENABLED(CONFIG_KUNIT)
+ int num_kunit_init_suites;
+ struct kunit_suite **kunit_init_suites;
int num_kunit_suites;
struct kunit_suite **kunit_suites;
#endif
@@ -668,7 +670,7 @@ extern void __module_get(struct module *module);
* @module: the module we should check for
*
* Only try to get a module reference count if the module is not being removed.
- * This call will fail if the module is already being removed.
+ * This call will fail if the module is in the process of being removed.
*
* Care must also be taken to ensure the module exists and is alive prior to
 * usage of this call. This can be guaranteed through two means:
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 4fa9726bc3..bfb85fd13e 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -385,6 +385,8 @@ extern bool parameq(const char *name1, const char *name2);
*/
extern bool parameqn(const char *name1, const char *name2, size_t n);
+typedef int (*parse_unknown_fn)(char *param, char *val, const char *doing, void *arg);
+
/* Called on module insert or kernel boot */
extern char *parse_args(const char *name,
char *args,
@@ -392,9 +394,7 @@ extern char *parse_args(const char *name,
unsigned num,
s16 level_min,
s16 level_max,
- void *arg,
- int (*unknown)(char *param, char *val,
- const char *doing, void *arg));
+ void *arg, parse_unknown_fn unknown);
/* Called by module remove. */
#ifdef CONFIG_SYSFS
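The parse_unknown_fn typedef names the callback type that parse_args() previously spelled out inline. A hedged sketch of such a handler; ignore_unknown() is invented:

#include <linux/moduleparam.h>

static int ignore_unknown(char *param, char *val,
			  const char *doing, void *arg)
{
	pr_debug("%s: ignoring unknown parameter '%s'\n", doing, param);
	return 0;	/* non-zero reports the parameter as invalid */
}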
diff --git a/include/linux/mount.h b/include/linux/mount.h
index ac3dd28761..c34c18b4e8 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -50,8 +50,7 @@ struct path;
#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \
- MNT_CURSOR)
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | MNT_ONRB)
#define MNT_INTERNAL 0x4000
@@ -65,7 +64,7 @@ struct path;
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
#define MNT_UMOUNT 0x8000000
-#define MNT_CURSOR 0x10000000
+#define MNT_ONRB 0x10000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h
index 79184948fa..ac577699ed 100644
--- a/include/linux/moxtet.h
+++ b/include/linux/moxtet.h
@@ -35,8 +35,6 @@ enum turris_mox_module_id {
#define MOXTET_NIRQS 16
-extern struct bus_type moxtet_type;
-
struct moxtet {
struct device *dev;
struct mutex lock;
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 9d0fc5109a..e84522e313 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1003,6 +1003,8 @@ struct nand_op_parser {
/**
* struct nand_operation - NAND operation descriptor
* @cs: the CS line to select for this NAND operation
+ * @deassert_wp: set to true when the operation requires the WP pin to be
+ * de-asserted (ERASE, PROG, ...)
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
@@ -1010,6 +1012,7 @@ struct nand_op_parser {
*/
struct nand_operation {
unsigned int cs;
+ bool deassert_wp;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
@@ -1021,6 +1024,14 @@ struct nand_operation {
.ninstrs = ARRAY_SIZE(_instrs), \
}
+#define NAND_DESTRUCTIVE_OPERATION(_cs, _instrs) \
+ { \
+ .cs = _cs, \
+ .deassert_wp = true, \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
@@ -1104,6 +1115,7 @@ struct nand_controller_ops {
* the bus without restarting an entire read operation nor
* changing the column.
* @supported_op.cont_read: The controller supports sequential cache reads.
+ * @controller_wp: the controller is in charge of handling the WP pin.
*/
struct nand_controller {
struct mutex lock;
@@ -1112,6 +1124,7 @@ struct nand_controller {
unsigned int data_only_read: 1;
unsigned int cont_read: 1;
} supported_op;
+ bool controller_wp;
};
static inline void nand_controller_init(struct nand_controller *nfc)
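NAND_DESTRUCTIVE_OPERATION() mirrors NAND_OPERATION() but sets deassert_wp so the controller releases write protect before ERASE/PROG-class operations. A rough sketch of an erase built with it; the instruction sequence is illustrative, not a verified command set:

#include <linux/mtd/rawnand.h>

static int my_erase_block(struct nand_chip *chip, unsigned int page)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_ERASE1, 0),
		NAND_OP_ADDR(3, (const u8[]){ page, page >> 8, page >> 16 }, 0),
		NAND_OP_CMD(NAND_CMD_ERASE2, 0),
	};
	/* .deassert_wp = true tells the controller to release WP first */
	struct nand_operation op =
		NAND_DESTRUCTIVE_OPERATION(chip->cur_cs, instrs);

	return nand_exec_op(chip, &op);
}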
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a33aa9eb9f..7e208d46ba 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -20,6 +20,7 @@
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
#include <linux/cleanup.h>
+#include <linux/mutex_types.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@@ -33,49 +34,6 @@
#ifndef CONFIG_PREEMPT_RT
-/*
- * Simple, straightforward mutexes with strict semantics:
- *
- * - only one task can hold the mutex at a time
- * - only the owner can unlock the mutex
- * - multiple unlocks are not permitted
- * - recursive locking is not permitted
- * - a mutex object must be initialized via the API
- * - a mutex object must not be initialized via memset or copying
- * - task may not exit with mutex held
- * - memory areas where held locks reside must not be freed
- * - held mutexes must not be reinitialized
- * - mutexes may not be used in hardware or software interrupt
- * contexts such as tasklets and timers
- *
- * These semantics are fully enforced when DEBUG_MUTEXES is
- * enabled. Furthermore, besides enforcing the above rules, the mutex
- * debugging code also implements a number of additional features
- * that make lock debugging easier and faster:
- *
- * - uses symbolic names of mutexes, whenever they are printed in debug output
- * - point-of-acquire tracking, symbolic lookup of function names
- * - list of all locks held in the system, printout of them
- * - owner tracking
- * - detects self-recursing locks and prints out all relevant info
- * - detects multi-task circular deadlocks and prints out all affected
- * locks and tasks (and only those tasks)
- */
-struct mutex {
- atomic_long_t owner;
- raw_spinlock_t wait_lock;
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- struct optimistic_spin_queue osq; /* Spinner MCS lock */
-#endif
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_MUTEXES
- void *magic;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
#ifdef CONFIG_DEBUG_MUTEXES
#define __DEBUG_MUTEX_INITIALIZER(lockname) \
@@ -131,14 +89,6 @@ extern bool mutex_is_locked(struct mutex *lock);
/*
* Preempt-RT variant based on rtmutexes.
*/
-#include <linux/rtmutex.h>
-
-struct mutex {
- struct rt_mutex_base rtmutex;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
#define __MUTEX_INITIALIZER(mutexname) \
{ \
@@ -221,6 +171,7 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
+DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
+DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
#endif /* __LINUX_MUTEX_H */
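The two DEFINE_GUARD_COND() lines above hook mutex_trylock() and mutex_lock_interruptible() into the <linux/cleanup.h> guard machinery. A hedged usage sketch, assuming the companion scoped_cond_guard() helper from the same cleanup.h series; names are illustrative:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_value;

static int example_set(int v)
{
	/* Acquire interruptibly; bail out with -EINTR on a signal,
	 * with no explicit unlock path needed. */
	scoped_cond_guard(mutex_intr, return -EINTR, &example_lock) {
		example_value = v;
	}
	return 0;
}

The unconditional form, guard(mutex)(&example_lock);, was already available; the new _try/_intr classes extend it to the conditional lock variants.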
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
new file mode 100644
index 0000000000..fdf7f515fd
--- /dev/null
+++ b/include/linux/mutex_types.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MUTEX_TYPES_H
+#define __LINUX_MUTEX_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/lockdep_types.h>
+#include <linux/osq_lock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#ifndef CONFIG_PREEMPT_RT
+
+/*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in hardware or software interrupt
+ * contexts such as tasklets and timers
+ *
+ * These semantics are fully enforced when DEBUG_MUTEXES is
+ * enabled. Furthermore, besides enforcing the above rules, the mutex
+ * debugging code also implements a number of additional features
+ * that make lock debugging easier and faster:
+ *
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ * locks and tasks (and only those tasks)
+ */
+struct mutex {
+ atomic_long_t owner;
+ raw_spinlock_t wait_lock;
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq; /* Spinner MCS lock */
+#endif
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_MUTEXES
+ void *magic;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#else /* !CONFIG_PREEMPT_RT */
+/*
+ * Preempt-RT variant based on rtmutexes.
+ */
+#include <linux/rtmutex.h>
+
+struct mutex {
+ struct rt_mutex_base rtmutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#endif /* CONFIG_PREEMPT_RT */
+
+#endif /* __LINUX_MUTEX_TYPES_H */
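The new header exists so other headers can embed a struct mutex without pulling in the full locking API and its include chain. A hedged sketch of the intended include discipline; the device struct is illustrative:

/* example_dev.h — only the mutex *type* is needed here. */
#ifndef _EXAMPLE_DEV_H
#define _EXAMPLE_DEV_H

#include <linux/mutex_types.h>

struct example_dev {
	struct mutex lock;	/* layout comes from mutex_types.h */
	int state;
};

#endif /* _EXAMPLE_DEV_H */

/* example_dev.c would include <linux/mutex.h> to get
 * mutex_init(), mutex_lock() and friends. */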
diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h
index ed42bd5f63..0aa4411528 100644
--- a/include/linux/net/intel/i40e_client.h
+++ b/include/linux/net/intel/i40e_client.h
@@ -45,7 +45,7 @@ struct i40e_qv_info {
struct i40e_qvlist_info {
u32 num_vectors;
- struct i40e_qv_info qv_info[];
+ struct i40e_qv_info qv_info[] __counted_by(num_vectors);
};
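__counted_by(num_vectors) tells the compiler which member carries the element count of the flexible array, enabling FORTIFY_SOURCE/UBSAN bounds checking. A hedged allocation sketch; the helper name is illustrative, and the counter must be assigned before the array is indexed:

#include <linux/overflow.h>
#include <linux/slab.h>

static struct i40e_qvlist_info *example_qvlist_alloc(u32 n)
{
	struct i40e_qvlist_info *q;

	q = kzalloc(struct_size(q, qv_info, n), GFP_KERNEL);
	if (!q)
		return NULL;
	q->num_vectors = n;	/* set the counter before any qv_info[i] access */
	return q;
}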
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2564e20946..dba428b3a8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -79,8 +79,6 @@ struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
-/* DPLL specific */
-struct dpll_pin;
typedef u32 xdp_features_t;
@@ -382,6 +380,7 @@ struct napi_struct {
/* control-path-only fields follow */
struct list_head dev_list;
struct hlist_node napi_hash_node;
+ int irq;
};
enum {
@@ -665,6 +664,10 @@ struct netdev_queue {
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
+ /* NAPI instance for the queue
+ * Readers and writers must hold RTNL
+ */
+ struct napi_struct *napi;
/*
* write-mostly part
*/
@@ -1324,6 +1327,9 @@ struct netdev_net_notifier {
* int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
* struct netlink_ext_ack *extack);
* Deletes the MDB entry from dev.
+ * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
+ * struct netlink_ext_ack *extack);
+ * Bulk deletes MDB entries from dev.
* int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
* struct netlink_callback *cb);
* Dumps MDB entries from dev. The first argument (marker) in the netlink
@@ -1606,6 +1612,9 @@ struct net_device_ops {
int (*ndo_mdb_del)(struct net_device *dev,
struct nlattr *tb[],
struct netlink_ext_ack *extack);
+ int (*ndo_mdb_del_bulk)(struct net_device *dev,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
int (*ndo_mdb_dump)(struct net_device *dev,
struct sk_buff *skb,
struct netlink_callback *cb);
@@ -1865,6 +1874,7 @@ enum netdev_stat_type {
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
* @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
+ * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks.
* @ethtool_ops: Management operations
* @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
@@ -2091,6 +2101,78 @@ enum netdev_stat_type {
*/
struct net_device {
+ /* Cacheline organization can be found documented in
+ * Documentation/networking/net_cachelines/net_device.rst.
+ * Please update the document when adding new fields.
+ */
+
+ /* TX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_tx);
+ unsigned long long priv_flags;
+ const struct net_device_ops *netdev_ops;
+ const struct header_ops *header_ops;
+ struct netdev_queue *_tx;
+ netdev_features_t gso_partial_features;
+ unsigned int real_num_tx_queues;
+ unsigned int gso_max_size;
+ unsigned int gso_ipv4_max_size;
+ u16 gso_max_segs;
+ s16 num_tc;
+ /* Note: dev->mtu is often read without holding a lock.
+ * Writers usually hold RTNL.
+ * It is recommended to use READ_ONCE() to annotate the reads,
+ * and to use WRITE_ONCE() to annotate the writes.
+ */
+ unsigned int mtu;
+ unsigned short needed_headroom;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+#ifdef CONFIG_XPS
+ struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ struct nf_hook_entries __rcu *nf_hooks_egress;
+#endif
+#ifdef CONFIG_NET_XGRESS
+ struct bpf_mprog_entry __rcu *tcx_egress;
+#endif
+ __cacheline_group_end(net_device_read_tx);
+
+ /* TXRX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_txrx);
+ union {
+ struct pcpu_lstats __percpu *lstats;
+ struct pcpu_sw_netstats __percpu *tstats;
+ struct pcpu_dstats __percpu *dstats;
+ };
+ unsigned long state;
+ unsigned int flags;
+ unsigned short hard_header_len;
+ netdev_features_t features;
+ struct inet6_dev __rcu *ip6_ptr;
+ __cacheline_group_end(net_device_read_txrx);
+
+ /* RX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_rx);
+ struct bpf_prog __rcu *xdp_prog;
+ struct list_head ptype_specific;
+ int ifindex;
+ unsigned int real_num_rx_queues;
+ struct netdev_rx_queue *_rx;
+ unsigned long gro_flush_timeout;
+ int napi_defer_hard_irqs;
+ unsigned int gro_max_size;
+ unsigned int gro_ipv4_max_size;
+ rx_handler_func_t __rcu *rx_handler;
+ void __rcu *rx_handler_data;
+ possible_net_t nd_net;
+#ifdef CONFIG_NETPOLL
+ struct netpoll_info __rcu *npinfo;
+#endif
+#ifdef CONFIG_NET_XGRESS
+ struct bpf_mprog_entry __rcu *tcx_ingress;
+#endif
+ __cacheline_group_end(net_device_read_rx);
+
char name[IFNAMSIZ];
struct netdev_name_node *name_node;
struct dev_ifalias __rcu *ifalias;
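The hunk above groups read-mostly hot fields between __cacheline_group_begin()/__cacheline_group_end() markers from <linux/cache.h>, so their placement can be asserted rather than hoped for. A hedged sketch of the same pattern on an illustrative private struct:

#include <linux/cache.h>
#include <linux/spinlock.h>

struct example_stats {
	__cacheline_group_begin(ex_read_hot);
	unsigned long rx_packets;	/* read-mostly fast-path fields */
	unsigned long rx_bytes;
	__cacheline_group_end(ex_read_hot);

	spinlock_t lock;		/* colder, write-shared state */
};

/* Membership can then be checked at build time, e.g.:
 * CACHELINE_ASSERT_GROUP_MEMBER(struct example_stats, ex_read_hot, rx_bytes);
 * (helper from the same <linux/cache.h> series; treat as an assumption here)
 */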
@@ -2108,14 +2190,12 @@ struct net_device {
* part of the usual set specified in Space.c.
*/
- unsigned long state;
struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
struct list_head close_list;
struct list_head ptype_all;
- struct list_head ptype_specific;
struct {
struct list_head upper;
@@ -2123,31 +2203,18 @@ struct net_device {
} adj_list;
/* Read-mostly cache-line for fast-path access */
- unsigned int flags;
xdp_features_t xdp_features;
- unsigned long long priv_flags;
- const struct net_device_ops *netdev_ops;
const struct xdp_metadata_ops *xdp_metadata_ops;
- int ifindex;
+ const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
unsigned short gflags;
- unsigned short hard_header_len;
- /* Note : dev->mtu is often read without holding a lock.
- * Writers usually hold RTNL.
- * It is recommended to use READ_ONCE() to annotate the reads,
- * and to use WRITE_ONCE() to annotate the writes.
- */
- unsigned int mtu;
- unsigned short needed_headroom;
unsigned short needed_tailroom;
- netdev_features_t features;
netdev_features_t hw_features;
netdev_features_t wanted_features;
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
- netdev_features_t gso_partial_features;
unsigned int min_mtu;
unsigned int max_mtu;
@@ -2185,8 +2252,6 @@ struct net_device {
const struct tlsdev_ops *tlsdev_ops;
#endif
- const struct header_ops *header_ops;
-
unsigned char operstate;
unsigned char link_mode;
@@ -2227,9 +2292,7 @@ struct net_device {
/* Protocol-specific pointers */
-
struct in_device __rcu *ip_ptr;
- struct inet6_dev __rcu *ip6_ptr;
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
#endif
@@ -2264,26 +2327,13 @@ struct net_device {
/* Interface address info used in eth_type_trans() */
const unsigned char *dev_addr;
- struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
- unsigned int real_num_rx_queues;
-
- struct bpf_prog __rcu *xdp_prog;
- unsigned long gro_flush_timeout;
- int napi_defer_hard_irqs;
#define GRO_LEGACY_MAX_SIZE 65536u
/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
* and shinfo->gso_segs is a 16bit field.
*/
#define GRO_MAX_SIZE (8 * 65535u)
- unsigned int gro_max_size;
- unsigned int gro_ipv4_max_size;
unsigned int xdp_zc_max_segs;
- rx_handler_func_t __rcu *rx_handler;
- void __rcu *rx_handler_data;
-#ifdef CONFIG_NET_XGRESS
- struct bpf_mprog_entry __rcu *tcx_ingress;
-#endif
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
struct nf_hook_entries __rcu *nf_hooks_ingress;
@@ -2298,25 +2348,13 @@ struct net_device {
/*
* Cache lines mostly used on transmit path
*/
- struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
- unsigned int real_num_tx_queues;
struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
-#ifdef CONFIG_XPS
- struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
-#endif
-#ifdef CONFIG_NET_XGRESS
- struct bpf_mprog_entry __rcu *tcx_egress;
-#endif
-#ifdef CONFIG_NETFILTER_EGRESS
- struct nf_hook_entries __rcu *nf_hooks_egress;
-#endif
-
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
@@ -2355,22 +2393,11 @@ struct net_device {
bool needs_free_netdev;
void (*priv_destructor)(struct net_device *dev);
-#ifdef CONFIG_NETPOLL
- struct netpoll_info __rcu *npinfo;
-#endif
-
- possible_net_t nd_net;
-
/* mid-layer private */
void *ml_priv;
enum netdev_ml_priv_type ml_priv_type;
enum netdev_stat_type pcpu_stat_type:8;
- union {
- struct pcpu_lstats __percpu *lstats;
- struct pcpu_sw_netstats __percpu *tstats;
- struct pcpu_dstats __percpu *dstats;
- };
#if IS_ENABLED(CONFIG_GARP)
struct garp_port __rcu *garp_port;
@@ -2395,20 +2422,15 @@ struct net_device {
*/
#define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
- unsigned int gso_max_size;
#define TSO_LEGACY_MAX_SIZE 65536
#define TSO_MAX_SIZE UINT_MAX
unsigned int tso_max_size;
- u16 gso_max_segs;
#define TSO_MAX_SEGS U16_MAX
u16 tso_max_segs;
- unsigned int gso_ipv4_max_size;
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- s16 num_tc;
- struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
#if IS_ENABLED(CONFIG_FCOE)
@@ -2445,7 +2467,11 @@ struct net_device {
struct devlink_port *devlink_port;
#if IS_ENABLED(CONFIG_DPLL)
- struct dpll_pin *dpll_pin;
+ struct dpll_pin __rcu *dpll_pin;
+#endif
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ /** @page_pools: page pools created for this netdevice */
+ struct hlist_head page_pools;
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2651,6 +2677,15 @@ static inline void *netdev_priv(const struct net_device *dev)
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
+void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type,
+ struct napi_struct *napi);
+
+static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
+{
+ napi->irq = irq;
+}
+
/* Default NAPI poll() weight
* Device drivers are strongly advised to not use bigger value
*/
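netif_queue_set_napi() and netif_napi_set_irq() let a driver report its queue/NAPI/IRQ mapping to the core, where it is surfaced via the netdev netlink family. A hedged driver-side sketch; the private struct and poll stub are illustrative:

#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	int irq;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	return 0;	/* illustrative no-op poller */
}

static int example_open(struct net_device *dev)
{
	struct example_priv *ed = netdev_priv(dev);

	netif_napi_add(dev, &ed->napi, example_poll);
	netif_napi_set_irq(&ed->napi, ed->irq);

	/* netif_queue_set_napi() requires RTNL, which ndo_open holds. */
	netif_queue_set_napi(dev, 0, NETDEV_QUEUE_TYPE_RX, &ed->napi);
	netif_queue_set_napi(dev, 0, NETDEV_QUEUE_TYPE_TX, &ed->napi);

	napi_enable(&ed->napi);
	return 0;
}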
@@ -3462,6 +3497,16 @@ static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue
#endif
}
+static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+ /* Drivers not yet migrated to BQL will also return 0. */
+ return dql_avail(&txq->dql);
+#else
+ return 0;
+#endif
+}
+
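netdev_queue_dql_avail() gives callers one place to ask BQL how many bytes of budget remain. A hedged sketch of bounding a bulk-dequeue budget, roughly the qdisc-side use; the function name is illustrative:

#include <linux/minmax.h>
#include <linux/netdevice.h>

static int example_bulk_budget(const struct netdev_queue *txq, int max_bytes)
{
	/* 0 (no headroom, a non-BQL driver, or a !CONFIG_BQL build)
	 * simply disables bulking beyond the first packet. */
	return min(netdev_queue_dql_avail(txq), max_bytes);
}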
/**
* netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
* @dev_queue: pointer to transmit queue
@@ -3964,6 +4009,9 @@ int generic_hwtstamp_get_lower(struct net_device *dev,
int generic_hwtstamp_set_lower(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_cfg,
struct netlink_ext_ack *extack);
+int dev_set_hwtstamp_phylib(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
@@ -3992,17 +4040,6 @@ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
-void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
-void netdev_dpll_pin_clear(struct net_device *dev);
-
-static inline struct dpll_pin *netdev_dpll_pin(const struct net_device *dev)
-{
-#if IS_ENABLED(CONFIG_DPLL)
- return dev->dpll_pin;
-#else
- return NULL;
-#endif
-}
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -4196,6 +4233,15 @@ static inline void netdev_ref_replace(struct net_device *odev,
void linkwatch_fire_event(struct net_device *dev);
/**
+ * linkwatch_sync_dev - sync linkwatch for the given device
+ * @dev: network device to sync linkwatch for
+ *
+ * Sync linkwatch for the given device, removing it from the
+ * pending work list (if queued).
+ */
+void linkwatch_sync_dev(struct net_device *dev);
+
+/**
* netif_carrier_ok - test if carrier present
* @dev: network device
*
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 7834c0be28..61aa48f46d 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -51,7 +51,7 @@ struct nf_ipv6_ops {
u32 (*cookie_init_sequence)(const struct ipv6hdr *iph,
const struct tcphdr *th, u16 *mssp);
int (*cookie_v6_check)(const struct ipv6hdr *iph,
- const struct tcphdr *th, __u32 cookie);
+ const struct tcphdr *th);
#endif
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
@@ -179,16 +179,16 @@ static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph,
}
static inline int nf_cookie_v6_check(const struct ipv6hdr *iph,
- const struct tcphdr *th, __u32 cookie)
+ const struct tcphdr *th)
{
#if IS_ENABLED(CONFIG_SYN_COOKIES)
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
if (v6_ops)
- return v6_ops->cookie_v6_check(iph, th, cookie);
+ return v6_ops->cookie_v6_check(iph, th);
#elif IS_BUILTIN(CONFIG_IPV6)
- return __cookie_v6_check(iph, th, cookie);
+ return __cookie_v6_check(iph, th);
#endif
#endif
return 0;
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index b11a84f6c3..100cbb2612 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -109,11 +109,18 @@ static inline int wait_on_page_fscache_killable(struct page *page)
return folio_wait_private_2_killable(page_folio(page));
}
+/* Marks used on xarray-based buffers */
+#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
+#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
+
enum netfs_io_source {
NETFS_FILL_WITH_ZEROES,
NETFS_DOWNLOAD_FROM_SERVER,
NETFS_READ_FROM_CACHE,
NETFS_INVALID_READ,
+ NETFS_UPLOAD_TO_SERVER,
+ NETFS_WRITE_TO_CACHE,
+ NETFS_INVALID_WRITE,
} __mode(byte);
typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
@@ -129,9 +136,57 @@ struct netfs_inode {
struct fscache_cookie *cache;
#endif
loff_t remote_i_size; /* Size of the remote file */
+ loff_t zero_point; /* Size after which we assume there's no data
+ * on the server */
+ unsigned long flags;
+#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
+#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
+#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
+#define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */
+};
+
+/*
+ * A netfs group - for instance a ceph snap. This is marked on dirty pages and
+ * pages marked with a group must be flushed before they can be written under
+ * the domain of another group.
+ */
+struct netfs_group {
+ refcount_t ref;
+ void (*free)(struct netfs_group *netfs_group);
};
/*
+ * Information about a dirty page (attached only if necessary).
+ * folio->private
+ */
+struct netfs_folio {
+ struct netfs_group *netfs_group; /* Filesystem's grouping marker (or NULL). */
+ unsigned int dirty_offset; /* Write-streaming dirty data offset */
+ unsigned int dirty_len; /* Write-streaming dirty data length */
+};
+#define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */
+
+static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+{
+ void *priv = folio_get_private(folio);
+
+ if ((unsigned long)priv & NETFS_FOLIO_INFO)
+ return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
+ return NULL;
+}
+
+static inline struct netfs_group *netfs_folio_group(struct folio *folio)
+{
+ struct netfs_folio *finfo;
+ void *priv = folio_get_private(folio);
+
+ finfo = netfs_folio_info(folio);
+ if (finfo)
+ return finfo->netfs_group;
+ return priv;
+}
+
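folio->private in this scheme is a tagged pointer: either a bare netfs_group, or a netfs_folio with bit 0 (NETFS_FOLIO_INFO) set, which netfs_folio_info() strips and netfs_folio_group() follows. A hedged sketch of attaching write-streaming info; locking and the caller's context are simplified:

#include <linux/pagemap.h>
#include <linux/slab.h>

static int example_attach_finfo(struct folio *folio,
				struct netfs_group *group,
				unsigned int off, unsigned int len)
{
	struct netfs_folio *finfo;

	finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
	if (!finfo)
		return -ENOMEM;
	finfo->netfs_group = group;
	finfo->dirty_offset = off;
	finfo->dirty_len = len;

	/* Tag the pointer so netfs_folio_info() can tell it apart
	 * from a plain netfs_group pointer. */
	folio_attach_private(folio,
			     (void *)((unsigned long)finfo | NETFS_FOLIO_INFO));
	return 0;
}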
+/*
* Resources required to do operations on a cache.
*/
struct netfs_cache_resources {
@@ -143,17 +198,24 @@ struct netfs_cache_resources {
};
/*
- * Descriptor for a single component subrequest.
+ * Descriptor for a single component subrequest. Each operation represents an
+ * individual read/write from/to a server, a cache, a journal, etc.
+ *
+ * The buffer iterator is persistent for the life of the subrequest struct and
+ * the pages it points to can be relied on to exist for the duration.
*/
struct netfs_io_subrequest {
struct netfs_io_request *rreq; /* Supervising I/O request */
+ struct work_struct work;
struct list_head rreq_link; /* Link in rreq->subrequests */
+ struct iov_iter io_iter; /* Iterator for this subrequest */
loff_t start; /* Where to start the I/O */
size_t len; /* Size of the I/O */
size_t transferred; /* Amount of data transferred */
refcount_t ref;
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
+ unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */
enum netfs_io_source source; /* Where to read from/write to */
unsigned long flags;
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
@@ -168,6 +230,13 @@ enum netfs_io_origin {
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+ NETFS_WRITEBACK, /* This write was triggered by writepages */
+ NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
+ NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */
+ NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
+ NETFS_DIO_READ, /* This is a direct I/O read */
+ NETFS_DIO_WRITE, /* This is a direct I/O write */
+ nr__netfs_io_origin
} __mode(byte);
/*
@@ -175,19 +244,34 @@ enum netfs_io_origin {
* operations to a variety of data stores and then stitch the result together.
*/
struct netfs_io_request {
- struct work_struct work;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
struct inode *inode; /* The file being accessed */
struct address_space *mapping; /* The mapping being accessed */
+ struct kiocb *iocb; /* AIO completion vector */
struct netfs_cache_resources cache_resources;
+ struct list_head proc_link; /* Link in netfs_iorequests */
struct list_head subrequests; /* Contributory I/O operations */
+ struct iov_iter iter; /* Unencrypted-side iterator */
+ struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
void *netfs_priv; /* Private data for the netfs */
+ struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
+ unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
unsigned int debug_id;
+ unsigned int rsize; /* Maximum read size (0 for none) */
+ unsigned int wsize; /* Maximum write size (0 for none) */
+ unsigned int subreq_counter; /* Next subreq->debug_index */
atomic_t nr_outstanding; /* Number of ops in progress */
atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
size_t submitted; /* Amount submitted for I/O so far */
size_t len; /* Length of the request */
+ size_t upper_len; /* Length can be extended to here */
+ size_t transferred; /* Amount to be indicated as transferred */
short error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
+ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
loff_t i_size; /* Size of the file */
loff_t start; /* Start position */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
@@ -199,17 +283,25 @@ struct netfs_io_request {
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED 4 /* The request failed */
#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
+#define NETFS_RREQ_WRITE_TO_CACHE 7 /* Need to write to the cache */
+#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
+#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
+#define NETFS_RREQ_BLOCKED 10 /* We blocked */
const struct netfs_request_ops *netfs_ops;
+ void (*cleanup)(struct netfs_io_request *req);
};
/*
* Operations the network filesystem can/must provide to the helpers.
*/
struct netfs_request_ops {
+ unsigned int io_request_size; /* Alloc size for netfs_io_request struct */
+ unsigned int io_subrequest_size; /* Alloc size for netfs_io_subrequest struct */
int (*init_request)(struct netfs_io_request *rreq, struct file *file);
void (*free_request)(struct netfs_io_request *rreq);
- int (*begin_cache_operation)(struct netfs_io_request *rreq);
+ void (*free_subrequest)(struct netfs_io_subrequest *rreq);
+ /* Read request handling */
void (*expand_readahead)(struct netfs_io_request *rreq);
bool (*clamp_length)(struct netfs_io_subrequest *subreq);
void (*issue_read)(struct netfs_io_subrequest *subreq);
@@ -217,6 +309,14 @@ struct netfs_request_ops {
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
struct folio **foliop, void **_fsdata);
void (*done)(struct netfs_io_request *rreq);
+
+ /* Modification handling */
+ void (*update_i_size)(struct inode *inode, loff_t i_size);
+
+ /* Write request handling */
+ void (*create_write_requests)(struct netfs_io_request *wreq,
+ loff_t start, size_t len);
+ void (*invalidate_cache)(struct netfs_io_request *wreq);
};
/*
@@ -229,8 +329,7 @@ enum netfs_read_from_hole {
};
/*
- * Table of operations for access to a cache. This is obtained by
- * rreq->ops->begin_cache_operation().
+ * Table of operations for access to a cache.
*/
struct netfs_cache_ops {
/* End an operation */
@@ -265,8 +364,8 @@ struct netfs_cache_ops {
* actually do.
*/
int (*prepare_write)(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size,
- bool no_space_allocated_yet);
+ loff_t *_start, size_t *_len, size_t upper_len,
+ loff_t i_size, bool no_space_allocated_yet);
/* Prepare an on-demand read operation, shortening it to a cached/uncached
* boundary as appropriate.
@@ -284,22 +383,62 @@ struct netfs_cache_ops {
loff_t *_data_start, size_t *_data_len);
};
+/* High-level read API. */
+ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+
+/* High-level write API */
+ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group);
+ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
+ struct netfs_group *netfs_group);
+ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from);
+ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+/* Address operations API */
struct readahead_control;
void netfs_readahead(struct readahead_control *);
int netfs_read_folio(struct file *, struct folio *);
int netfs_write_begin(struct netfs_inode *, struct file *,
- struct address_space *, loff_t pos, unsigned int len,
- struct folio **, void **fsdata);
-
+ struct address_space *, loff_t pos, unsigned int len,
+ struct folio **, void **fsdata);
+int netfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
+bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
+int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
+void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+bool netfs_release_folio(struct folio *folio, gfp_t gfp);
+int netfs_launder_folio(struct folio *folio);
+
+/* VMA operations API. */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
+
+/* (Sub)request management API. */
void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
bool was_async, enum netfs_sreq_ref_trace what);
-void netfs_stats_show(struct seq_file *);
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
struct iov_iter *new,
iov_iter_extraction_t extraction_flags);
+size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs);
+struct netfs_io_subrequest *netfs_create_write_request(
+ struct netfs_io_request *wreq, enum netfs_io_source dest,
+ loff_t start, size_t len, work_func_t worker);
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+ bool was_async);
+void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
+
+int netfs_start_io_read(struct inode *inode);
+void netfs_end_io_read(struct inode *inode);
+int netfs_start_io_write(struct inode *inode);
+void netfs_end_io_write(struct inode *inode);
+int netfs_start_io_direct(struct inode *inode);
+void netfs_end_io_direct(struct inode *inode);
/**
* netfs_inode - Get the netfs inode context from the inode
@@ -317,30 +456,44 @@ static inline struct netfs_inode *netfs_inode(struct inode *inode)
* netfs_inode_init - Initialise a netfslib inode context
* @ctx: The netfs inode to initialise
* @ops: The netfs's operations list
+ * @use_zero_point: True to use the zero_point read optimisation
*
* Initialise the netfs library context struct. This is expected to follow on
* directly from the VFS inode struct.
*/
static inline void netfs_inode_init(struct netfs_inode *ctx,
- const struct netfs_request_ops *ops)
+ const struct netfs_request_ops *ops,
+ bool use_zero_point)
{
ctx->ops = ops;
ctx->remote_i_size = i_size_read(&ctx->inode);
+ ctx->zero_point = LLONG_MAX;
+ ctx->flags = 0;
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
+ /* ->releasepage() drives zero_point */
+ if (use_zero_point) {
+ ctx->zero_point = ctx->remote_i_size;
+ mapping_set_release_always(ctx->inode.i_mapping);
+ }
}
/**
* netfs_resize_file - Note that a file got resized
* @ctx: The netfs inode being resized
* @new_i_size: The new file size
+ * @changed_on_server: The change was applied to the server
*
* Inform the netfs lib that a file got resized so that it can adjust its state.
*/
-static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size)
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size,
+ bool changed_on_server)
{
- ctx->remote_i_size = new_i_size;
+ if (changed_on_server)
+ ctx->remote_i_size = new_i_size;
+ if (new_i_size < ctx->zero_point)
+ ctx->zero_point = new_i_size;
}
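With the new signatures, a filesystem opts in to zero_point at init time and tells netfs_resize_file() whether the server has already seen the new size. A hedged sketch; the inode wrapper and ops table are illustrative:

static const struct netfs_request_ops example_req_ops;

struct example_inode {
	struct netfs_inode netfs;	/* must come first */
};

static void example_init_netfs(struct example_inode *ei)
{
	/* true: use the zero_point optimisation, so reads beyond that
	 * offset can be satisfied locally with zeroes. */
	netfs_inode_init(&ei->netfs, &example_req_ops, true);
}

static void example_local_truncate(struct example_inode *ei, loff_t size)
{
	/* The server hasn't applied the change yet: remote_i_size is
	 * left alone, but zero_point is pulled down if needed. */
	netfs_resize_file(&ei->netfs, size, false);
}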
/**
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 75d7de34c9..1a4445bf2a 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -228,10 +228,12 @@ bool netlink_strict_get_check(struct sk_buff *skb);
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
+
+typedef int (*netlink_filter_fn)(struct sock *dsk, struct sk_buff *skb, void *data);
+
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
__u32 portid, __u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk,
- struct sk_buff *skb, void *data),
+ netlink_filter_fn filter,
void *filter_data);
int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
int netlink_register_notifier(struct notifier_block *nb);
@@ -351,5 +353,6 @@ bool netlink_ns_capable(const struct sk_buff *skb,
struct user_namespace *ns, int cap);
bool netlink_capable(const struct sk_buff *skb, int cap);
bool netlink_net_capable(const struct sk_buff *skb, int cap);
+struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast);
#endif /* __LINUX_NETLINK_H */
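netlink_filter_fn gives a name to the per-socket predicate passed to netlink_broadcast_filtered(). A hedged sketch of a filter; the convention assumed here (a nonzero return suppresses delivery to that socket) follows the broadcast path's use of the callback and should be treated as an assumption:

#include <net/sock.h>

/* Deliver only to sockets in the target network namespace. */
static int example_ns_filter(struct sock *dsk, struct sk_buff *skb, void *data)
{
	struct net *target = data;

	return !net_eq(sock_net(dsk), target);	/* nonzero: skip socket */
}

/* Usage (illustrative):
 * netlink_broadcast_filtered(ssk, skb, 0, group, GFP_KERNEL,
 *			      example_ns_filter, net);
 */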
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c11c4db346..ef8d2d618d 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -869,4 +869,26 @@ enum {
RCA4_TYPE_MASK_OTHER_LAYOUT_MAX = 15,
};
+enum nfs_cb_opnum4 {
+ OP_CB_GETATTR = 3,
+ OP_CB_RECALL = 4,
+
+ /* Callback operations new to NFSv4.1 */
+ OP_CB_LAYOUTRECALL = 5,
+ OP_CB_NOTIFY = 6,
+ OP_CB_PUSH_DELEG = 7,
+ OP_CB_RECALL_ANY = 8,
+ OP_CB_RECALLABLE_OBJ_AVAIL = 9,
+ OP_CB_RECALL_SLOT = 10,
+ OP_CB_SEQUENCE = 11,
+ OP_CB_WANTS_CANCELLED = 12,
+ OP_CB_NOTIFY_LOCK = 13,
+ OP_CB_NOTIFY_DEVICEID = 14,
+
+ /* Callback operations new to NFSv4.2 */
+ OP_CB_OFFLOAD = 15,
+
+ OP_CB_ILLEGAL = 10044,
+};
+
#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 832b7e354b..d59116ac82 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -595,7 +595,6 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
* linux/fs/nfs/write.c
*/
extern int nfs_congestion_kb;
-extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct folio *folio);
extern int nfs_update_folio(struct file *file, struct folio *folio,
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index cd797e00fe..92de074e63 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -124,6 +124,7 @@ struct nfs_client {
char cl_ipaddr[48];
struct net *cl_net;
struct list_head pending_cb_stateids;
+ struct rcu_head rcu;
};
/*
@@ -265,6 +266,7 @@ struct nfs_server {
const struct cred *cred;
bool has_sec_mnt_opts;
struct kobject kobj;
+ struct rcu_head rcu;
};
/* Server capabilities */
diff --git a/include/linux/node.h b/include/linux/node.h
index 427a5975cf..dfc004e4be 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -20,20 +20,32 @@
#include <linux/list.h>
/**
- * struct node_hmem_attrs - heterogeneous memory performance attributes
+ * struct access_coordinate - generic performance coordinates container
*
* @read_bandwidth: Read bandwidth in MB/s
* @write_bandwidth: Write bandwidth in MB/s
* @read_latency: Read latency in nanoseconds
* @write_latency: Write latency in nanoseconds
*/
-struct node_hmem_attrs {
+struct access_coordinate {
unsigned int read_bandwidth;
unsigned int write_bandwidth;
unsigned int read_latency;
unsigned int write_latency;
};
+/*
+ * ACCESS_COORDINATE_LOCAL correlates to ACCESS CLASS 0
+ * - access_coordinate between target node and nearest initiator node
+ * ACCESS_COORDINATE_CPU correlates to ACCESS CLASS 1
+ * - access_coordinate between target node and nearest CPU node
+ */
+enum access_coordinate_class {
+ ACCESS_COORDINATE_LOCAL,
+ ACCESS_COORDINATE_CPU,
+ ACCESS_COORDINATE_MAX
+};
+
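Renaming node_hmem_attrs to access_coordinate generalises the container beyond HMEM, and the new enum replaces the bare unsigned access-class argument. A hedged sketch of a provider reporting CPU-relative (class 1) performance; the numbers are illustrative:

#include <linux/node.h>

static void example_report_perf(unsigned int nid)
{
	struct access_coordinate coord = {
		.read_bandwidth		= 8192,	/* MB/s */
		.write_bandwidth	= 8192,	/* MB/s */
		.read_latency		= 300,	/* ns */
		.write_latency		= 350,	/* ns */
	};

	node_set_perf_attrs(nid, &coord, ACCESS_COORDINATE_CPU);
}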
enum cache_indexing {
NODE_CACHE_DIRECT_MAP,
NODE_CACHE_INDEXED,
@@ -65,8 +77,8 @@ struct node_cache_attrs {
#ifdef CONFIG_HMEM_REPORTING
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
- unsigned access);
+void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access);
#else
static inline void node_add_cache(unsigned int nid,
struct node_cache_attrs *cache_attrs)
@@ -74,8 +86,8 @@ static inline void node_add_cache(unsigned int nid,
}
static inline void node_set_perf_attrs(unsigned int nid,
- struct node_hmem_attrs *hmem_attrs,
- unsigned access)
+ struct access_coordinate *coord,
+ enum access_coordinate_class access)
{
}
#endif
@@ -137,7 +149,7 @@ extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
extern int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
- unsigned access);
+ enum access_coordinate_class access);
#else
static inline void node_dev_init(void)
{
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 8d07116caa..b61438313a 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -93,10 +93,10 @@
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/minmax.h>
+#include <linux/nodemask_types.h>
#include <linux/numa.h>
#include <linux/random.h>
-typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
/**
diff --git a/include/linux/nodemask_types.h b/include/linux/nodemask_types.h
new file mode 100644
index 0000000000..6b28d97ea6
--- /dev/null
+++ b/include/linux/nodemask_types.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_NODEMASK_TYPES_H
+#define __LINUX_NODEMASK_TYPES_H
+
+#include <linux/bitops.h>
+#include <linux/numa.h>
+
+typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
+
+#endif /* __LINUX_NODEMASK_TYPES_H */
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 771cb02858..5601d14e28 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_NSPROXY_H
#define _LINUX_NSPROXY_H
+#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index bdcd85e622..4d103ac8f5 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -89,8 +89,6 @@ struct nubus_driver {
void (*remove)(struct nubus_board *board);
};
-extern struct bus_type nubus_bus_type;
-
/* Generic NuBus interface functions, modelled after the PCI interface */
#ifdef CONFIG_PROC_FS
extern bool nubus_populate_procfs;
diff --git a/include/linux/numa.h b/include/linux/numa.h
index a904861de8..915033a757 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NUMA_H
#define _LINUX_NUMA_H
+#include <linux/init.h>
#include <linux/types.h>
#ifdef CONFIG_NODES_SHIFT
@@ -22,34 +23,26 @@
#endif
#ifdef CONFIG_NUMA
-#include <linux/printk.h>
#include <asm/sparsemem.h>
/* Generic implementation available */
int numa_nearest_node(int node, unsigned int state);
#ifndef memory_add_physaddr_to_nid
-static inline int memory_add_physaddr_to_nid(u64 start)
-{
- pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
- start);
- return 0;
-}
+int memory_add_physaddr_to_nid(u64 start);
#endif
+
#ifndef phys_to_target_node
-static inline int phys_to_target_node(u64 start)
-{
- pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
- start);
- return 0;
-}
+int phys_to_target_node(u64 start);
#endif
+
#ifndef numa_fill_memblks
static inline int __init numa_fill_memblks(u64 start, u64 end)
{
return NUMA_NO_MEMBLK;
}
#endif
+
#else /* !CONFIG_NUMA */
static inline int numa_nearest_node(int node, unsigned int state)
{
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 44325c068b..3ef4053ea9 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -20,7 +20,6 @@
#define NVMF_TRSVCID_SIZE 32
#define NVMF_TRADDR_SIZE 256
#define NVMF_TSAS_SIZE 256
-#define NVMF_AUTH_HASH_LEN 64
#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
@@ -647,6 +646,7 @@ enum {
NVME_CMD_EFFECTS_NCC = 1 << 2,
NVME_CMD_EFFECTS_NIC = 1 << 3,
NVME_CMD_EFFECTS_CCC = 1 << 4,
+ NVME_CMD_EFFECTS_CSER_MASK = GENMASK(15, 14),
NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
NVME_CMD_EFFECTS_SCOPE_MASK = GENMASK(31, 20),
@@ -817,12 +817,6 @@ struct nvme_reservation_status_ext {
struct nvme_registered_ctrl_ext regctl_eds[];
};
-enum nvme_async_event_type {
- NVME_AER_TYPE_ERROR = 0,
- NVME_AER_TYPE_SMART = 1,
- NVME_AER_TYPE_NOTICE = 2,
-};
-
/* I/O commands */
enum nvme_opcode {
@@ -1819,7 +1813,7 @@ struct nvme_command {
};
};
-static inline bool nvme_is_fabrics(struct nvme_command *cmd)
+static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
{
return cmd->common.opcode == nvme_fabrics_command;
}
@@ -1838,7 +1832,7 @@ struct nvme_error_slot {
__u8 resv2[24];
};
-static inline bool nvme_is_write(struct nvme_command *cmd)
+static inline bool nvme_is_write(const struct nvme_command *cmd)
{
/*
* What a mess...
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 6ec4b9743e..34c0e58dfa 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -81,6 +81,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf);
const char *nvmem_dev_name(struct nvmem_device *nvmem);
+size_t nvmem_dev_size(struct nvmem_device *nvmem);
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
size_t nentries);
@@ -247,7 +248,6 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
const char *id);
struct nvmem_device *of_nvmem_device_get(struct device_node *np,
const char *name);
-struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem);
#else
static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
const char *id)
@@ -260,12 +260,6 @@ static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
{
return ERR_PTR(-EOPNOTSUPP);
}
-
-static inline struct device_node *
-of_nvmem_layout_get_container(struct nvmem_device *nvmem)
-{
- return NULL;
-}
#endif /* CONFIG_NVMEM && CONFIG_OF */
#endif /* ifndef _LINUX_NVMEM_CONSUMER_H */
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index e393083523..f0ba0e0321 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -9,6 +9,7 @@
#ifndef _LINUX_NVMEM_PROVIDER_H
#define _LINUX_NVMEM_PROVIDER_H
+#include <linux/device.h>
#include <linux/device/driver.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -83,6 +84,8 @@ struct nvmem_cell_info {
* @cells: Optional array of pre-defined NVMEM cells.
* @ncells: Number of elements in cells.
* @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax.
+ * @fixup_dt_cell_info: Will be called before a cell is added. Can be
+ * used to modify the nvmem_cell_info.
* @keepout: Optional array of keepout ranges (sorted ascending by start).
* @nkeepout: Number of elements in the keepout array.
* @type: Type of the nvmem storage
@@ -113,6 +116,8 @@ struct nvmem_config {
const struct nvmem_cell_info *cells;
int ncells;
bool add_legacy_fixed_of_cells;
+ void (*fixup_dt_cell_info)(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *cell);
const struct nvmem_keepout *keepout;
unsigned int nkeepout;
enum nvmem_type type;
@@ -154,15 +159,11 @@ struct nvmem_cell_table {
/**
* struct nvmem_layout - NVMEM layout definitions
*
- * @name: Layout name.
- * @of_match_table: Open firmware match table.
+ * @dev: Device-model layout device.
+ * @nvmem: The underlying NVMEM device
 * @add_cells: Will be called if an nvmem device is found which
* has this layout. The function will add layout
* specific cells with nvmem_add_one_cell().
- * @fixup_cell_info: Will be called before a cell is added. Can be
- * used to modify the nvmem_cell_info.
- * @owner: Pointer to struct module.
- * @node: List node.
*
 * An nvmem device can hold a well-defined structure which can simply be
 * evaluated at runtime. For example, a TLV list, or a list of "name=val"
@@ -170,17 +171,15 @@ struct nvmem_cell_table {
* cells.
*/
struct nvmem_layout {
- const char *name;
- const struct of_device_id *of_match_table;
- int (*add_cells)(struct device *dev, struct nvmem_device *nvmem,
- struct nvmem_layout *layout);
- void (*fixup_cell_info)(struct nvmem_device *nvmem,
- struct nvmem_layout *layout,
- struct nvmem_cell_info *cell);
-
- /* private */
- struct module *owner;
- struct list_head node;
+ struct device dev;
+ struct nvmem_device *nvmem;
+ int (*add_cells)(struct nvmem_layout *layout);
+};
+
+struct nvmem_layout_driver {
+ struct device_driver driver;
+ int (*probe)(struct nvmem_layout *layout);
+ void (*remove)(struct nvmem_layout *layout);
};
#if IS_ENABLED(CONFIG_NVMEM)
@@ -197,13 +196,14 @@ void nvmem_del_cell_table(struct nvmem_cell_table *table);
int nvmem_add_one_cell(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info);
-int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner);
-#define nvmem_layout_register(layout) \
- __nvmem_layout_register(layout, THIS_MODULE)
+int nvmem_layout_register(struct nvmem_layout *layout);
void nvmem_layout_unregister(struct nvmem_layout *layout);
-const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
- struct nvmem_layout *layout);
+int nvmem_layout_driver_register(struct nvmem_layout_driver *drv);
+void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv);
+#define module_nvmem_layout_driver(__nvmem_layout_driver) \
+ module_driver(__nvmem_layout_driver, nvmem_layout_driver_register, \
+ nvmem_layout_driver_unregister)
#else
@@ -235,17 +235,27 @@ static inline int nvmem_layout_register(struct nvmem_layout *layout)
static inline void nvmem_layout_unregister(struct nvmem_layout *layout) {}
-static inline const void *
-nvmem_layout_get_match_data(struct nvmem_device *nvmem,
- struct nvmem_layout *layout)
+#endif /* CONFIG_NVMEM */
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+
+/**
+ * of_nvmem_layout_get_container() - Get OF node of layout container
+ *
+ * @nvmem: nvmem device
+ *
+ * Return: a node pointer with refcount incremented or NULL if no
+ * container exists. Use of_node_put() on it when done.
+ */
+struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem);
+
+#else /* CONFIG_NVMEM && CONFIG_OF */
+
+static inline struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
{
return NULL;
}
-#endif /* CONFIG_NVMEM */
-
-#define module_nvmem_layout_driver(__layout_driver) \
- module_driver(__layout_driver, nvmem_layout_register, \
- nvmem_layout_unregister)
+#endif /* CONFIG_NVMEM && CONFIG_OF */
#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 2c7a3d4bc7..9042bca5bb 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -2,10 +2,7 @@
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
-#include <linux/platform_device.h>
-#include <linux/of_platform.h> /* temporary until merge */
-
-#include <linux/of.h>
+#include <linux/device/driver.h>
struct device;
struct of_device_id;
@@ -40,6 +37,9 @@ static inline int of_dma_configure(struct device *dev,
{
return of_dma_configure_id(dev, np, force_dma, NULL);
}
+
+void of_device_make_bus_id(struct device *dev);
+
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -82,6 +82,9 @@ static inline int of_dma_configure(struct device *dev,
{
return 0;
}
+
+static inline void of_device_make_bus_id(struct device *dev) {}
+
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 9a5e6b410d..e61cbbe12d 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -8,20 +8,19 @@ struct iommu_ops;
#ifdef CONFIG_OF_IOMMU
-extern const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np,
- const u32 *id);
+extern int of_iommu_configure(struct device *dev, struct device_node *master_np,
+ const u32 *id);
extern void of_iommu_get_resv_regions(struct device *dev,
struct list_head *list);
#else
-static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np,
- const u32 *id)
+static inline int of_iommu_configure(struct device *dev,
+ struct device_node *master_np,
+ const u32 *id)
{
- return NULL;
+ return -ENODEV;
}
static inline void of_iommu_get_resv_regions(struct device *dev,
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index fadfea5754..a2ff1ad48f 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -7,11 +7,11 @@
*/
#include <linux/mod_devicetable.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
struct device;
+struct device_node;
struct of_device_id;
+struct platform_device;
/**
* struct of_dev_auxdata - lookup table entry for device names & platform_data
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 7b5cf4a5cd..51af565229 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -31,8 +31,10 @@
* credit to Christian Biere.
*/
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
-#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
-#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+#define __type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_max(t) __type_max(typeof(t))
+#define __type_min(T) ((T)((T)-type_max(T)-(T)1))
+#define type_min(t) __type_min(typeof(t))
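type_max()/type_min() now wrap their argument in typeof(), so they accept either a type name or an expression, while the double-underscore forms keep the type-only behaviour. A hedged sketch showing both spellings; the function is illustrative:

#include <linux/build_bug.h>
#include <linux/limits.h>
#include <linux/overflow.h>
#include <linux/types.h>

static u16 example_clamp(u32 requested)
{
	u16 budget = 0;

	/* Expression form: typeof(budget) is u16, so this is 65535. */
	if (requested > type_max(budget))
		return type_max(budget);

	/* The type form still works, via typeof(u16). */
	BUILD_BUG_ON(type_max(u16) != U16_MAX);
	return requested;
}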
/*
* Avoids triggering -Wtype-limits compilation warning,
@@ -130,10 +132,10 @@ static inline bool __must_check __must_check_overflow(bool overflow)
#define __overflows_type_constexpr(x, T) ( \
is_unsigned_type(typeof(x)) ? \
- (x) > type_max(typeof(T)) : \
+ (x) > type_max(T) : \
is_unsigned_type(typeof(T)) ? \
- (x) < 0 || (x) > type_max(typeof(T)) : \
- (x) < type_min(typeof(T)) || (x) > type_max(typeof(T)))
+ (x) < 0 || (x) > type_max(T) : \
+ (x) < type_min(T) || (x) > type_max(T))
#define __overflows_type(x, T) ({ \
typeof(T) v = 0; \
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a88e64aceb..68e9973cd6 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -190,7 +190,6 @@ enum pageflags {
/* At least one page in this folio has the hwpoison flag set */
PG_has_hwpoisoned = PG_error,
- PG_hugetlb = PG_active,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
};
@@ -432,30 +431,51 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTSETFLAG(uname, lname, policy) \
TESTCLEARFLAG(uname, lname, policy)
+#define FOLIO_TEST_FLAG_FALSE(name) \
+static inline bool folio_test_##name(const struct folio *folio) \
+{ return false; }
+#define FOLIO_SET_FLAG_NOOP(name) \
+static inline void folio_set_##name(struct folio *folio) { }
+#define FOLIO_CLEAR_FLAG_NOOP(name) \
+static inline void folio_clear_##name(struct folio *folio) { }
+#define __FOLIO_SET_FLAG_NOOP(name) \
+static inline void __folio_set_##name(struct folio *folio) { }
+#define __FOLIO_CLEAR_FLAG_NOOP(name) \
+static inline void __folio_clear_##name(struct folio *folio) { }
+#define FOLIO_TEST_SET_FLAG_FALSE(name) \
+static inline bool folio_test_set_##name(struct folio *folio) \
+{ return false; }
+#define FOLIO_TEST_CLEAR_FLAG_FALSE(name) \
+static inline bool folio_test_clear_##name(struct folio *folio) \
+{ return false; }
+
+#define FOLIO_FLAG_FALSE(name) \
+FOLIO_TEST_FLAG_FALSE(name) \
+FOLIO_SET_FLAG_NOOP(name) \
+FOLIO_CLEAR_FLAG_NOOP(name)
+
#define TESTPAGEFLAG_FALSE(uname, lname) \
-static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
+FOLIO_TEST_FLAG_FALSE(lname) \
static inline int Page##uname(const struct page *page) { return 0; }
#define SETPAGEFLAG_NOOP(uname, lname) \
-static inline void folio_set_##lname(struct folio *folio) { } \
+FOLIO_SET_FLAG_NOOP(lname) \
static inline void SetPage##uname(struct page *page) { }
#define CLEARPAGEFLAG_NOOP(uname, lname) \
-static inline void folio_clear_##lname(struct folio *folio) { } \
+FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void ClearPage##uname(struct page *page) { }
#define __CLEARPAGEFLAG_NOOP(uname, lname) \
-static inline void __folio_clear_##lname(struct folio *folio) { } \
+__FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void __ClearPage##uname(struct page *page) { }
#define TESTSETFLAG_FALSE(uname, lname) \
-static inline bool folio_test_set_##lname(struct folio *folio) \
-{ return 0; } \
+FOLIO_TEST_SET_FLAG_FALSE(lname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }
#define TESTCLEARFLAG_FALSE(uname, lname) \
-static inline bool folio_test_clear_##lname(struct folio *folio) \
-{ return 0; } \
+FOLIO_TEST_CLEAR_FLAG_FALSE(lname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }
#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
@@ -772,19 +792,14 @@ static __always_inline void SetPageUptodate(struct page *page)
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
-bool __folio_start_writeback(struct folio *folio, bool keep_write);
-bool set_page_writeback(struct page *page);
+void __folio_start_writeback(struct folio *folio, bool keep_write);
+void set_page_writeback(struct page *page);
#define folio_start_writeback(folio) \
__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio) \
__folio_start_writeback(folio, true)
-static inline bool test_set_page_writeback(struct page *page)
-{
- return set_page_writeback(page);
-}
-
static __always_inline bool folio_test_head(struct folio *folio)
{
return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
@@ -834,29 +849,6 @@ TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
#define PG_head_mask ((1UL << PG_head))
-#ifdef CONFIG_HUGETLB_PAGE
-int PageHuge(struct page *page);
-SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
-
-/**
- * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
- * @folio: The folio to test.
- *
- * Context: Any context. Caller should have a reference on the folio to
- * prevent it from being turned into a tail page.
- * Return: True for hugetlbfs folios, false for anon folios or folios
- * belonging to other filesystems.
- */
-static inline bool folio_test_hugetlb(struct folio *folio)
-{
- return folio_test_large(folio) &&
- test_bit(PG_hugetlb, folio_flags(folio, 1));
-}
-#else
-TESTPAGEFLAG_FALSE(Huge, hugetlb)
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -913,33 +905,22 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif
/*
- * Check if a page is currently marked HWPoisoned. Note that this check is
- * best effort only and inherently racy: there is no way to synchronize with
- * failing hardware.
- */
-static inline bool is_page_hwpoison(struct page *page)
-{
- if (PageHWPoison(page))
- return true;
- return PageHuge(page) && PageHWPoison(compound_head(page));
-}
-
-/*
* For pages that are never mapped to userspace (and aren't PageSlab),
* page_type may be used. Because it is initialised to -1, we invert the
* sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
* __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
- * low bits so that an underflow or overflow of page_mapcount() won't be
+ * low bits so that an underflow or overflow of _mapcount won't be
* mistaken for a page type value.
*/
#define PAGE_TYPE_BASE 0xf0000000
-/* Reserve 0x0000007f to catch underflows of page_mapcount */
+/* Reserve 0x0000007f to catch underflows of _mapcount */
#define PAGE_MAPCOUNT_RESERVE -128
#define PG_buddy 0x00000080
#define PG_offline 0x00000100
#define PG_table 0x00000200
#define PG_guard 0x00000400
+#define PG_hugetlb 0x00000800
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -956,35 +937,38 @@ static inline int page_has_type(struct page *page)
return page_type_has_type(page->page_type);
}
+#define FOLIO_TYPE_OPS(lname, fname) \
+static __always_inline bool folio_test_##fname(const struct folio *folio)\
+{ \
+ return folio_test_type(folio, PG_##lname); \
+} \
+static __always_inline void __folio_set_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
+ folio->page.page_type &= ~PG_##lname; \
+} \
+static __always_inline void __folio_clear_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
+ folio->page.page_type |= PG_##lname; \
+}
+
#define PAGE_TYPE_OPS(uname, lname, fname) \
+FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
return PageType(page, PG_##lname); \
} \
-static __always_inline int folio_test_##fname(const struct folio *folio)\
-{ \
- return folio_test_type(folio, PG_##lname); \
-} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!PageType(page, 0), page); \
page->page_type &= ~PG_##lname; \
} \
-static __always_inline void __folio_set_##fname(struct folio *folio) \
-{ \
- VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
- folio->page.page_type &= ~PG_##lname; \
-} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
page->page_type |= PG_##lname; \
-} \
-static __always_inline void __folio_clear_##fname(struct folio *folio) \
-{ \
- VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
- folio->page.page_type |= PG_##lname; \
-} \
+}
/*
* PageBuddy() indicates that the page is free and in the buddy system
@@ -1031,6 +1015,37 @@ PAGE_TYPE_OPS(Table, table, pgtable)
*/
PAGE_TYPE_OPS(Guard, guard, guard)
+#ifdef CONFIG_HUGETLB_PAGE
+FOLIO_TYPE_OPS(hugetlb, hugetlb)
+#else
+FOLIO_TEST_FLAG_FALSE(hugetlb)
+#endif
+
+/**
+ * PageHuge - Determine if the page belongs to hugetlbfs
+ * @page: The page to test.
+ *
+ * Context: Any context.
+ * Return: True for hugetlbfs pages, false for anon pages or pages
+ * belonging to other filesystems.
+ */
+static inline bool PageHuge(const struct page *page)
+{
+ return folio_test_hugetlb(page_folio(page));
+}
+
+/*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+ * failing hardware.
+ */
+static inline bool is_page_hwpoison(struct page *page)
+{
+ if (PageHWPoison(page))
+ return true;
+ return PageHuge(page) && PageHWPoison(compound_head(page));
+}
+
extern bool is_free_buddy_page(struct page *page);
PAGEFLAG(Isolated, isolated, PF_ANY);
@@ -1097,7 +1112,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
#define PAGE_FLAGS_SECOND \
(0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
- 1UL << PG_hugetlb | 1UL << PG_large_rmappable)
+ 1UL << PG_large_rmappable)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
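With hugetlb moved into page_type, it's worth restating the inverted encoding that FOLIO_TYPE_OPS relies on: page_type starts at -1, "setting" a type clears its bit, and the test checks that the high PAGE_TYPE_BASE bits survived. A hedged, self-contained worked example using the constants from this hunk:

/* Hedged worked example of the inverted page_type encoding. */
#define EX_PAGE_TYPE_BASE	0xf0000000u
#define EX_PG_HUGETLB		0x00000800u

/* Fresh page: page_type == 0xffffffff (no type set). After
 * __folio_set_hugetlb() the bit is *cleared*: */
#define EX_TYPE_AFTER_SET	(0xffffffffu & ~EX_PG_HUGETLB)	/* 0xfffff7ff */

/* folio_test_hugetlb() then matches, since the base bits are intact: */
_Static_assert((EX_TYPE_AFTER_SET & (EX_PAGE_TYPE_BASE | EX_PG_HUGETLB)) ==
	       EX_PAGE_TYPE_BASE, "inverted page_type encoding");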
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index e83c4c0950..3f2409b968 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -41,14 +41,14 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER)
+#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_PAGE_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#else /* CONFIG_HUGETLB_PAGE */
/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
-#define pageblock_order MAX_ORDER
+#define pageblock_order MAX_PAGE_ORDER
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 06142ff7f9..2df35e6555 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -206,6 +206,7 @@ enum mapping_flags {
AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
AS_STABLE_WRITES, /* must wait for writeback before modifying
folio contents */
+ AS_UNMOVABLE, /* The mapping cannot be moved, ever */
};
/**
@@ -306,6 +307,22 @@ static inline void mapping_clear_stable_writes(struct address_space *mapping)
clear_bit(AS_STABLE_WRITES, &mapping->flags);
}
+static inline void mapping_set_unmovable(struct address_space *mapping)
+{
+ /*
+ * It's expected unmovable mappings are also unevictable. Compaction
+ * migrate scanner (isolate_migratepages_block()) relies on this to
+ * reduce page locking.
+ */
+ set_bit(AS_UNEVICTABLE, &mapping->flags);
+ set_bit(AS_UNMOVABLE, &mapping->flags);
+}
+
+static inline bool mapping_unmovable(struct address_space *mapping)
+{
+ return test_bit(AS_UNMOVABLE, &mapping->flags);
+}
+
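As a sketch of the intended use (driver context hypothetical): a mapping whose pages stay pinned for the device's lifetime opts out of both reclaim and compaction in one call, and the migrate scanner then rejects its folios, roughly as isolate_migratepages_block() does:

	/* at mapping creation time, e.g. for permanently pinned backing */
	mapping_set_unmovable(inode->i_mapping);

	/* in the compaction path */
	struct address_space *mapping = folio_mapping(folio);

	if (mapping && mapping_unmovable(mapping))
		goto isolate_fail;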
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return mapping->gfp_mask;
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 6b1301e249..3a4860bd27 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -93,6 +93,6 @@ extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
#if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev);
-int pci_host_common_remove(struct platform_device *pdev);
+void pci_host_common_remove(struct platform_device *pdev);
#endif
#endif
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 5cb6940310..40ea18f5aa 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -19,13 +19,6 @@ enum pci_epc_interface_type {
SECONDARY_INTERFACE,
};
-enum pci_epc_irq_type {
- PCI_EPC_IRQ_UNKNOWN,
- PCI_EPC_IRQ_LEGACY,
- PCI_EPC_IRQ_MSI,
- PCI_EPC_IRQ_MSIX,
-};
-
static inline const char *
pci_epc_interface_string(enum pci_epc_interface_type type)
{
@@ -79,7 +72,7 @@ struct pci_epc_ops {
u16 interrupts, enum pci_barno, u32 offset);
int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+ unsigned int type, u16 interrupt_num);
int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data,
@@ -122,7 +115,7 @@ struct pci_epc_mem {
* struct pci_epc - represents the PCI EPC device
* @dev: PCI EPC device
* @pci_epf: list of endpoint functions present in this EPC device
- * list_lock: Mutex for protecting pci_epf list
+ * @list_lock: Mutex for protecting pci_epf list
* @ops: function pointers for performing endpoint operations
* @windows: array of address space of the endpoint controller
* @mem: first window of the endpoint controller, which corresponds to
@@ -229,7 +222,7 @@ int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+ unsigned int type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 3f44b6aec4..77b146e0f6 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -68,7 +68,7 @@ struct pci_epf_ops {
};
/**
- * struct pci_epf_event_ops - Callbacks for capturing the EPC events
+ * struct pci_epc_event_ops - Callbacks for capturing the EPC events
* @core_init: Callback for the EPC initialization complete event
* @link_up: Callback for the EPC link up event
* @link_down: Callback for the EPC link down event
@@ -98,7 +98,7 @@ struct pci_epf_driver {
void (*remove)(struct pci_epf *epf);
struct device_driver driver;
- struct pci_epf_ops *ops;
+ const struct pci_epf_ops *ops;
struct module *owner;
struct list_head epf_group;
const struct pci_epf_device_id *id_table;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index ca0e61c838..213109d3c6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -715,6 +715,7 @@ static inline bool pci_is_bridge(struct pci_dev *dev)
/**
* pci_is_vga - check if the PCI device is a VGA device
+ * @pdev: PCI device
*
* The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
* VGA Base Class and Sub-Classes:
@@ -885,7 +886,6 @@ struct module;
/**
* struct pci_driver - PCI driver structure
- * @node: List of driver structures.
* @name: Driver name.
* @id_table: Pointer to table of device IDs the driver is
* interested in. Most drivers should export this
@@ -940,7 +940,6 @@ struct module;
* own I/O address space.
*/
struct pci_driver {
- struct list_head node;
const char *name;
const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
@@ -1073,11 +1072,13 @@ enum {
PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
};
-#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
+#define PCI_IRQ_INTX (1 << 0) /* Allow INTx interrupts */
#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
+#define PCI_IRQ_LEGACY PCI_IRQ_INTX /* Deprecated! Use PCI_IRQ_INTX */
+
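Existing callers keep building through the alias; new code spells INTx explicitly. A minimal allocation sketch:

	/* accept MSI-X, MSI or INTx, in decreasing order of preference */
	nvec = pci_alloc_irq_vectors(pdev, 1, 4,
				     PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;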
/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
@@ -1170,6 +1171,7 @@ int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
struct pci_dev *pci_dev_get(struct pci_dev *dev);
void pci_dev_put(struct pci_dev *dev);
+DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
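With the cleanup class defined, a looked-up device reference can be dropped automatically on every exit path; a sketch (lookup parameters hypothetical):

	struct pci_dev *peer __free(pci_dev_put) =
		pci_get_device(PCI_VENDOR_ID_ALIBABA, PCI_ANY_ID, NULL);

	if (!peer)
		return -ENODEV;
	/* use peer; the reference is put automatically at scope exit */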
void pci_remove_bus(struct pci_bus *b);
void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
@@ -1239,6 +1241,8 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
+void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
+ u32 clear, u32 set);
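The new helper is a read-modify-write over a single config dword. A hedged sketch in the style of the ASPM L1SS updates (assuming pdev->l1ss holds the capability offset, as the ASPM code does):

	/* update only the LTR L1.2 threshold bits, leaving the rest intact */
	pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE, val);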
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
@@ -1364,6 +1368,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
+int pcie_link_speed_mbps(struct pci_dev *pdev);
void pcie_print_link_status(struct pci_dev *dev);
int pcie_reset_flr(struct pci_dev *dev, bool probe);
int pcie_flr(struct pci_dev *dev);
@@ -1877,6 +1882,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
void pci_dev_lock(struct pci_dev *dev);
int pci_dev_trylock(struct pci_dev *dev);
void pci_dev_unlock(struct pci_dev *dev);
+DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
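This pairs pci_dev_lock()/pci_dev_unlock() with scope-based cleanup; a minimal sketch:

	static void reset_prepare(struct pci_dev *pdev)
	{
		guard(pci_dev)(pdev);	/* unlocked automatically at scope exit */
		/* ... config-space accesses while the device is locked ... */
	}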
/*
* PCI domain support. Sometimes called PCI segment (eg by ACPI),
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 97cc0baad0..a0c75e467d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2605,6 +2605,8 @@
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+#define PCI_VENDOR_ID_ALIBABA 0x1ded
+
#define PCI_VENDOR_ID_TEHUTI 0x1fc9
#define PCI_DEVICE_ID_TEHUTI_3009 0x3009
#define PCI_DEVICE_ID_TEHUTI_3010 0x3010
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 143fbc10ec..b3b34f6670 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -60,12 +60,6 @@ struct pmu_hw_events {
DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;
-
- /*
* When using percpu IRQs, we need a percpu dev_id. Place it here as we
* already have to allocate this struct per cpu.
*/
@@ -189,4 +183,26 @@ void armpmu_free_irq(int irq, int cpu);
#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"
#define ARMV8_TRBE_PDEV_NAME "arm,trbe"
+/* Why does everything I do descend into this? */
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name) \
+ PMU_FORMAT_ATTR(name, \
+ _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI))
+
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
+ ((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0))
+
+#define ATTR_CFG_GET_FLD(attr, name) \
+ _ATTR_CFG_GET_FLD(attr, \
+ ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI)
+
#endif /* __ARM_PMU_H__ */
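These generators centralise a pattern already used by the SPE driver: each config field is described once by three ATTR_CFG_FLD_* macros, and both the sysfs format string and the extraction helper fall out of that. A sketch (field name borrowed from the SPE driver):

	#define ATTR_CFG_FLD_ts_enable_CFG	config	/* perf_event_attr member */
	#define ATTR_CFG_FLD_ts_enable_LO	0
	#define ATTR_CFG_FLD_ts_enable_HI	0

	GEN_PMU_FORMAT_ATTR(ts_enable);	/* sysfs "format/ts_enable" = "config:0" */

	u64 ts = ATTR_CFG_GET_FLD(&event->attr, ts_enable);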
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 9c226adf93..46377e134d 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -215,21 +215,27 @@
#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
-#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMU_PMCR_N_MASK 0x1f
-#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */
+#define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */
+/* Mask for writable bits */
+#define ARMV8_PMU_PMCR_MASK (ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | \
+ ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_D | \
+ ARMV8_PMU_PMCR_X | ARMV8_PMU_PMCR_DP | \
+ ARMV8_PMU_PMCR_LC | ARMV8_PMU_PMCR_LP)
/*
* PMOVSR: counters overflow flag status reg
*/
-#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
+#define ARMV8_PMU_OVSR_P GENMASK(30, 0)
+#define ARMV8_PMU_OVSR_C BIT(31)
+/* Mask for writable bits is both P and C fields */
+#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C)
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
-#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_TH GENMASK_ULL(43, 32) /* arm64 only */
+#define ARMV8_PMU_EVTYPE_TC GENMASK_ULL(63, 61) /* arm64 only */
/*
* Event filters for PMUv3
@@ -244,19 +250,19 @@
/*
* PMUSERENR: user enable reg
*/
-#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
+ ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)
/* PMMIR_EL1.SLOTS mask */
-#define ARMV8_PMU_SLOTS_MASK 0xff
-
-#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
-#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
-#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
-#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
+#define ARMV8_PMU_SLOTS GENMASK(7, 0)
+#define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8)
+#define ARMV8_PMU_BUS_WIDTH GENMASK(19, 16)
+#define ARMV8_PMU_THWIDTH GENMASK(23, 20)
/*
* This code is really good
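With single-field GENMASK() definitions the open-coded shift/mask pairs go away and extraction becomes FIELD_GET(); a sketch:

	#include <linux/bitfield.h>

	nr_counters = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
	slots	    = FIELD_GET(ARMV8_PMU_SLOTS, pmmir);
	bus_width   = FIELD_GET(ARMV8_PMU_BUS_WIDTH, pmmir);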
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5547ba68e6..d2a15c0c6f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1143,6 +1143,15 @@ static inline bool branch_sample_priv(const struct perf_event *event)
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
}
+static inline bool branch_sample_counters(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS;
+}
+
+static inline bool branch_sample_call_stack(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
+}
struct perf_sample_data {
/*
@@ -1177,6 +1186,7 @@ struct perf_sample_data {
struct perf_callchain_entry *callchain;
struct perf_raw_record *raw;
struct perf_branch_stack *br_stack;
+ u64 *br_stack_cntr;
union perf_sample_weight weight;
union perf_mem_data_src data_src;
u64 txn;
@@ -1254,7 +1264,8 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
static inline void perf_sample_save_brstack(struct perf_sample_data *data,
struct perf_event *event,
- struct perf_branch_stack *brs)
+ struct perf_branch_stack *brs,
+ u64 *brs_cntr)
{
int size = sizeof(u64); /* nr */
@@ -1262,7 +1273,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data,
size += sizeof(u64);
size += brs->nr * sizeof(struct perf_branch_entry);
+ /*
+ * The extension space for counters is appended after the
+ * struct perf_branch_stack. It is used to store the occurrences
+ * of events of each branch.
+ */
+ if (brs_cntr)
+ size += brs->nr * sizeof(u64);
+
data->br_stack = brs;
+ data->br_stack_cntr = brs_cntr;
data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
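Callers without per-branch counters simply pass NULL for the new argument; PMUs that log event counts per branch hand over one u64 per recorded entry (context names hypothetical):

	/* legacy behaviour: branch stack only */
	perf_sample_save_brstack(&data, event, &pmu_ctx->br_stack, NULL);

	/* with counters: brs->nr extra u64s are accounted in dyn_size */
	perf_sample_save_brstack(&data, event, &pmu_ctx->br_stack,
				 pmu_ctx->br_cntr);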
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index af7639c3b0..f6d0e35139 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -184,6 +184,13 @@ static inline int pmd_young(pmd_t pmd)
}
#endif
+#ifndef pmd_dirty
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
/*
* A facility to provide lazy MMU batching. This allows PTE updates and
* page invalidations to be delayed until a call to leave lazy MMU mode
@@ -292,6 +299,27 @@ static inline pmd_t pmdp_get(pmd_t *pmdp)
}
#endif
+#ifndef pudp_get
+static inline pud_t pudp_get(pud_t *pudp)
+{
+ return READ_ONCE(*pudp);
+}
+#endif
+
+#ifndef p4dp_get
+static inline p4d_t p4dp_get(p4d_t *p4dp)
+{
+ return READ_ONCE(*p4dp);
+}
+#endif
+
+#ifndef pgdp_get
+static inline pgd_t pgdp_get(pgd_t *pgdp)
+{
+ return READ_ONCE(*pgdp);
+}
+#endif
+
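Like ptep_get()/pmdp_get(), these wrap READ_ONCE() so a lockless walker reads each level exactly once and then works on the snapshot rather than re-reading the live entry; a sketch:

	static pmd_t *walk_to_pmd(pud_t *pudp, unsigned long addr)
	{
		pud_t pud = pudp_get(pudp);	/* one READ_ONCE(): no torn load */

		if (pud_none(pud) || !pud_present(pud))
			return NULL;
		return pmd_offset(&pud, addr);	/* offset into the snapshot */
	}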
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
@@ -375,7 +403,7 @@ static inline bool arch_has_hw_nonleaf_pmd_young(void)
*/
static inline bool arch_has_hw_pte_young(void)
{
- return false;
+ return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
}
#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index bd28595097..684efaeca0 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -327,7 +327,8 @@ struct mdio_bus_stats {
/**
* struct phy_package_shared - Shared information in PHY packages
- * @addr: Common PHY address used to combine PHYs in one package
+ * @base_addr: Base PHY address of PHY package used to combine PHYs
+ * in one package and for offset calculation of phy_package_read/write
* @refcnt: Number of PHYs connected to this shared data
* @flags: Initialization of PHY package
* @priv_size: Size of the shared private data @priv
@@ -338,7 +339,7 @@ struct mdio_bus_stats {
* phy_package_leave().
*/
struct phy_package_shared {
- int addr;
+ u8 base_addr;
refcount_t refcnt;
unsigned long flags;
size_t priv_size;
@@ -604,6 +605,8 @@ struct macsec_ops;
* @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
* requiring a rerun of the interrupt handler after resume
* @interface: enum phy_interface_t value
+ * @possible_interfaces: bitmap of interface modes that the attached PHY
+ * will switch between depending on media speed.
* @skb: Netlink message for cable diagnostics
* @nest: Netlink nest used for cable diagnostics
* @ehdr: Netlink header for cable diagnostics
@@ -673,6 +676,7 @@ struct phy_device {
u32 dev_flags;
phy_interface_t interface;
+ DECLARE_PHY_INTERFACE_MASK(possible_interfaces);
/*
* forced speed & duplex (no autoneg)
@@ -1559,9 +1563,11 @@ static inline bool phy_has_txtstamp(struct phy_device *phydev)
return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp;
}
-static inline int phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
+static inline int phy_hwtstamp(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
+ return phydev->mii_ts->hwtstamp(phydev->mii_ts, cfg, extack);
}
static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
@@ -1860,6 +1866,7 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
int genphy_c45_read_mdix(struct phy_device *phydev);
int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+int genphy_c45_pma_read_ext_abilities(struct phy_device *phydev);
int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev);
int genphy_c45_read_eee_abilities(struct phy_device *phydev);
int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev);
@@ -1969,10 +1976,10 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev,
int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
-int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size);
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
void phy_package_leave(struct phy_device *phydev);
int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int addr, size_t priv_size);
+ int base_addr, size_t priv_size);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
@@ -1995,48 +2002,83 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack);
-static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
+static inline int phy_package_address(struct phy_device *phydev,
+ unsigned int addr_offset)
{
struct phy_package_shared *shared = phydev->shared;
+ u8 base_addr = shared->base_addr;
- if (!shared)
+ if (addr_offset >= PHY_MAX_ADDR - base_addr)
return -EIO;
- return mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+ /* we know that addr will be in the range 0..31 and thus the
+ * implicit cast to a signed int is not a problem.
+ */
+ return base_addr + addr_offset;
}
-static inline int __phy_package_read(struct phy_device *phydev, u32 regnum)
+static inline int phy_package_read(struct phy_device *phydev,
+ unsigned int addr_offset, u32 regnum)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
+
+ return mdiobus_read(phydev->mdio.bus, addr, regnum);
+}
+
+static inline int __phy_package_read(struct phy_device *phydev,
+ unsigned int addr_offset, u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
- return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+ return __mdiobus_read(phydev->mdio.bus, addr, regnum);
}
static inline int phy_package_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ unsigned int addr_offset, u32 regnum,
+ u16 val)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
- return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+ return mdiobus_write(phydev->mdio.bus, addr, regnum, val);
}
static inline int __phy_package_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ unsigned int addr_offset, u32 regnum,
+ u16 val)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
- return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+ return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
}
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+
+int phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+
+int phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+
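All package accessors now take an offset relative to the shared base address instead of a raw PHY address; a sketch (offset value hypothetical):

	/* read BMSR of the PHY sitting at base_addr + 1 inside the package */
	ret = phy_package_read(phydev, 1, MII_BMSR);
	if (ret < 0)
		return ret;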
static inline bool __phy_package_set_once(struct phy_device *phydev,
unsigned int b)
{
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 875439ab45..d589f89c61 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -99,72 +99,6 @@ static inline bool phylink_autoneg_inband(unsigned int mode)
}
/**
- * phylink_pcs_neg_mode() - helper to determine PCS inband mode
- * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
- * @interface: interface mode to be used
- * @advertising: adertisement ethtool link mode mask
- *
- * Determines the negotiation mode to be used by the PCS, and returns
- * one of:
- *
- * - %PHYLINK_PCS_NEG_NONE: interface mode does not support inband
- * - %PHYLINK_PCS_NEG_OUTBAND: an out of band mode (e.g. reading the PHY)
- * will be used.
- * - %PHYLINK_PCS_NEG_INBAND_DISABLED: inband mode selected but autoneg
- * disabled
- * - %PHYLINK_PCS_NEG_INBAND_ENABLED: inband mode selected and autoneg enabled
- *
- * Note: this is for cases where the PCS itself is involved in negotiation
- * (e.g. Clause 37, SGMII and similar) not Clause 73.
- */
-static inline unsigned int phylink_pcs_neg_mode(unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertising)
-{
- unsigned int neg_mode;
-
- switch (interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_QUSGMII:
- case PHY_INTERFACE_MODE_USXGMII:
- /* These protocols are designed for use with a PHY which
- * communicates its negotiation result back to the MAC via
- * inband communication. Note: there exist PHYs that run
- * with SGMII but do not send the inband data.
- */
- if (!phylink_autoneg_inband(mode))
- neg_mode = PHYLINK_PCS_NEG_OUTBAND;
- else
- neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
- break;
-
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* 1000base-X is designed for use media-side for Fibre
- * connections, and thus the Autoneg bit needs to be
- * taken into account. We also do this for 2500base-X
- * as well, but drivers may not support this, so may
- * need to override this.
- */
- if (!phylink_autoneg_inband(mode))
- neg_mode = PHYLINK_PCS_NEG_OUTBAND;
- else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- advertising))
- neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
- else
- neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
- break;
-
- default:
- neg_mode = PHYLINK_PCS_NEG_NONE;
- break;
- }
-
- return neg_mode;
-}
-
-/**
* struct phylink_link_state - link state structure
* @advertising: ethtool bitmask containing advertised link modes
* @lp_advertising: ethtool bitmask containing link partner advertised link
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 653a527574..395cacce11 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -2,18 +2,12 @@
#ifndef _LINUX_PID_H
#define _LINUX_PID_H
+#include <linux/pid_types.h>
#include <linux/rculist.h>
-#include <linux/wait.h>
+#include <linux/rcupdate.h>
#include <linux/refcount.h>
-
-enum pid_type
-{
- PIDTYPE_PID,
- PIDTYPE_TGID,
- PIDTYPE_PGID,
- PIDTYPE_SID,
- PIDTYPE_MAX,
-};
+#include <linux/sched.h>
+#include <linux/wait.h>
/*
* What is struct pid?
@@ -110,9 +104,6 @@ extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
-struct pid_namespace;
-extern struct pid_namespace init_pid_ns;
-
extern int pid_max;
extern int pid_max_min, pid_max_max;
@@ -215,4 +206,127 @@ pid_t pid_vnr(struct pid *pid);
} \
task = tg___; \
} while_each_pid_task(pid, type, task)
+
+static inline struct pid *task_pid(struct task_struct *task)
+{
+ return task->thread_pid;
+}
+
+/*
+ * the helpers to get the task's different pids as they are seen
+ * from various namespaces
+ *
+ * task_xid_nr() : global id, i.e. the id seen from the init namespace;
+ * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
+ * current.
+ * task_xid_nr_ns() : id seen from the ns specified;
+ *
+ * see also pid_nr() etc in include/linux/pid.h
+ */
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+ return tsk->pid;
+}
+
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
+
+static inline pid_t task_pid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+}
+
+
+static inline pid_t task_tgid_nr(struct task_struct *tsk)
+{
+ return tsk->tgid;
+}
+
+/**
+ * pid_alive - check that a task structure is not stale
+ * @p: Task structure to be checked.
+ *
+ * Test if a process is not yet dead (at most zombie state).
+ * If pid_alive fails, then pointers within the task structure
+ * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
+ */
+static inline int pid_alive(const struct task_struct *p)
+{
+ return p->thread_pid != NULL;
+}
+
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+}
+
+static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
+}
+
+
+static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
+}
+
+static inline pid_t task_session_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+}
+
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
+}
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
+}
+
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+ pid_t pid = 0;
+
+ rcu_read_lock();
+ if (pid_alive(tsk))
+ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+ rcu_read_unlock();
+
+ return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+ return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
+/* Obsolete, do not use: */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+ return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
+
+/**
+ * is_global_init - check if a task structure is init. Since init
+ * is free to have sub-threads, we need to check tgid.
+ * @tsk: Task structure to be checked.
+ *
+ * Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
+ */
+static inline int is_global_init(struct task_struct *tsk)
+{
+ return task_tgid_nr(tsk) == 1;
+}
+
#endif /* _LINUX_PID_H */
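The _nr/_vnr/_nr_ns trio differs only in which namespace the id is resolved against; for example:

	pid_t vpid = task_pid_vnr(tsk);	/* as seen from current's pid namespace */
	pid_t tgid = task_tgid_nr(tsk);	/* global, i.e. the init namespace view */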
diff --git a/include/linux/pid_types.h b/include/linux/pid_types.h
new file mode 100644
index 0000000000..c2aee1d91d
--- /dev/null
+++ b/include/linux/pid_types.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PID_TYPES_H
+#define _LINUX_PID_TYPES_H
+
+enum pid_type {
+ PIDTYPE_PID,
+ PIDTYPE_TGID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+ PIDTYPE_MAX,
+};
+
+struct pid_namespace;
+extern struct pid_namespace init_pid_ns;
+
+#endif /* _LINUX_PID_TYPES_H */
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index ee8803f6ad..673e96df45 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -47,7 +47,7 @@ struct pinctrl_map_mux {
struct pinctrl_map_configs {
const char *group_or_pin;
unsigned long *configs;
- unsigned num_configs;
+ unsigned int num_configs;
};
/**
@@ -154,13 +154,13 @@ struct pinctrl_map;
#ifdef CONFIG_PINCTRL
extern int pinctrl_register_mappings(const struct pinctrl_map *map,
- unsigned num_maps);
+ unsigned int num_maps);
extern void pinctrl_unregister_mappings(const struct pinctrl_map *map);
extern void pinctrl_provide_dummies(void);
#else
static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
- unsigned num_maps)
+ unsigned int num_maps)
{
return 0;
}
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index d74b7a4ea1..a65d3d078e 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -193,17 +193,17 @@ struct pinconf_generic_params {
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np, struct pinctrl_map **map,
- unsigned *reserved_maps, unsigned *num_maps,
+ unsigned int *reserved_maps, unsigned int *num_maps,
enum pinctrl_map_type type);
int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
- unsigned *num_maps, enum pinctrl_map_type type);
+ unsigned int *num_maps, enum pinctrl_map_type type);
void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps);
+ struct pinctrl_map *map, unsigned int num_maps);
static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
- unsigned *num_maps)
+ unsigned int *num_maps)
{
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_CONFIGS_GROUP);
@@ -211,7 +211,7 @@ static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctld
static inline int pinconf_generic_dt_node_to_map_pin(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
- unsigned *num_maps)
+ unsigned int *num_maps)
{
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_CONFIGS_PIN);
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index f8a8215e90..770ec22211 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -40,25 +40,25 @@ struct pinconf_ops {
bool is_generic;
#endif
int (*pin_config_get) (struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *config);
int (*pin_config_set) (struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs);
+ unsigned int num_configs);
int (*pin_config_group_get) (struct pinctrl_dev *pctldev,
- unsigned selector,
+ unsigned int selector,
unsigned long *config);
int (*pin_config_group_set) (struct pinctrl_dev *pctldev,
- unsigned selector,
+ unsigned int selector,
unsigned long *configs,
- unsigned num_configs);
+ unsigned int num_configs);
void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
- unsigned offset);
+ unsigned int offset);
void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
- unsigned selector);
+ unsigned int selector);
void (*pin_config_config_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned long config);
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 4d252ea00e..9a8189ffd0 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -54,7 +54,7 @@ struct pingroup {
* @drv_data: driver-defined per-pin data. pinctrl core does not touch this
*/
struct pinctrl_pin_desc {
- unsigned number;
+ unsigned int number;
const char *name;
void *drv_data;
};
@@ -82,7 +82,7 @@ struct pinctrl_gpio_range {
unsigned int base;
unsigned int pin_base;
unsigned int npins;
- unsigned const *pins;
+ unsigned int const *pins;
struct gpio_chip *gc;
};
@@ -108,18 +108,18 @@ struct pinctrl_gpio_range {
struct pinctrl_ops {
int (*get_groups_count) (struct pinctrl_dev *pctldev);
const char *(*get_group_name) (struct pinctrl_dev *pctldev,
- unsigned selector);
+ unsigned int selector);
int (*get_group_pins) (struct pinctrl_dev *pctldev,
- unsigned selector,
- const unsigned **pins,
- unsigned *num_pins);
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins);
void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s,
- unsigned offset);
+ unsigned int offset);
int (*dt_node_to_map) (struct pinctrl_dev *pctldev,
struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps);
+ struct pinctrl_map **map, unsigned int *num_maps);
void (*dt_free_map) (struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps);
+ struct pinctrl_map *map, unsigned int num_maps);
};
/**
@@ -193,7 +193,7 @@ extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *ranges,
- unsigned nranges);
+ unsigned int nranges);
extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
@@ -203,8 +203,8 @@ extern struct pinctrl_gpio_range *
pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
unsigned int pin);
extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
- const char *pin_group, const unsigned **pins,
- unsigned *num_pins);
+ const char *pin_group, const unsigned int **pins,
+ unsigned int *num_pins);
/**
* struct pinfunction - Description about a function
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index a7e370965c..d6f7b58d6a 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -57,26 +57,26 @@ struct pinctrl_gpio_range;
* the pin request.
*/
struct pinmux_ops {
- int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
- int (*free) (struct pinctrl_dev *pctldev, unsigned offset);
+ int (*request) (struct pinctrl_dev *pctldev, unsigned int offset);
+ int (*free) (struct pinctrl_dev *pctldev, unsigned int offset);
int (*get_functions_count) (struct pinctrl_dev *pctldev);
const char *(*get_function_name) (struct pinctrl_dev *pctldev,
- unsigned selector);
+ unsigned int selector);
int (*get_function_groups) (struct pinctrl_dev *pctldev,
- unsigned selector,
- const char * const **groups,
- unsigned *num_groups);
- int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
- unsigned group_selector);
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups);
+ int (*set_mux) (struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector);
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset);
+ unsigned int offset);
void (*gpio_disable_free) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset);
+ unsigned int offset);
int (*gpio_set_direction) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset,
+ unsigned int offset,
bool input);
bool strict;
};
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h
index 2543c2a1c9..e2e8957683 100644
--- a/include/linux/platform_data/i2c-mux-reg.h
+++ b/include/linux/platform_data/i2c-mux-reg.h
@@ -17,7 +17,6 @@
* @n_values: Number of multiplexer channels
* @little_endian: Indicating if the register is in little endian
* @write_only: Reading the register is not allowed by hardware
- * @classes: Optional I2C auto-detection classes
* @idle: Value to write to mux when idle
* @idle_in_use: indicate if idle value is in use
* @reg: Virtual address of the register to switch channel
@@ -30,7 +29,6 @@ struct i2c_mux_reg_platform_data {
int n_values;
bool little_endian;
bool write_only;
- const unsigned int *classes;
u32 idle;
bool idle_in_use;
void __iomem *reg;
diff --git a/include/linux/platform_data/keypad-omap.h b/include/linux/platform_data/keypad-omap.h
index 3e7c64c854..f3f1311cdf 100644
--- a/include/linux/platform_data/keypad-omap.h
+++ b/include/linux/platform_data/keypad-omap.h
@@ -19,9 +19,6 @@ struct omap_kp_platform_data {
bool rep;
unsigned long delay;
bool dbounce;
- /* specific to OMAP242x*/
- unsigned int *row_gpios;
- unsigned int *col_gpios;
};
/* Group (0..3) -- when multiple keys are pressed, only the
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index ea1cc6d829..f177416635 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -20,10 +20,31 @@
#define __MICROCHIP_KSZ_H
#include <linux/types.h>
+#include <linux/platform_data/dsa.h>
+
+enum ksz_chip_id {
+ KSZ8563_CHIP_ID = 0x8563,
+ KSZ8795_CHIP_ID = 0x8795,
+ KSZ8794_CHIP_ID = 0x8794,
+ KSZ8765_CHIP_ID = 0x8765,
+ KSZ8830_CHIP_ID = 0x8830,
+ KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9896_CHIP_ID = 0x00989600,
+ KSZ9897_CHIP_ID = 0x00989700,
+ KSZ9893_CHIP_ID = 0x00989300,
+ KSZ9563_CHIP_ID = 0x00956300,
+ KSZ9567_CHIP_ID = 0x00956700,
+ LAN9370_CHIP_ID = 0x00937000,
+ LAN9371_CHIP_ID = 0x00937100,
+ LAN9372_CHIP_ID = 0x00937200,
+ LAN9373_CHIP_ID = 0x00937300,
+ LAN9374_CHIP_ID = 0x00937400,
+};
struct ksz_platform_data {
+ /* Must be first such that dsa_register_switch() can access it */
+ struct dsa_chip_data cd;
u32 chip_id;
- u16 enabled_ports;
};
#endif
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index c71a2dd661..5f412a6155 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -105,10 +105,12 @@ struct si5351_clkout_config {
* @clk_xtal: xtal input clock
* @clk_clkin: clkin input clock
* @pll_src: array of pll source clock setting
+ * @pll_reset: array indicating if plls should be reset after setting the rate
* @clkout: array of clkout configuration
*/
struct si5351_platform_data {
enum si5351_pll_src pll_src[2];
+ bool pll_reset[2];
struct si5351_clkout_config clkout[8];
};
diff --git a/include/linux/platform_data/x86/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h
index 41df326583..7f13202931 100644
--- a/include/linux/platform_data/x86/clk-lpss.h
+++ b/include/linux/platform_data/x86/clk-lpss.h
@@ -15,6 +15,6 @@ struct lpss_clk_data {
struct clk *clk;
};
-extern int lpss_atom_clk_init(void);
+int lpss_atom_clk_init(void);
#endif /* __CLK_LPSS_H */
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 0f352c1d3c..8c1c8adf7f 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -75,20 +75,10 @@
#include <linux/container_of.h>
#include <linux/list.h>
-#include <linux/types.h>
+#include <linux/plist_types.h>
#include <asm/bug.h>
-struct plist_head {
- struct list_head node_list;
-};
-
-struct plist_node {
- int prio;
- struct list_head prio_list;
- struct list_head node_list;
-};
-
/**
* PLIST_HEAD_INIT - static struct plist_head initializer
* @head: struct plist_head variable name
diff --git a/include/linux/plist_types.h b/include/linux/plist_types.h
new file mode 100644
index 0000000000..c37e784330
--- /dev/null
+++ b/include/linux/plist_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_PLIST_TYPES_H
+#define _LINUX_PLIST_TYPES_H
+
+#include <linux/types.h>
+
+struct plist_head {
+ struct list_head node_list;
+};
+
+struct plist_node {
+ int prio;
+ struct list_head prio_list;
+ struct list_head node_list;
+};
+
+#endif /* _LINUX_PLIST_TYPES_H */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 92a4f69de0..a2f3e53a81 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -681,6 +681,7 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
+ bool async_in_progress:1; /* Owned by the PM core */
unsigned int must_resume:1; /* Owned by the PM core */
unsigned int may_skip_resume:1; /* Set by subsystems */
#else
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index ada3a0ab10..68669ce187 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -91,10 +91,10 @@ static inline int devm_pm_clk_create(struct device *dev)
#endif
#ifdef CONFIG_HAVE_CLK
-extern void pm_clk_add_notifier(struct bus_type *bus,
+extern void pm_clk_add_notifier(const struct bus_type *bus,
struct pm_clk_notifier_block *clknb);
#else
-static inline void pm_clk_add_notifier(struct bus_type *bus,
+static inline void pm_clk_add_notifier(const struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
}
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 34663d0d5c..b97c5e9820 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -118,7 +118,6 @@ struct genpd_power_state {
};
struct genpd_lock_ops;
-struct dev_pm_opp;
struct opp_table;
struct generic_pm_domain {
@@ -146,8 +145,6 @@ struct generic_pm_domain {
int (*power_on)(struct generic_pm_domain *domain);
struct raw_notifier_head power_notifiers; /* Power on/off notifiers */
struct opp_table *opp_table; /* OPP table of the genpd */
- unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
- struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
unsigned int state);
struct gpd_dev_ops dev_ops;
@@ -348,8 +345,6 @@ int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
-unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
- struct dev_pm_opp *opp);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -395,13 +390,6 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
return -ENODEV;
}
-static inline unsigned int
-pm_genpd_opp_to_performance_state(struct device *genpd_dev,
- struct dev_pm_opp *opp)
-{
- return 0;
-}
-
static inline int genpd_dev_pm_attach(struct device *dev)
{
return 0;
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 18a102174c..76dcb7f37b 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -45,18 +45,6 @@ struct dev_pm_opp_supply {
unsigned long u_watt;
};
-/**
- * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
- * @avg: Average bandwidth corresponding to this OPP (in icc units)
- * @peak: Peak bandwidth corresponding to this OPP (in icc units)
- *
- * This structure stores the bandwidth values for a single interconnect path.
- */
-struct dev_pm_opp_icc_bw {
- u32 avg;
- u32 peak;
-};
-
typedef int (*config_regulators_t)(struct device *dev,
struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
struct regulator **regulators, unsigned int count);
@@ -74,8 +62,10 @@ typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
* @supported_hw_count: Number of elements in the array.
* @regulator_names: Array of pointers to the names of the regulator, NULL terminated.
* @genpd_names: Null terminated array of pointers containing names of genpd to
- * attach.
- * @virt_devs: Pointer to return the array of virtual devices.
+ * attach. Mutually exclusive with required_devs.
+ * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually
+ * exclusive with required_devs.
+ * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs.
*
* This structure contains platform specific OPP configurations for the device.
*/
@@ -90,11 +80,15 @@ struct dev_pm_opp_config {
const char * const *regulator_names;
const char * const *genpd_names;
struct device ***virt_devs;
+ struct device **required_devs;
};
+#define OPP_LEVEL_UNSET U32_MAX
+
/**
* struct dev_pm_opp_data - The data to use to initialize an OPP.
- * @level: The performance level for the OPP.
+ * @level: The performance level for the OPP. Set level to OPP_LEVEL_UNSET if
+ * level field isn't used.
* @freq: The clock rate in Hz for the OPP.
* @u_volt: The voltage in uV for the OPP.
*/
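Dynamic OPPs that carry no performance level should now say so explicitly; a sketch assuming the dev_pm_opp_add_dynamic() registration path (values hypothetical):

	struct dev_pm_opp_data data = {
		.level	= OPP_LEVEL_UNSET,	/* no performance level */
		.freq	= 800000000,		/* 800 MHz */
		.u_volt	= 900000,		/* 900 mV */
	};

	ret = dev_pm_opp_add_dynamic(dev, &data);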
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 267fb8a4fb..ddbe7c3ca4 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -435,7 +435,7 @@ struct pnp_protocol {
#define protocol_for_each_dev(protocol, dev) \
list_for_each_entry(dev, &(protocol)->devices, protocol_list)
-extern struct bus_type pnp_bus_type;
+extern const struct bus_type pnp_bus_type;
#if defined(CONFIG_PNP)
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 851a855d38..1f0ee2459f 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -83,6 +83,8 @@
/********** net/core/skbuff.c **********/
#define SKB_LIST_POISON_NEXT ((void *)(0x800 + POISON_POINTER_DELTA))
+/********** net/ **********/
+#define NET_PTR_POISON ((void *)(0x801 + POISON_POINTER_DELTA))
/********** kernel/bpf/ **********/
#define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
@@ -90,4 +92,7 @@
/********** VFS **********/
#define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))
+/********** lib/stackdepot.c **********/
+#define STACK_DEPOT_POISON ((void *)(0xD390 + POISON_POINTER_DELTA))
+
#endif
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index d607f51404..dc7b738de2 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -2,40 +2,16 @@
#ifndef _linux_POSIX_TIMERS_H
#define _linux_POSIX_TIMERS_H
-#include <linux/spinlock.h>
+#include <linux/alarmtimer.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/alarmtimer.h>
+#include <linux/posix-timers_types.h>
+#include <linux/spinlock.h>
#include <linux/timerqueue.h>
struct kernel_siginfo;
struct task_struct;
-/*
- * Bit fields within a clockid:
- *
- * The most significant 29 bits hold either a pid or a file descriptor.
- *
- * Bit 2 indicates whether a cpu clock refers to a thread or a process.
- *
- * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
- *
- * A clockid is invalid if bits 2, 1, and 0 are all set.
- */
-#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
-#define CPUCLOCK_PERTHREAD(clock) \
- (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
-
-#define CPUCLOCK_PERTHREAD_MASK 4
-#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
-#define CPUCLOCK_CLOCK_MASK 3
-#define CPUCLOCK_PROF 0
-#define CPUCLOCK_VIRT 1
-#define CPUCLOCK_SCHED 2
-#define CPUCLOCK_MAX 3
-#define CLOCKFD CPUCLOCK_MAX
-#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
-
static inline clockid_t make_process_cpuclock(const unsigned int pid,
const clockid_t clock)
{
@@ -109,44 +85,6 @@ static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp)
ctmr->node.expires = exp;
}
-/**
- * posix_cputimer_base - Container per posix CPU clock
- * @nextevt: Earliest-expiration cache
- * @tqhead: timerqueue head for cpu_timers
- */
-struct posix_cputimer_base {
- u64 nextevt;
- struct timerqueue_head tqhead;
-};
-
-/**
- * posix_cputimers - Container for posix CPU timer related data
- * @bases: Base container for posix CPU clocks
- * @timers_active: Timers are queued.
- * @expiry_active: Timer expiry is active. Used for
- * process wide timers to avoid multiple
- * task trying to handle expiry concurrently
- *
- * Used in task_struct and signal_struct
- */
-struct posix_cputimers {
- struct posix_cputimer_base bases[CPUCLOCK_MAX];
- unsigned int timers_active;
- unsigned int expiry_active;
-};
-
-/**
- * posix_cputimers_work - Container for task work based posix CPU timer expiry
- * @work: The task work to be scheduled
- * @mutex: Mutex held around expiry in context of this task work
- * @scheduled: @work has been scheduled already, no further processing
- */
-struct posix_cputimers_work {
- struct callback_head work;
- struct mutex mutex;
- unsigned int scheduled;
-};
-
static inline void posix_cputimers_init(struct posix_cputimers *pct)
{
memset(pct, 0, sizeof(*pct));
@@ -179,7 +117,6 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
.bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \
},
#else
-struct posix_cputimers { };
struct cpu_timer { };
#define INIT_CPU_TIMERS(s)
static inline void posix_cputimers_init(struct posix_cputimers *pct) { }
diff --git a/include/linux/posix-timers_types.h b/include/linux/posix-timers_types.h
new file mode 100644
index 0000000000..a4712c1008
--- /dev/null
+++ b/include/linux/posix-timers_types.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _linux_POSIX_TIMERS_TYPES_H
+#define _linux_POSIX_TIMERS_TYPES_H
+
+#include <linux/mutex_types.h>
+#include <linux/timerqueue_types.h>
+#include <linux/types.h>
+
+/*
+ * Bit fields within a clockid:
+ *
+ * The most significant 29 bits hold either a pid or a file descriptor.
+ *
+ * Bit 2 indicates whether a cpu clock refers to a thread or a process.
+ *
+ * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
+ *
+ * A clockid is invalid if bits 2, 1, and 0 are all set.
+ */
+#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
+#define CPUCLOCK_PERTHREAD(clock) \
+ (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
+
+#define CPUCLOCK_PERTHREAD_MASK 4
+#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
+#define CPUCLOCK_CLOCK_MASK 3
+#define CPUCLOCK_PROF 0
+#define CPUCLOCK_VIRT 1
+#define CPUCLOCK_SCHED 2
+#define CPUCLOCK_MAX 3
+#define CLOCKFD CPUCLOCK_MAX
+#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
+
+#ifdef CONFIG_POSIX_TIMERS
+
+/**
+ * posix_cputimer_base - Container per posix CPU clock
+ * @nextevt: Earliest-expiration cache
+ * @tqhead: timerqueue head for cpu_timers
+ */
+struct posix_cputimer_base {
+ u64 nextevt;
+ struct timerqueue_head tqhead;
+};
+
+/**
+ * posix_cputimers - Container for posix CPU timer related data
+ * @bases: Base container for posix CPU clocks
+ * @timers_active: Timers are queued.
+ * @expiry_active: Timer expiry is active. Used for
+ * process-wide timers to avoid multiple
+ * tasks trying to handle expiry concurrently
+ *
+ * Used in task_struct and signal_struct
+ */
+struct posix_cputimers {
+ struct posix_cputimer_base bases[CPUCLOCK_MAX];
+ unsigned int timers_active;
+ unsigned int expiry_active;
+};
+
+/**
+ * posix_cputimers_work - Container for task work based posix CPU timer expiry
+ * @work: The task work to be scheduled
+ * @mutex: Mutex held around expiry in context of this task work
+ * @scheduled: @work has been scheduled already, no further processing
+ */
+struct posix_cputimers_work {
+ struct callback_head work;
+ struct mutex mutex;
+ unsigned int scheduled;
+};
+
+#else /* CONFIG_POSIX_TIMERS */
+
+struct posix_cputimers { };
+
+#endif /* CONFIG_POSIX_TIMERS */
+
+#endif /* _linux_POSIX_TIMERS_TYPES_H */
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 7c8d65414a..7d8025fb74 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -83,5 +83,6 @@ struct bq27xxx_device_info {
void bq27xxx_battery_update(struct bq27xxx_device_info *di);
int bq27xxx_battery_setup(struct bq27xxx_device_info *di);
void bq27xxx_battery_teardown(struct bq27xxx_device_info *di);
+extern const struct dev_pm_ops bq27xxx_battery_battery_pm_ops;
#endif
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index f2ed5b72b3..f7f1e5251c 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <linux/once.h>
-#include <linux/percpu.h>
#include <linux/random.h>
struct rnd_state {
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 9aa6358a1a..7233e9cf1b 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -9,7 +9,7 @@
#include <linux/linkage.h>
#include <linux/cleanup.h>
-#include <linux/list.h>
+#include <linux/types.h>
/*
* We put the hardirq and softirq counter into the preemption
@@ -360,7 +360,9 @@ void preempt_notifier_unregister(struct preempt_notifier *notifier);
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
struct preempt_ops *ops)
{
- INIT_HLIST_NODE(&notifier->link);
+ /* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */
+ notifier->link.next = NULL;
+ notifier->link.pprev = NULL;
notifier->ops = ops;
}
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 8ef499ab3c..955e318600 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -273,6 +273,8 @@ static inline void printk_trigger_flush(void)
}
#endif
+bool this_cpu_in_panic(void);
+
#ifdef CONFIG_SMP
extern int __printk_cpu_sync_try_get(void);
extern void __printk_cpu_sync_wait(void);
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index de407e7c3b..0b2a898544 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -65,6 +65,7 @@ struct proc_fs_info {
kgid_t pid_gid;
enum proc_hidepid hide_pid;
enum proc_pidonly pidonly;
+ struct rcu_head rcu;
};
static inline struct proc_fs_info *proc_sb_info(struct super_block *sb)
diff --git a/include/linux/property.h b/include/linux/property.h
index 9f2585d705..e6516d0b7d 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -80,6 +80,16 @@ int fwnode_property_match_string(const struct fwnode_handle *fwnode,
bool fwnode_device_is_available(const struct fwnode_handle *fwnode);
+static inline bool fwnode_device_is_big_endian(const struct fwnode_handle *fwnode)
+{
+ if (fwnode_property_present(fwnode, "big-endian"))
+ return true;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
+ fwnode_property_present(fwnode, "native-endian"))
+ return true;
+ return false;
+}
+
static inline
bool fwnode_device_is_compatible(const struct fwnode_handle *fwnode, const char *compat)
{
@@ -87,6 +97,22 @@ bool fwnode_device_is_compatible(const struct fwnode_handle *fwnode, const char
}
/**
+ * device_is_big_endian - check if a device has BE registers
+ * @dev: Pointer to the struct device
+ *
+ * Returns: true if the device has a "big-endian" property, or if the kernel
+ * was compiled for BE *and* the device has a "native-endian" property.
+ * Returns false otherwise.
+ *
+ * Callers would nominally use ioread32be/iowrite32be if
+ * device_is_big_endian() == true, or readl/writel otherwise.
+ */
+static inline bool device_is_big_endian(const struct device *dev)
+{
+ return fwnode_device_is_big_endian(dev_fwnode(dev));
+}
+
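The device-level wrapper lets MMIO users pick accessors once, e.g. at probe time; a sketch (register name hypothetical):

	if (device_is_big_endian(dev))
		val = ioread32be(base + MY_REG_STATUS);
	else
		val = readl(base + MY_REG_STATUS);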
+/**
* device_is_compatible - match 'compatible' property of the device with a given string
* @dev: Pointer to the struct device
* @compat: The string to match 'compatible' property with
@@ -98,6 +124,18 @@ static inline bool device_is_compatible(const struct device *dev, const char *co
return fwnode_device_is_compatible(dev_fwnode(dev), compat);
}
+int fwnode_property_match_property_string(const struct fwnode_handle *fwnode,
+ const char *propname,
+ const char * const *array, size_t n);
+
+static inline
+int device_property_match_property_string(const struct device *dev,
+ const char *propname,
+ const char * const *array, size_t n)
+{
+ return fwnode_property_match_property_string(dev_fwnode(dev), propname, array, n);
+}
+
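The matcher returns the index of the matching array entry or a negative errno, which maps naturally onto mode tables; a sketch:

	static const char * const dr_modes[] = { "host", "peripheral", "otg" };

	ret = device_property_match_property_string(dev, "dr_mode", dr_modes,
						    ARRAY_SIZE(dr_modes));
	if (ret < 0)
		return ret;	/* property absent or value unknown */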
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int nargs, unsigned int index,
@@ -109,6 +147,7 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *fwnode_get_name(const struct fwnode_handle *fwnode);
const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
+bool fwnode_name_eq(const struct fwnode_handle *fwnode, const char *name);
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
@@ -489,6 +528,13 @@ struct software_node {
const struct property_entry *properties;
};
+#define SOFTWARE_NODE(_name_, _properties_, _parent_) \
+ (struct software_node) { \
+ .name = _name_, \
+ .properties = _properties_, \
+ .parent = _parent_, \
+ }
+
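The initializer macro shortens open-coded struct setup; a sketch (node and property names hypothetical, priv->swnode being a struct software_node):

	static const struct property_entry ab_props[] = {
		PROPERTY_ENTRY_U32("reg", 0),
		{ }
	};

	priv->swnode = SOFTWARE_NODE("ab", ab_props, NULL);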
bool is_software_node(const struct fwnode_handle *fwnode);
const struct software_node *
to_software_node(const struct fwnode_handle *fwnode);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index cda3597b84..fcc2c4496f 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -69,7 +69,6 @@ struct pwm_state {
* @label: name of the PWM device
* @flags: flags associated with the PWM device
* @hwpwm: per-chip relative index of the PWM device
- * @pwm: global index of the PWM device
* @chip: PWM chip providing this PWM device
* @args: PWM arguments
* @state: last applied state
@@ -79,7 +78,6 @@ struct pwm_device {
const char *label;
unsigned long flags;
unsigned int hwpwm;
- unsigned int pwm;
struct pwm_chip *chip;
struct pwm_args args;
@@ -93,8 +91,8 @@ struct pwm_device {
* @state: state to fill with the current PWM state
*
* The returned PWM state represents the state that was applied by a previous call to
- * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to
- * hardware. If pwm_apply_state() was never called, this returns either the current hardware
+ * pwm_apply_might_sleep(). Drivers may have to slightly tweak that state before programming it to
+ * hardware. If pwm_apply_might_sleep() was never called, this returns either the current hardware
* state (if supported) or the default settings.
*/
static inline void pwm_get_state(const struct pwm_device *pwm,
@@ -112,12 +110,6 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm)
return state.enabled;
}
-static inline void pwm_set_period(struct pwm_device *pwm, u64 period)
-{
- if (pwm)
- pwm->state.period = period;
-}
-
static inline u64 pwm_get_period(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -127,12 +119,6 @@ static inline u64 pwm_get_period(const struct pwm_device *pwm)
return state.period;
}
-static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
-{
- if (pwm)
- pwm->state.duty_cycle = duty;
-}
-
static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -158,20 +144,20 @@ static inline void pwm_get_args(const struct pwm_device *pwm,
}
/**
- * pwm_init_state() - prepare a new state to be applied with pwm_apply_state()
+ * pwm_init_state() - prepare a new state to be applied with pwm_apply_might_sleep()
* @pwm: PWM device
* @state: state to fill with the prepared PWM state
*
 * This function prepares a state that can later be tweaked and applied
- * to the PWM device with pwm_apply_state(). This is a convenient function
+ * to the PWM device with pwm_apply_might_sleep(). This is a convenient function
 * that first retrieves the current PWM state and then replaces the period
* and polarity fields with the reference values defined in pwm->args.
* Once the function returns, you can adjust the ->enabled and ->duty_cycle
- * fields according to your needs before calling pwm_apply_state().
+ * fields according to your needs before calling pwm_apply_might_sleep().
*
* ->duty_cycle is initially set to zero to avoid cases where the current
 * ->duty_cycle value exceeds the pwm_args->period one, which would trigger
- * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle
+ * an error if the user calls pwm_apply_might_sleep() without adjusting ->duty_cycle
* first.
*/
static inline void pwm_init_state(const struct pwm_device *pwm,
@@ -227,7 +213,7 @@ pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
*
* pwm_init_state(pwm, &state);
* pwm_set_relative_duty_cycle(&state, 50, 100);
- * pwm_apply_state(pwm, &state);
+ * pwm_apply_might_sleep(pwm, &state);
*
 * This function returns -EINVAL if @duty_cycle and/or @scale are
* inconsistent (@scale == 0 or @duty_cycle > @scale).
@@ -282,32 +268,33 @@ struct pwm_ops {
* @dev: device providing the PWMs
* @ops: callbacks for this PWM controller
* @owner: module providing this chip
- * @base: number of first PWM controlled by this chip
+ * @id: unique number of this PWM chip
* @npwm: number of PWMs controlled by this chip
* @of_xlate: request a PWM device given a device tree PWM specifier
* @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
- * @list: list node for internal use
+ * @atomic: can the driver's ->apply() be called in atomic context
* @pwms: array of PWM devices allocated by the framework
*/
struct pwm_chip {
struct device *dev;
const struct pwm_ops *ops;
struct module *owner;
- int base;
+ unsigned int id;
unsigned int npwm;
struct pwm_device * (*of_xlate)(struct pwm_chip *chip,
const struct of_phandle_args *args);
unsigned int of_pwm_n_cells;
+ bool atomic;
/* only used internally by the PWM framework */
- struct list_head list;
struct pwm_device *pwms;
};
#if IS_ENABLED(CONFIG_PWM)
/* PWM user APIs */
-int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state);
int pwm_adjust_config(struct pwm_device *pwm);
/**
@@ -335,7 +322,7 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
state.duty_cycle = duty_ns;
state.period = period_ns;
- return pwm_apply_state(pwm, &state);
+ return pwm_apply_might_sleep(pwm, &state);
}
/**
@@ -356,7 +343,7 @@ static inline int pwm_enable(struct pwm_device *pwm)
return 0;
state.enabled = true;
- return pwm_apply_state(pwm, &state);
+ return pwm_apply_might_sleep(pwm, &state);
}
/**
@@ -375,7 +362,18 @@ static inline void pwm_disable(struct pwm_device *pwm)
return;
state.enabled = false;
- pwm_apply_state(pwm, &state);
+ pwm_apply_might_sleep(pwm, &state);
+}
+
+/**
+ * pwm_might_sleep() - is pwm_apply_atomic() supported?
+ * @pwm: PWM device
+ *
+ * Returns: false if pwm_apply_atomic() can be called from atomic context.
+ */
+static inline bool pwm_might_sleep(struct pwm_device *pwm)
+{
+ return !pwm->chip->atomic;
}
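
A hedged sketch of the intended usage — the helper below is invented; the point is that atomic-context callers should gate hardware updates on pwm_might_sleep():

    #include <linux/pwm.h>

    /* e.g. from a hrtimer callback: only proceed if the chip is atomic-safe */
    static int foo_update_from_atomic(struct pwm_device *pwm, u64 duty_ns)
    {
        struct pwm_state state;

        if (pwm_might_sleep(pwm))
            return -EOPNOTSUPP;     /* driver's ->apply() may sleep */

        pwm_get_state(pwm, &state);
        state.duty_cycle = duty_ns;
        return pwm_apply_atomic(pwm, &state);
    }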
/* PWM provider APIs */
@@ -406,16 +404,27 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
struct fwnode_handle *fwnode,
const char *con_id);
#else
-static inline int pwm_apply_state(struct pwm_device *pwm,
- const struct pwm_state *state)
+static inline bool pwm_might_sleep(struct pwm_device *pwm)
+{
+ return true;
+}
+
+static inline int pwm_apply_might_sleep(struct pwm_device *pwm,
+ const struct pwm_state *state)
{
might_sleep();
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int pwm_apply_atomic(struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ return -EOPNOTSUPP;
}
static inline int pwm_adjust_config(struct pwm_device *pwm)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
@@ -524,7 +533,14 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
state.period = pwm->args.period;
state.usage_power = false;
- pwm_apply_state(pwm, &state);
+ pwm_apply_might_sleep(pwm, &state);
+}
+
+/* only for backwards-compatibility, new code should not use this */
+static inline int pwm_apply_state(struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ return pwm_apply_might_sleep(pwm, state);
}
struct pwm_lookup {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 4fa4ef0a17..06cc888819 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -74,7 +74,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
int dquot_alloc_inode(struct inode *inode);
-int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
+void dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
void dquot_free_inode(struct inode *inode);
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
@@ -257,10 +257,9 @@ static inline void __dquot_free_space(struct inode *inode, qsize_t number,
inode_sub_bytes(inode, number);
}
-static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+static inline void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
inode_add_bytes(inode, number);
- return 0;
}
static inline int dquot_reclaim_space_nodirty(struct inode *inode,
@@ -358,14 +357,10 @@ static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE);
}
-static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
+static inline void dquot_claim_block(struct inode *inode, qsize_t nr)
{
- int ret;
-
- ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
- if (!ret)
- mark_inode_dirty_sync(inode);
- return ret;
+ dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
+ mark_inode_dirty_sync(inode);
}
static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr)
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index 5d868505a9..6d92b68efb 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -80,7 +80,7 @@ DECLARE_PER_CPU(u32, kstack_offset);
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
&randomize_kstack_offset)) { \
u32 offset = raw_cpu_read(kstack_offset); \
- offset ^= (rand); \
+ offset = ror32(offset, 5) ^ (rand); \
raw_cpu_write(kstack_offset, offset); \
} \
} while (0)
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index d29740be48..3dc1e58865 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -355,7 +355,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
})
/**
- * list_next_or_null_rcu - get the first element from a list
+ * list_next_or_null_rcu - get the next element from a list
* @head: the head for the list.
* @ptr: the list head to take the next element from.
* @type: the type of the struct this is embedded in.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e1bb04e578..17d7ed5f3a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -34,9 +34,6 @@
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
-#define ulong2long(a) (*(long *)(&(a)))
-#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
-#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
@@ -187,9 +184,9 @@ void rcu_tasks_trace_qs_blkd(struct task_struct *t);
do { \
int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
\
- if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) && \
+ if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
likely(!___rttq_nesting)) { \
- rcu_trc_cmpxchg_need_qs((t), 0, TRC_NEED_QS_CHECKED); \
+ rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
} else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
!READ_ONCE((t)->trc_reader_special.b.blocked)) { \
rcu_tasks_trace_qs_blkd(t); \
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 5e0f74f2f8..d07f084880 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -8,6 +8,7 @@
#include <linux/rcupdate.h>
#include <linux/completion.h>
+#include <linux/sched.h>
/*
* Structure allowing asynchronous waiting on RCU.
@@ -55,4 +56,13 @@ do { \
#define synchronize_rcu_mult(...) \
_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+static inline void cond_resched_rcu(void)
+{
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
+ rcu_read_unlock();
+ cond_resched();
+ rcu_read_lock();
+#endif
+}
+
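
A sketch of the usual calling pattern (table and helpers invented): the walk holds rcu_read_lock() but yields periodically; any pointer obtained before cond_resched_rcu() must not be used after it, since the read-side critical section may have been dropped and re-entered:

    static void foo_scan(void)
    {
        int i;

        rcu_read_lock();
        for (i = 0; i < FOO_NR_ENTRIES; i++) {
            struct foo *f = rcu_dereference(foo_table[i]);

            if (f)
                foo_inspect(f);
            /* may briefly leave the RCU read side; 'f' is stale afterwards */
            cond_resched_rcu();
        }
        rcu_read_unlock();
    }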
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index c4cc3b89ce..abcdde4df6 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -177,7 +177,17 @@ void ctrl_alt_del(void);
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
-void hw_protection_shutdown(const char *reason, int ms_until_forced);
+void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown);
+
+static inline void hw_protection_reboot(const char *reason, int ms_until_forced)
+{
+ __hw_protection_shutdown(reason, ms_until_forced, false);
+}
+
+static inline void hw_protection_shutdown(const char *reason, int ms_until_forced)
+{
+ __hw_protection_shutdown(reason, ms_until_forced, true);
+}
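
An illustrative call site (the temperature check and threshold are invented): both variants take a reason string and a grace period in milliseconds after which the action is forced:

    static void foo_check_temp(int temp_mC)
    {
        if (temp_mC > FOO_CRIT_MC)
            /* orderly reboot now; forced after a 5000 ms grace period */
            hw_protection_reboot("foo: temperature critical", 5000);
    }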
/*
* Emergency restart, callable from an interrupt handler.
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a62fcca974..85c6df0d1b 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -96,22 +96,11 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
+#include <linux/refcount_types.h>
#include <linux/spinlock_types.h>
struct mutex;
-/**
- * typedef refcount_t - variant of atomic_t specialized for reference counts
- * @refs: atomic_t counter field
- *
- * The counter saturates at REFCOUNT_SATURATED and will not move once
- * there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free bugs.
- */
-typedef struct refcount_struct {
- atomic_t refs;
-} refcount_t;
-
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX INT_MAX
#define REFCOUNT_SATURATED (INT_MIN / 2)
diff --git a/include/linux/refcount_types.h b/include/linux/refcount_types.h
new file mode 100644
index 0000000000..162004f06e
--- /dev/null
+++ b/include/linux/refcount_types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_REFCOUNT_TYPES_H
+#define _LINUX_REFCOUNT_TYPES_H
+
+#include <linux/types.h>
+
+/**
+ * typedef refcount_t - variant of atomic_t specialized for reference counts
+ * @refs: atomic_t counter field
+ *
+ * The counter saturates at REFCOUNT_SATURATED and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free bugs.
+ */
+typedef struct refcount_struct {
+ atomic_t refs;
+} refcount_t;
+
+#endif /* _LINUX_REFCOUNT_TYPES_H */
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 39b666b40e..4660582a33 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -33,6 +33,7 @@
#include <linux/err.h>
#include <linux/suspend.h>
+#include <regulator/regulator.h>
struct device;
struct notifier_block;
@@ -85,52 +86,6 @@ struct regulator_dev;
#define REGULATOR_MODE_STANDBY 0x8
/*
- * Regulator notifier events.
- *
- * UNDER_VOLTAGE Regulator output is under voltage.
- * OVER_CURRENT Regulator output current is too high.
- * REGULATION_OUT Regulator output is out of regulation.
- * FAIL Regulator output has failed.
- * OVER_TEMP Regulator over temp.
- * FORCE_DISABLE Regulator forcibly shut down by software.
- * VOLTAGE_CHANGE Regulator voltage changed.
- * Data passed is old voltage cast to (void *).
- * DISABLE Regulator was disabled.
- * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
- * Data passed is "struct pre_voltage_change_data"
- * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
- * Data passed is old voltage cast to (void *).
- * PRE_DISABLE Regulator is about to be disabled
- * ABORT_DISABLE Regulator disable failed for some reason
- *
- * NOTE: These events can be OR'ed together when passed into handler.
- */
-
-#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
-#define REGULATOR_EVENT_OVER_CURRENT 0x02
-#define REGULATOR_EVENT_REGULATION_OUT 0x04
-#define REGULATOR_EVENT_FAIL 0x08
-#define REGULATOR_EVENT_OVER_TEMP 0x10
-#define REGULATOR_EVENT_FORCE_DISABLE 0x20
-#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
-#define REGULATOR_EVENT_DISABLE 0x80
-#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
-#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
-#define REGULATOR_EVENT_PRE_DISABLE 0x400
-#define REGULATOR_EVENT_ABORT_DISABLE 0x800
-#define REGULATOR_EVENT_ENABLE 0x1000
-/*
- * Following notifications should be emitted only if detected condition
- * is such that the HW is likely to still be working but consumers should
- * take a recovery action to prevent problems esacalating into errors.
- */
-#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
-#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
-#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
-#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
-#define REGULATOR_EVENT_WARN_MASK 0x1E000
-
-/*
* Regulator errors that can be queried using regulator_get_error_flags
*
* UNDER_VOLTAGE Regulator output is under voltage.
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4b7eceb382..22a07c0900 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -51,12 +51,7 @@ enum regulator_detection_severity {
/* Initialize struct linear_range for regulators */
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
-{ \
- .min = _min_uV, \
- .min_sel = _min_sel, \
- .max_sel = _max_sel, \
- .step = _step_uV, \
-}
+ LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV)
/**
* struct regulator_ops - regulator operations.
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 621b7f4a36..0cd76d2647 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -49,6 +49,13 @@ struct regulator;
#define DISABLE_IN_SUSPEND 1
#define ENABLE_IN_SUSPEND 2
+/*
+ * Default time window (in milliseconds) following a critical under-voltage
+ * event during which less critical actions can be safely carried out by the
+ * system.
+ */
+#define REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS 10
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -127,6 +134,8 @@ struct notification_limit {
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
* @soft_start: Enable soft start so that voltage ramps slowly.
* @pull_down: Enable pull down when regulator is disabled.
+ * @system_critical: Set if the regulator is critical to system stability or
+ * functionality.
* @over_current_protection: Auto disable on over current event.
*
* @over_current_detection: Configure over current limits.
@@ -153,6 +162,13 @@ struct notification_limit {
* regulator_active_discharge values are used for
* initialisation.
* @enable_time: Turn-on time of the rails (unit: microseconds)
+ * @uv_less_critical_window_ms: Specifies the time window (in milliseconds)
+ * following a critical under-voltage (UV) event
+ * during which less critical actions can be
+ * safely carried out by the system (for example
+ * logging). After this time window more critical
+ * actions should be done (for example prevent
+ * HW damage).
*/
struct regulation_constraints {
@@ -204,6 +220,7 @@ struct regulation_constraints {
unsigned int settling_time_up;
unsigned int settling_time_down;
unsigned int enable_time;
+ unsigned int uv_less_critical_window_ms;
unsigned int active_discharge;
@@ -214,6 +231,7 @@ struct regulation_constraints {
unsigned ramp_disable:1; /* disable ramp delay */
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
+ unsigned system_critical:1; /* critical to system stability */
unsigned over_current_protection:1; /* auto disable on over current */
unsigned over_current_detection:1; /* notify on over current */
unsigned over_voltage_detection:1; /* notify on over voltage */
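
A hedged sketch of how the new constraint fields could look in platform data — all values are invented, and it is an assumption here that the core falls back to REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS when uv_less_critical_window_ms is left at zero:

    static struct regulation_constraints foo_vdd_constraints = {
        .name = "vdd-core",
        .system_critical = 1,               /* losing this rail halts the system */
        .uv_less_critical_window_ms = 20,   /* 20 ms for logging etc. after UV */
    };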
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 980a655944..13f17676c5 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -7,8 +7,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/time64.h>
+struct __kernel_timespec;
struct timespec;
struct old_timespec32;
struct pollfd;
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
index f8f3e958e9..e0135e0ada 100644
--- a/include/linux/resume_user_mode.h
+++ b/include/linux/resume_user_mode.h
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/task_work.h>
#include <linux/memcontrol.h>
+#include <linux/rseq.h>
#include <linux/blk-cgroup.h>
/**
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
index 57467cbf4c..b6f3797277 100644
--- a/include/linux/rhashtable-types.h
+++ b/include/linux/rhashtable-types.h
@@ -12,7 +12,7 @@
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
+#include <linux/workqueue_types.h>
struct rhash_head {
struct rhash_head __rcu *next;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index ded528d23f..dc5ae4e96a 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -99,7 +99,8 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
})
typedef bool (*ring_buffer_cond_fn)(void *data);
-int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
+ ring_buffer_cond_fn cond, void *data);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
@@ -142,6 +143,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
@@ -192,15 +194,24 @@ bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
-void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
+struct buffer_data_read_page;
+struct buffer_data_read_page *
+ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
+ struct buffer_data_read_page *page);
+int ring_buffer_read_page(struct trace_buffer *buffer,
+ struct buffer_data_read_page *data_page,
size_t len, int cpu, int full);
+void *ring_buffer_read_page_data(struct buffer_data_read_page *page);
struct trace_seq;
int ring_buffer_print_entry_header(struct trace_seq *s);
-int ring_buffer_print_page_header(struct trace_seq *s);
+int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);
+
+int ring_buffer_subbuf_order_get(struct trace_buffer *buffer);
+int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
+int ring_buffer_subbuf_size_get(struct trace_buffer *buffer);
enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 3c2fc291b0..b7944a8336 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -121,6 +121,11 @@ static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
down_write(&anon_vma->root->rwsem);
}
+static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
+{
+ return down_write_trylock(&anon_vma->root->rwsem);
+}
+
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
up_write(&anon_vma->root->rwsem);
@@ -172,133 +177,323 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio);
typedef int __bitwise rmap_t;
/*
- * No special request: if the page is a subpage of a compound page, it is
- * mapped via a PTE. The mapped (sub)page is possibly shared between processes.
+ * No special request: A mapped anonymous (sub)page is possibly shared between
+ * processes.
*/
#define RMAP_NONE ((__force rmap_t)0)
-/* The (sub)page is exclusive to a single process. */
+/* The anonymous (sub)page is exclusive to a single process. */
#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
/*
- * The compound page is not mapped via PTEs, but instead via a single PMD and
- * should be accounted accordingly.
+ * Internally, we're using an enum to specify the granularity. We make the
+ * compiler emit specialized code for each granularity.
*/
-#define RMAP_COMPOUND ((__force rmap_t)BIT(1))
+enum rmap_level {
+ RMAP_LEVEL_PTE = 0,
+ RMAP_LEVEL_PMD,
+};
+
+static inline void __folio_rmap_sanity_checks(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ /* hugetlb folios are handled separately. */
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+
+ /*
+ * TODO: we get driver-allocated folios that have nothing to do with
+ * the rmap using vm_insert_page(); therefore, we cannot assume that
+ * folio_test_large_rmappable() holds for large folios. We should
+ * handle any desired mapcount+stats accounting for these folios in
+ * VM_MIXEDMAP VMAs separately, and then sanity-check here that
+ * we really only get rmappable folios.
+ */
+
+ VM_WARN_ON_ONCE(nr_pages <= 0);
+ VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
+ VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ break;
+ case RMAP_LEVEL_PMD:
+ /*
+ * We don't support folios larger than a single PMD yet. So
+ * when RMAP_LEVEL_PMD is set, we assume that we are creating
+ * a single "entire" mapping of the folio.
+ */
+ VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
+ VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
+ break;
+ default:
+ VM_WARN_ON_ONCE(true);
+ }
+}
/*
* rmap interfaces called when adding or removing pte of page
*/
void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
-void page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long address, rmap_t flags);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long address);
+void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
+#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
+ folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
+void folio_add_anon_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
-void page_add_file_rmap(struct page *, struct vm_area_struct *,
- bool compound);
-void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr,
- struct vm_area_struct *, bool compound);
-void page_remove_rmap(struct page *, struct vm_area_struct *,
- bool compound);
-
-void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *,
+void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_add_file_rmap_pte(folio, page, vma) \
+ folio_add_file_rmap_ptes(folio, page, 1, vma)
+void folio_add_file_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_remove_rmap_pte(folio, page, vma) \
+ folio_remove_rmap_ptes(folio, page, 1, vma)
+void folio_remove_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+
+void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
-void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
-static inline void __page_dup_rmap(struct page *page, bool compound)
+/* See folio_try_dup_anon_rmap_*() */
+static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
+ struct vm_area_struct *vma)
{
- if (compound) {
- struct folio *folio = (struct folio *)page;
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
- VM_BUG_ON_PAGE(compound && !PageHead(page), page);
- atomic_inc(&folio->_entire_mapcount);
- } else {
- atomic_inc(&page->_mapcount);
+ if (PageAnonExclusive(&folio->page)) {
+ if (unlikely(folio_needs_cow_for_dma(vma, folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
}
+ atomic_inc(&folio->_entire_mapcount);
+ return 0;
+}
+
+/* See folio_try_share_anon_rmap_*() */
+static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);
+
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb();
+
+ if (unlikely(folio_maybe_dma_pinned(folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb__after_atomic();
+ return 0;
+}
+
+static inline void hugetlb_add_file_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
+
+ atomic_inc(&folio->_entire_mapcount);
+}
+
+static inline void hugetlb_remove_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
+ atomic_dec(&folio->_entire_mapcount);
}
-static inline void page_dup_file_rmap(struct page *page, bool compound)
+static __always_inline void __folio_dup_file_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
{
- __page_dup_rmap(page, compound);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ do {
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ atomic_inc(&folio->_entire_mapcount);
+ break;
+ }
}
/**
- * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped
- * anonymous page
- * @page: the page to duplicate the mapping for
- * @compound: the page is mapped as compound or as a small page
- * @vma: the source vma
+ * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
*
- * The caller needs to hold the PT lock and the vma->vma_mm->write_protect_seq.
+ * The page range of the folio is defined by [page, page + nr_pages)
*
- * Duplicating the mapping can only fail if the page may be pinned; device
- * private pages cannot get pinned and consequently this function cannot fail.
+ * The caller needs to hold the page table lock.
+ */
+static inline void folio_dup_file_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages)
+{
+ __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
+}
+#define folio_dup_file_rmap_pte(folio, page) \
+ folio_dup_file_rmap_ptes(folio, page, 1)
+
+/**
+ * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
*
- * If duplicating the mapping succeeds, the page has to be mapped R/O into
- * the parent and the child. It must *not* get mapped writable after this call.
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
- * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ * The caller needs to hold the page table lock.
*/
-static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
- struct vm_area_struct *vma)
+static inline void folio_dup_file_rmap_pmd(struct folio *folio,
+ struct page *page)
{
- VM_BUG_ON_PAGE(!PageAnon(page), page);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
- /*
- * No need to check+clear for already shared pages, including KSM
- * pages.
- */
- if (!PageAnonExclusive(page))
- goto dup;
+static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *src_vma,
+ enum rmap_level level)
+{
+ bool maybe_pinned;
+ int i;
+
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
/*
- * If this page may have been pinned by the parent process,
- * don't allow to duplicate the mapping but instead require to e.g.,
- * copy the page immediately for the child so that we'll always
- * guarantee the pinned page won't be randomly replaced in the
+ * If this folio may have been pinned by the parent process,
+ * don't allow to duplicate the mappings but instead require to e.g.,
+ * copy the subpage immediately for the child so that we'll always
+ * guarantee the pinned folio won't be randomly replaced in the
* future on write faults.
*/
- if (likely(!is_device_private_page(page)) &&
- unlikely(page_needs_cow_for_dma(vma, page)))
- return -EBUSY;
+ maybe_pinned = likely(!folio_is_device_private(folio)) &&
+ unlikely(folio_needs_cow_for_dma(src_vma, folio));
- ClearPageAnonExclusive(page);
/*
- * It's okay to share the anon page between both processes, mapping
- * the page R/O into both processes.
+ * No need to check+clear for already shared PTEs/PMDs of the
+	 * folio. But if any page is PageAnonExclusive, we must fall back to
+	 * copying if the folio may be pinned.
*/
-dup:
- __page_dup_rmap(page, compound);
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ if (unlikely(maybe_pinned)) {
+ for (i = 0; i < nr_pages; i++)
+ if (PageAnonExclusive(page + i))
+ return -EBUSY;
+ }
+ do {
+ if (PageAnonExclusive(page))
+ ClearPageAnonExclusive(page);
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ if (PageAnonExclusive(page)) {
+ if (unlikely(maybe_pinned))
+ return -EBUSY;
+ ClearPageAnonExclusive(page);
+ }
+ atomic_inc(&folio->_entire_mapcount);
+ break;
+ }
return 0;
}
/**
- * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly
- * shared to prepare for KSM or temporary unmapping
- * @page: the exclusive anonymous page to try marking possibly shared
+ * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
+ * @src_vma: The vm area from which the mappings are duplicated
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
*
- * The caller needs to hold the PT lock and has to have the page table entry
- * cleared/invalidated.
+ * The caller needs to hold the page table lock and the
+ * vma->vm_mm->write_protect_seq.
*
- * This is similar to page_try_dup_anon_rmap(), however, not used during fork()
- * to duplicate a mapping, but instead to prepare for KSM or temporarily
- * unmapping a page (swap, migration) via page_remove_rmap().
+ * Duplicating the mappings can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
*
- * Marking the page shared can only fail if the page may be pinned; device
- * private pages cannot get pinned and consequently this function cannot fail.
+ * If duplicating the mappings succeeded, the duplicated PTEs have to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * succeeded.
*
- * Returns 0 if marking the page possibly shared succeeded. Returns -EBUSY
- * otherwise.
+ * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
*/
-static inline int page_try_share_anon_rmap(struct page *page)
+static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *src_vma)
{
- VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page);
+ return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
+ RMAP_LEVEL_PTE);
+}
+#define folio_try_dup_anon_rmap_pte(folio, page, vma) \
+ folio_try_dup_anon_rmap_ptes(folio, page, 1, vma)
- /* device private pages cannot get pinned via GUP. */
- if (unlikely(is_device_private_page(page))) {
+/**
+ * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
+ * @src_vma: The vm area from which the mapping is duplicated
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and the
+ * vma->vm_mm->write_protect_seq.
+ *
+ * Duplicating the mapping can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
+ *
+ * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * succeeded.
+ *
+ * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ */
+static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
+ struct page *page, struct vm_area_struct *src_vma)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
+}
+
+static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ /* device private folios cannot get pinned via GUP. */
+ if (unlikely(folio_is_device_private(folio))) {
ClearPageAnonExclusive(page);
return 0;
}
@@ -349,7 +544,7 @@ static inline int page_try_share_anon_rmap(struct page *page)
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
smp_mb();
- if (unlikely(page_maybe_dma_pinned(page)))
+ if (unlikely(folio_maybe_dma_pinned(folio)))
return -EBUSY;
ClearPageAnonExclusive(page);
@@ -362,6 +557,68 @@ static inline int page_try_share_anon_rmap(struct page *page)
return 0;
}
+/**
+ * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page
+ * mapped by a PTE possibly shared to prepare
+ * for KSM or temporary unmapping
+ * @folio: The folio to share a mapping of
+ * @page: The mapped exclusive page
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pte(), however, not used during
+ * fork() to duplicate mappings, but instead to prepare for KSM or temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pte().
+ *
+ * Marking the mapped page shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function cannot
+ * fail.
+ *
+ * Returns 0 if marking the mapped page possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
+ struct page *page)
+{
+ return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+}
+
+/**
+ * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page
+ * range mapped by a PMD possibly shared to
+ * prepare for temporary unmapping
+ * @folio: The folio to share the mapping of
+ * @page: The first page to share the mapping of
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pmd(), however, not used during
+ * fork() to duplicate a mapping, but instead to prepare for temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd().
+ *
+ * Marking the mapped pages shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function cannot
+ * fail.
+ *
+ * Returns 0 if marking the mapped pages possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
+ struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
+}
+
/*
* Called from mm/vmscan.c to handle paging out
*/
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
new file mode 100644
index 0000000000..bc8af3eb55
--- /dev/null
+++ b/include/linux/rseq.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _LINUX_RSEQ_H
+#define _LINUX_RSEQ_H
+
+#ifdef CONFIG_RSEQ
+
+#include <linux/preempt.h>
+#include <linux/sched.h>
+
+/*
+ * Map the event mask on the user-space ABI enum rseq_cs_flags
+ * for direct mask checks.
+ */
+enum rseq_event_mask_bits {
+ RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
+ RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
+ RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
+};
+
+enum rseq_event_mask {
+ RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
+ RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
+ RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
+};
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+ if (t->rseq)
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+}
+
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
+
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+ if (current->rseq)
+ __rseq_handle_notify_resume(ksig, regs);
+}
+
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+ preempt_disable();
+ __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+ preempt_enable();
+ rseq_handle_notify_resume(ksig, regs);
+}
+
+/* rseq_preempt() requires preemption to be disabled. */
+static inline void rseq_preempt(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/* rseq_migrate() requires preemption to be disabled. */
+static inline void rseq_migrate(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/*
+ * If parent process has a registered restartable sequences area, the
+ * child inherits. Unregister rseq for a clone with CLONE_VM set.
+ */
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+ if (clone_flags & CLONE_VM) {
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+ } else {
+ t->rseq = current->rseq;
+ t->rseq_len = current->rseq_len;
+ t->rseq_sig = current->rseq_sig;
+ t->rseq_event_mask = current->rseq_event_mask;
+ }
+}
+
+static inline void rseq_execve(struct task_struct *t)
+{
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+}
+
+#else
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+}
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+}
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+}
+static inline void rseq_preempt(struct task_struct *t)
+{
+}
+static inline void rseq_migrate(struct task_struct *t)
+{
+}
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+}
+static inline void rseq_execve(struct task_struct *t)
+{
+}
+
+#endif
+
+#ifdef CONFIG_DEBUG_RSEQ
+
+void rseq_syscall(struct pt_regs *regs);
+
+#else
+
+static inline void rseq_syscall(struct pt_regs *regs)
+{
+}
+
+#endif
+
+#endif /* _LINUX_RSEQ_H */
diff --git a/include/linux/rslib.h b/include/linux/rslib.h
index 238bb85243..a04dacbdc8 100644
--- a/include/linux/rslib.h
+++ b/include/linux/rslib.h
@@ -10,7 +10,6 @@
#ifndef _RSLIB_H_
#define _RSLIB_H_
-#include <linux/list.h>
#include <linux/types.h> /* for gfp_t */
#include <linux/gfp.h> /* for GFP_KERNEL */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 3d6cf306cd..410529fca1 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -10,6 +10,13 @@
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
+
+static inline int rtnetlink_maybe_send(struct sk_buff *skb, struct net *net,
+ u32 pid, u32 group, int echo)
+{
+ return !skb ? 0 : rtnetlink_send(skb, net, pid, group, echo);
+}
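
A minimal sketch of the intended shape of callers — the builder is invented; the point is that a NULL skb short-circuits to 0 instead of crashing in rtnetlink_send():

    static int foo_notify(struct net *net, u32 portid)
    {
        struct sk_buff *skb = foo_build_skb(net);   /* may return NULL */

        return rtnetlink_maybe_send(skb, net, portid, RTNLGRP_LINK, 0);
    }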
+
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
u32 group, const struct nlmsghdr *nlh, gfp_t flags);
@@ -72,6 +79,18 @@ static inline bool lockdep_rtnl_is_held(void)
#define rtnl_dereference(p) \
rcu_dereference_protected(p, lockdep_rtnl_is_held())
+/**
+ * rcu_replace_pointer_rtnl - replace an RCU pointer under rtnl_lock, returning
+ * its old value
+ * @rp: RCU pointer, whose value is returned
+ * @p: regular pointer
+ *
+ * Perform a replacement under rtnl_lock, where @rp is an RCU-annotated
+ * pointer. The old value of @rp is returned, and @rp is set to @p.
+ */
+#define rcu_replace_pointer_rtnl(rp, p) \
+ rcu_replace_pointer(rp, p, lockdep_rtnl_is_held())
+
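
A sketch of the classic swap-then-free pattern under RTNL — the structs and field names are invented for illustration:

    struct foo_cfg { int mtu; };
    struct foo_dev { struct foo_cfg __rcu *cfg; };

    static void foo_swap_cfg(struct foo_dev *fd, struct foo_cfg *new_cfg)
    {
        struct foo_cfg *old;

        ASSERT_RTNL();                  /* rtnl_lock must already be held */
        old = rcu_replace_pointer_rtnl(fd->cfg, new_cfg);
        synchronize_rcu();              /* let readers of the old config drain */
        kfree(old);
    }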
static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
return rtnl_dereference(dev->ingress_queue);
@@ -130,4 +149,26 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
extern void rtnl_offload_xstats_notify(struct net_device *dev);
+static inline int rtnl_has_listeners(const struct net *net, u32 group)
+{
+ struct sock *rtnl = net->rtnl;
+
+ return netlink_has_listeners(rtnl, group);
+}
+
+/**
+ * rtnl_notify_needed - check if notification is needed
+ * @net: Pointer to the net namespace
+ * @nlflags: netlink ingress message flags
+ * @group: rtnl group
+ *
+ * Based on the ingress message flags and rtnl group, returns true
+ * if a notification is needed, false otherwise.
+ */
+static inline bool
+rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group)
+{
+ return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group);
+}
+
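
A sketch of the intended fast path (the fill helper is invented): skip allocating and filling a skb when no echo was requested and nobody listens on the group:

    static void foo_maybe_notify(struct net *net, const struct nlmsghdr *nlh)
    {
        struct sk_buff *skb;

        if (!rtnl_notify_needed(net, nlh->nlmsg_flags, RTNLGRP_LINK))
            return;     /* no echo requested, no listeners */

        skb = foo_fill_link_notification(net);      /* invented builder */
        rtnetlink_maybe_send(skb, net, 0, RTNLGRP_LINK, 0);
    }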
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 534038d962..4612ef09a0 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -60,6 +60,7 @@
#define SD_EXIST (1 << 16)
#define DELINK_INT GPIO0_INT
#define MS_OC_INT (1 << 23)
+#define SD_OVP_INT (1 << 23)
#define SD_OC_INT (1 << 22)
#define CARD_INT (XD_INT | MS_INT | SD_INT)
@@ -80,6 +81,7 @@
#define OC_INT_EN (1 << 23)
#define DELINK_INT_EN GPIO0_INT_EN
#define MS_OC_INT_EN (1 << 23)
+#define SD_OVP_INT_EN (1 << 23)
#define SD_OC_INT_EN (1 << 22)
#define RTSX_DUM_REG 0x1C
@@ -583,6 +585,7 @@
#define OBFF_DISABLE 0x00
#define CDRESUMECTL 0xFE52
+#define CDGW 0xFE53
#define WAKE_SEL_CTL 0xFE54
#define PCLK_CTL 0xFE55
#define PCLK_MODE_SEL 0x20
@@ -764,6 +767,9 @@
#define SD_VIO_LDO_1V8 0x40
#define SD_VIO_LDO_3V3 0x70
+#define RTS5264_AUTOLOAD_CFG2 0xFF7D
+#define RTS5264_CHIP_RST_N_SEL (1 << 6)
+
#define RTS5260_AUTOLOAD_CFG4 0xFF7F
#define RTS5260_MIMO_DISABLE 0x8A
/*RTS5261*/
@@ -1261,6 +1267,7 @@ struct rtsx_pcr {
u8 dma_error_count;
u8 ocp_stat;
u8 ocp_stat2;
+ u8 ovp_stat;
u8 rtd3_en;
};
@@ -1271,6 +1278,7 @@ struct rtsx_pcr {
#define PID_5260 0x5260
#define PID_5261 0x5261
#define PID_5228 0x5228
+#define PID_5264 0x5264
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 1dd530ce8b..9c29689ff5 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -203,11 +203,11 @@ extern void up_read(struct rw_semaphore *sem);
extern void up_write(struct rw_semaphore *sem);
DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
-DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
-
-DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
-DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
+DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
+DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
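
A hedged sketch of the new conditional guards — assuming cleanup.h scoped_guard() semantics where a failed trylock skips the body; the function itself is invented:

    #include <linux/cleanup.h>
    #include <linux/rwsem.h>

    static int foo_try_update(struct rw_semaphore *sem, int val, int *out)
    {
        scoped_guard(rwsem_write_try, sem) {
            /* writer lock held; released automatically on scope exit */
            *out = val;
            return 0;
        }
        return -EBUSY;      /* down_write_trylock() failed */
    }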
/*
* downgrade write lock to read lock
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 292c316972..ffe8f618ab 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -10,33 +10,41 @@
#include <uapi/linux/sched.h>
#include <asm/current.h>
-
-#include <linux/pid.h>
-#include <linux/sem.h>
+#include <asm/processor.h>
+#include <linux/thread_info.h>
+#include <linux/preempt.h>
+#include <linux/cpumask.h>
+
+#include <linux/cache.h>
+#include <linux/irqflags_types.h>
+#include <linux/smp_types.h>
+#include <linux/pid_types.h>
+#include <linux/sem_types.h>
#include <linux/shm.h>
#include <linux/kmsan_types.h>
-#include <linux/mutex.h>
-#include <linux/plist.h>
-#include <linux/hrtimer.h>
-#include <linux/irqflags.h>
-#include <linux/seccomp.h>
-#include <linux/nodemask.h>
-#include <linux/rcupdate.h>
-#include <linux/refcount.h>
+#include <linux/mutex_types.h>
+#include <linux/plist_types.h>
+#include <linux/hrtimer_types.h>
+#include <linux/timer_types.h>
+#include <linux/seccomp_types.h>
+#include <linux/nodemask_types.h>
+#include <linux/refcount_types.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
-#include <linux/syscall_user_dispatch.h>
+#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
-#include <linux/posix-timers.h>
-#include <linux/rseq.h>
-#include <linux/seqlock.h>
+#include <linux/posix-timers_types.h>
+#include <linux/restart_block.h>
+#include <uapi/linux/rseq.h>
+#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
#include <linux/livepatch_sched.h>
+#include <linux/uidgid_types.h>
#include <asm/kmap_size.h>
/* task_struct member predeclarations (sorted alphabetically): */
@@ -63,11 +71,13 @@ struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
+struct sched_dl_entity;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
+struct task_struct;
struct user_event_mm;
/*
@@ -413,42 +423,6 @@ struct load_weight {
u32 inv_weight;
};
-/**
- * struct util_est - Estimation utilization of FAIR tasks
- * @enqueued: instantaneous estimated utilization of a task/cpu
- * @ewma: the Exponential Weighted Moving Average (EWMA)
- * utilization of a task
- *
- * Support data structure to track an Exponential Weighted Moving Average
- * (EWMA) of a FAIR task's utilization. New samples are added to the moving
- * average each time a task completes an activation. Sample's weight is chosen
- * so that the EWMA will be relatively insensitive to transient changes to the
- * task's workload.
- *
- * The enqueued attribute has a slightly different meaning for tasks and cpus:
- * - task: the task's util_avg at last task dequeue time
- * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
- * Thus, the util_est.enqueued of a task represents the contribution on the
- * estimated utilization of the CPU where that task is currently enqueued.
- *
- * Only for tasks we track a moving average of the past instantaneous
- * estimated utilization. This allows to absorb sporadic drops in utilization
- * of an otherwise almost periodic task.
- *
- * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
- * updates. When a task is dequeued, its util_est should not be updated if its
- * util_avg has not been updated in the meantime.
- * This information is mapped into the MSB bit of util_est.enqueued at dequeue
- * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
- * for a task) it is safe to use MSB.
- */
-struct util_est {
- unsigned int enqueued;
- unsigned int ewma;
-#define UTIL_EST_WEIGHT_SHIFT 2
-#define UTIL_AVG_UNCHANGED 0x80000000
-} __attribute__((__aligned__(sizeof(u64))));
-
/*
* The load/runnable/util_avg accumulates an infinite geometric series
* (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
@@ -503,9 +477,20 @@ struct sched_avg {
unsigned long load_avg;
unsigned long runnable_avg;
unsigned long util_avg;
- struct util_est util_est;
+ unsigned int util_est;
} ____cacheline_aligned;
+/*
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB bit of util_est at dequeue time.
+ * Since max value of util_est for a task is 1024 (PELT util_avg for a task)
+ * it is safe to use MSB.
+ */
+#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
+
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
@@ -523,7 +508,7 @@ struct sched_statistics {
u64 block_max;
s64 sum_block_runtime;
- u64 exec_max;
+ s64 exec_max;
u64 slice_max;
u64 nr_migrations_cold;
@@ -553,7 +538,7 @@ struct sched_entity {
struct load_weight load;
struct rb_node run_node;
u64 deadline;
- u64 min_deadline;
+ u64 min_vruntime;
struct list_head group_node;
unsigned int on_rq;
@@ -607,6 +592,9 @@ struct sched_rt_entity {
#endif
} __randomize_layout;
+typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
+typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
+
struct sched_dl_entity {
struct rb_node rb_node;
@@ -654,6 +642,7 @@ struct sched_dl_entity {
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
+ unsigned int dl_server : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -668,7 +657,20 @@ struct sched_dl_entity {
* timer is needed to decrease the active utilization at the correct
* time.
*/
- struct hrtimer inactive_timer;
+ struct hrtimer inactive_timer;
+
+ /*
+ * Bits for DL-server functionality. Also see the comment near
+ * dl_server_update().
+ *
+ * @rq the runqueue this server is for
+ *
+	 * @server_has_tasks() returns true if @server_pick would return a
+ * runnable task.
+ */
+ struct rq *rq;
+ dl_server_has_tasks_f server_has_tasks;
+ dl_server_pick_f server_pick;
#ifdef CONFIG_RT_MUTEXES
/*
@@ -795,6 +797,7 @@ struct task_struct {
struct sched_entity se;
struct sched_rt_entity rt;
struct sched_dl_entity dl;
+ struct sched_dl_entity *dl_server;
const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
@@ -917,7 +920,7 @@ struct task_struct {
unsigned sched_rt_mutex:1;
#endif
- /* Bit to tell LSMs we're in execve(): */
+ /* Bit to tell TOMOYO we're in execve(): */
unsigned in_execve:1;
unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
@@ -954,7 +957,7 @@ struct task_struct {
/* Recursion prevention for eventfd_signal() */
unsigned in_eventfd:1;
#endif
-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
unsigned pasid_activated:1;
#endif
#ifdef CONFIG_CPU_SUP_INTEL
@@ -1561,114 +1564,6 @@ struct task_struct {
*/
};
-static inline struct pid *task_pid(struct task_struct *task)
-{
- return task->thread_pid;
-}
-
-/*
- * the helpers to get the task's different pids as they are seen
- * from various namespaces
- *
- * task_xid_nr() : global id, i.e. the id seen from the init namespace;
- * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
- * current.
- * task_xid_nr_ns() : id seen from the ns specified;
- *
- * see also pid_nr() etc in include/linux/pid.h
- */
-pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
-
-static inline pid_t task_pid_nr(struct task_struct *tsk)
-{
- return tsk->pid;
-}
-
-static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
-}
-
-static inline pid_t task_pid_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
-}
-
-
-static inline pid_t task_tgid_nr(struct task_struct *tsk)
-{
- return tsk->tgid;
-}
-
-/**
- * pid_alive - check that a task structure is not stale
- * @p: Task structure to be checked.
- *
- * Test if a process is not yet dead (at most zombie state)
- * If pid_alive fails, then pointers within the task structure
- * can be stale and must not be dereferenced.
- *
- * Return: 1 if the process is alive. 0 otherwise.
- */
-static inline int pid_alive(const struct task_struct *p)
-{
- return p->thread_pid != NULL;
-}
-
-static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
-}
-
-static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
-}
-
-
-static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
-}
-
-static inline pid_t task_session_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
-}
-
-static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
-}
-
-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
-}
-
-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
- pid_t pid = 0;
-
- rcu_read_lock();
- if (pid_alive(tsk))
- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
- rcu_read_unlock();
-
- return pid;
-}
-
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
-{
- return task_ppid_nr_ns(tsk, &init_pid_ns);
-}
-
-/* Obsolete, do not use: */
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
-{
- return task_pgrp_nr_ns(tsk, &init_pid_ns);
-}
-
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
@@ -1712,20 +1607,6 @@ static inline char task_state_to_char(struct task_struct *tsk)
return task_index_to_char(task_state_index(tsk));
}
-/**
- * is_global_init - check if a task structure is init. Since init
- * is free to have sub-threads we need to check tgid.
- * @tsk: Task structure to be checked.
- *
- * Check if a task structure is the first user space task the kernel created.
- *
- * Return: 1 if the task structure is init. 0 otherwise.
- */
-static inline int is_global_init(struct task_struct *tsk)
-{
- return task_tgid_nr(tsk) == 1;
-}
-
extern struct pid *cad_pid;
/*
@@ -1955,9 +1836,7 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
-#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
struct task_struct task;
-#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
@@ -2177,15 +2056,6 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
__cond_resched_rwlock_write(lock); \
})
-static inline void cond_resched_rcu(void)
-{
-#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
- rcu_read_unlock();
- cond_resched();
- rcu_read_lock();
-#endif
-}
-
#ifdef CONFIG_PREEMPT_DYNAMIC
extern bool preempt_model_none(void);
@@ -2227,37 +2097,6 @@ static inline bool preempt_model_preemptible(void)
return preempt_model_full() || preempt_model_rt();
}
-/*
- * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
- * but a general need for low latency)
- */
-static inline int spin_needbreak(spinlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
- return spin_is_contended(lock);
-#else
- return 0;
-#endif
-}
-
-/*
- * Check if a rwlock is contended.
- * Returns non-zero if there is another task waiting on the rwlock.
- * Returns zero if the lock is not contended or the system / underlying
- * rwlock implementation does not support contention detection.
- * Technically does not depend on CONFIG_PREEMPTION, but a general need
- * for low latency.
- */
-static inline int rwlock_needbreak(rwlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
- return rwlock_is_contended(lock);
-#else
- return 0;
-#endif
-}
-
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
@@ -2292,6 +2131,8 @@ extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);
+#include <linux/spinlock.h>
+
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
@@ -2328,129 +2169,6 @@ static inline bool owner_on_cpu(struct task_struct *owner)
unsigned long sched_cpu_util(int cpu);
#endif /* CONFIG_SMP */
-#ifdef CONFIG_RSEQ
-
-/*
- * Map the event mask on the user-space ABI enum rseq_cs_flags
- * for direct mask checks.
- */
-enum rseq_event_mask_bits {
- RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
- RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
- RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
-};
-
-enum rseq_event_mask {
- RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
- RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
- RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
-};
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
- if (t->rseq)
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-}
-
-void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
-
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
-{
- if (current->rseq)
- __rseq_handle_notify_resume(ksig, regs);
-}
-
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
-{
- preempt_disable();
- __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
- preempt_enable();
- rseq_handle_notify_resume(ksig, regs);
-}
-
-/* rseq_preempt() requires preemption to be disabled. */
-static inline void rseq_preempt(struct task_struct *t)
-{
- __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
-}
-
-/* rseq_migrate() requires preemption to be disabled. */
-static inline void rseq_migrate(struct task_struct *t)
-{
- __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
-}
-
-/*
- * If parent process has a registered restartable sequences area, the
- * child inherits. Unregister rseq for a clone with CLONE_VM set.
- */
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
-{
- if (clone_flags & CLONE_VM) {
- t->rseq = NULL;
- t->rseq_len = 0;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
- } else {
- t->rseq = current->rseq;
- t->rseq_len = current->rseq_len;
- t->rseq_sig = current->rseq_sig;
- t->rseq_event_mask = current->rseq_event_mask;
- }
-}
-
-static inline void rseq_execve(struct task_struct *t)
-{
- t->rseq = NULL;
- t->rseq_len = 0;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
-}
-
-#else
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
-}
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
-{
-}
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
-{
-}
-static inline void rseq_preempt(struct task_struct *t)
-{
-}
-static inline void rseq_migrate(struct task_struct *t)
-{
-}
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
-{
-}
-static inline void rseq_execve(struct task_struct *t)
-{
-}
-
-#endif
-
-#ifdef CONFIG_DEBUG_RSEQ
-
-void rseq_syscall(struct pt_regs *regs);
-
-#else
-
-static inline void rseq_syscall(struct pt_regs *regs)
-{
-}
-
-#endif
-
#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index fe1a46f30d..2b461129d1 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -2,6 +2,7 @@
#define _LINUX_SCHED_ISOLATION_H
#include <linux/cpumask.h>
+#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/tick.h>
@@ -67,7 +68,8 @@ static inline bool housekeeping_cpu(int cpu, enum hk_type type)
static inline bool cpu_is_isolated(int cpu)
{
return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
- !housekeeping_test_cpu(cpu, HK_TYPE_TICK);
+ !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
+ cpuset_cpu_is_isolated(cpu);
}
#endif /* _LINUX_SCHED_ISOLATION_H */
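A hedged usage sketch of the extended predicate (example_work is an assumed per-CPU work item): callers that defer housekeeping can now also skip CPUs isolated through cpuset partitions, not just boot-time isolcpus/nohz_full CPUs.

	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_is_isolated(cpu))
			continue;	/* leave isolated CPUs undisturbed */
		schedule_work_on(cpu, per_cpu_ptr(&example_work, cpu));
	}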
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 3499c1a8b9..4b7664c562 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -9,6 +9,7 @@
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
+#include <linux/pid.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>
@@ -432,7 +433,6 @@ static inline bool fault_signal_pending(vm_fault_t fault_flags,
* This is required every time the blocked sigset_t changes.
* callers must hold sighand->siglock.
*/
-extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);
@@ -646,6 +646,9 @@ extern bool current_is_single_threaded(void);
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
+#define for_other_threads(p, t) \
+ for (t = p; (t = next_thread(t)) != p; )
+
#define __for_each_thread(signal, t) \
list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
lockdep_is_held(&tasklist_lock))
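A minimal usage sketch for the new iterator, assuming the caller holds the usual tasklist protection (RCU or tasklist_lock) as with while_each_thread(); example_notify() is a hypothetical per-thread action:

	struct task_struct *t;

	for_other_threads(p, t)
		example_notify(t);	/* visits every thread in p's group except p */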
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index a23af225c8..d362aacf9f 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -7,6 +7,8 @@
* functionality:
*/
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -226,4 +228,6 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
+DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+
#endif /* _LINUX_SCHED_TASK_H */
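With the guard defined above, a caller can hold the task lock for a lexical scope using the cleanup.h guard() helper; a minimal sketch (the function and the example_update_fs_state() call are illustrative only):

static void example_with_task_lock(struct task_struct *p)
{
	guard(task_lock)(p);	/* task_lock(p) now, task_unlock(p) at scope exit */

	/* state protected by p->alloc_lock may be touched here */
	example_update_fs_state(p);
}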
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index f158b025c1..ccd72b978e 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/magic.h>
+#include <linux/refcount.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index de545ba852..a6e04b4a21 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -279,6 +279,14 @@ void arch_update_thermal_pressure(const struct cpumask *cpus,
{ }
#endif
+#ifndef arch_scale_freq_ref
+static __always_inline
+unsigned int arch_scale_freq_ref(int cpu)
+{
+ return 0;
+}
+#endif
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 175079552f..709ad84809 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,6 +3,7 @@
#define _LINUX_SECCOMP_H
#include <uapi/linux/seccomp.h>
+#include <linux/seccomp_types.h>
#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
SECCOMP_FILTER_FLAG_LOG | \
@@ -21,25 +22,6 @@
#include <linux/atomic.h>
#include <asm/seccomp.h>
-struct seccomp_filter;
-/**
- * struct seccomp - the state of a seccomp'ed process
- *
- * @mode: indicates one of the valid values above for controlled
- * system calls available to a process.
- * @filter_count: number of seccomp filters
- * @filter: must always point to a valid seccomp-filter or NULL as it is
- * accessed without locking during system call entry.
- *
- * @filter must only be accessed from the context of current as there
- * is no read locking.
- */
-struct seccomp {
- int mode;
- atomic_t filter_count;
- struct seccomp_filter *filter;
-};
-
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
extern int __secure_computing(const struct seccomp_data *sd);
static inline int secure_computing(void)
@@ -64,8 +46,6 @@ static inline int seccomp_mode(struct seccomp *s)
#include <linux/errno.h>
-struct seccomp { };
-struct seccomp_filter { };
struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
@@ -126,6 +106,8 @@ static inline long seccomp_get_metadata(struct task_struct *task,
#ifdef CONFIG_SECCOMP_CACHE_DEBUG
struct seq_file;
+struct pid_namespace;
+struct pid;
int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);
diff --git a/include/linux/seccomp_types.h b/include/linux/seccomp_types.h
new file mode 100644
index 0000000000..cf0a035502
--- /dev/null
+++ b/include/linux/seccomp_types.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SECCOMP_TYPES_H
+#define _LINUX_SECCOMP_TYPES_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_SECCOMP
+
+struct seccomp_filter;
+/**
+ * struct seccomp - the state of a seccomp'ed process
+ *
+ * @mode: indicates one of the valid seccomp modes (SECCOMP_MODE_*)
+ * controlling which system calls are available to the process.
+ * @filter_count: number of seccomp filters
+ * @filter: must always point to a valid seccomp-filter or NULL as it is
+ * accessed without locking during system call entry.
+ *
+ * @filter must only be accessed from the context of current as there
+ * is no read locking.
+ */
+struct seccomp {
+ int mode;
+ atomic_t filter_count;
+ struct seccomp_filter *filter;
+};
+
+#else
+
+struct seccomp { };
+struct seccomp_filter { };
+
+#endif
+
+#endif /* _LINUX_SECCOMP_TYPES_H */
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
index 35f3a4a8ce..acf7e1a3f3 100644
--- a/include/linux/secretmem.h
+++ b/include/linux/secretmem.h
@@ -13,10 +13,10 @@ static inline bool folio_is_secretmem(struct folio *folio)
/*
* Using folio_mapping() is quite slow because of the actual call
* instruction.
- * We know that secretmem pages are not compound and LRU so we can
+ * We know that secretmem pages are not compound, so we can
* save a couple of cycles here.
*/
- if (folio_test_large(folio) || !folio_test_lru(folio))
+ if (folio_test_large(folio))
return false;
mapping = (struct address_space *)
diff --git a/include/linux/security.h b/include/linux/security.h
index 9d3138c636..3180d823e0 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/sockptr.h>
+#include <uapi/linux/lsm.h>
struct linux_binprm;
struct cred;
@@ -60,6 +61,7 @@ struct fs_parameter;
enum fs_value_type;
struct watch;
struct watch_notification;
+struct lsm_ctx;
/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
@@ -138,6 +140,8 @@ enum lockdown_reason {
};
extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
+extern u32 lsm_active_cnt;
+extern const struct lsm_id *lsm_idlist[];
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
@@ -261,6 +265,7 @@ int unregister_blocking_lsm_notifier(struct notifier_block *nb);
/* prototypes */
extern int security_init(void);
extern int early_security_init(void);
+extern u64 lsm_name_to_attr(const char *name);
/* Security operations */
int security_binder_set_context_mgr(const struct cred *mgr);
@@ -472,10 +477,13 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
-int security_getprocattr(struct task_struct *p, const char *lsm, const char *name,
+int security_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ u32 __user *size, u32 flags);
+int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ u32 size, u32 flags);
+int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value);
-int security_setprocattr(const char *lsm, const char *name, void *value,
- size_t size);
+int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
@@ -486,6 +494,8 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
int security_locked_down(enum lockdown_reason what);
+int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len,
+ void *val, size_t val_len, u64 id, u64 flags);
#else /* CONFIG_SECURITY */
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -503,6 +513,11 @@ static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb)
return 0;
}
+static inline u64 lsm_name_to_attr(const char *name)
+{
+ return LSM_ATTR_UNDEF;
+}
+
static inline void security_free_mnt_opts(void **mnt_opts)
{
}
@@ -1346,14 +1361,28 @@ static inline void security_d_instantiate(struct dentry *dentry,
struct inode *inode)
{ }
-static inline int security_getprocattr(struct task_struct *p, const char *lsm,
+static inline int security_getselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+				       u32 __user *size, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_setselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+				       u32 size, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_getprocattr(struct task_struct *p, int lsmid,
const char *name, char **value)
{
return -EINVAL;
}
-static inline int security_setprocattr(const char *lsm, char *name,
- void *value, size_t size)
+static inline int security_setprocattr(int lsmid, char *name, void *value,
+ size_t size)
{
return -EINVAL;
}
@@ -1404,6 +1433,12 @@ static inline int security_locked_down(enum lockdown_reason what)
{
return 0;
}
+static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx,
+ u32 *uctx_len, void *val, size_t val_len,
+ u64 id, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_SECURITY */
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 5608a500c4..c4deefe42a 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -3,25 +3,17 @@
#define _LINUX_SEM_H
#include <uapi/linux/sem.h>
+#include <linux/sem_types.h>
struct task_struct;
-struct sem_undo_list;
#ifdef CONFIG_SYSVIPC
-struct sysv_sem {
- struct sem_undo_list *undo_list;
-};
-
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
#else
-struct sysv_sem {
- /* empty */
-};
-
static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
return 0;
diff --git a/include/linux/sem_types.h b/include/linux/sem_types.h
new file mode 100644
index 0000000000..73df1971a7
--- /dev/null
+++ b/include/linux/sem_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SEM_TYPES_H
+#define _LINUX_SEM_TYPES_H
+
+struct sem_undo_list;
+
+struct sysv_sem {
+#ifdef CONFIG_SYSVIPC
+ struct sem_undo_list *undo_list;
+#endif
+};
+
+#endif /* _LINUX_SEM_TYPES_H */
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index c44f4b47b9..fe41da0059 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -2,7 +2,10 @@
#ifndef _LINUX_SEQ_BUF_H
#define _LINUX_SEQ_BUF_H
-#include <linux/fs.h>
+#include <linux/bug.h>
+#include <linux/minmax.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
/*
* Trace sequences are used to allow a function to call several other functions
@@ -10,7 +13,7 @@
*/
/**
- * seq_buf - seq buffer structure
+ * struct seq_buf - seq buffer structure
* @buffer: pointer to the buffer
* @size: size of the buffer
* @len: the amount of data inside the buffer
@@ -77,10 +80,10 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
}
/**
- * seq_buf_str - get %NUL-terminated C string from seq_buf
+ * seq_buf_str - get NUL-terminated C string from seq_buf
* @s: the seq_buf handle
*
- * This makes sure that the buffer in @s is nul terminated and
+ * This makes sure that the buffer in @s is NUL-terminated and
* safe to read as a string.
*
* Note, if this is called when the buffer has overflowed, then
@@ -90,7 +93,7 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
* After this function is called, s->buffer is safe to use
* in string operations.
*
- * Returns @s->buf after making sure it is terminated.
+ * Returns: @s->buf after making sure it is terminated.
*/
static inline const char *seq_buf_str(struct seq_buf *s)
{
@@ -110,7 +113,7 @@ static inline const char *seq_buf_str(struct seq_buf *s)
* @s: the seq_buf handle
* @bufp: the beginning of the buffer is stored here
*
- * Return the number of bytes available in the buffer, or zero if
+ * Returns: the number of bytes available in the buffer, or zero if
* there's no space.
*/
static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
@@ -132,7 +135,7 @@ static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
* @num: the number of bytes to commit
*
* Commit @num bytes of data written to a buffer previously acquired
- * by seq_buf_get. To signal an error condition, or that the data
+ * by seq_buf_get_buf(). To signal an error condition, or that the data
* didn't fit in the available space, pass a negative @num value.
*/
static inline void seq_buf_commit(struct seq_buf *s, int num)
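Putting the documented calls together, a typical usage sketch writes into a caller-provided buffer and reads the result back as a C string:

	char buf[64];
	struct seq_buf s;

	seq_buf_init(&s, buf, sizeof(buf));
	seq_buf_printf(&s, "cpu=%d state=%s", 3, "idle");
	pr_info("%s\n", seq_buf_str(&s));	/* NUL-terminated by seq_buf_str() */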
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e92f9d5577..d90d8ee29d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -18,6 +18,7 @@
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
+#include <linux/seqlock_types.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
@@ -37,37 +38,6 @@
*/
#define KCSAN_SEQLOCK_REGION_MAX 1000
-/*
- * Sequence counters (seqcount_t)
- *
- * This is the raw counting mechanism, without any writer protection.
- *
- * Write side critical sections must be serialized and non-preemptible.
- *
- * If readers can be invoked from hardirq or softirq contexts,
- * interrupts or bottom halves must also be respectively disabled before
- * entering the write section.
- *
- * This mechanism can't be used if the protected data contains pointers,
- * as the writer can invalidate a pointer that a reader is following.
- *
- * If the write serialization mechanism is one of the common kernel
- * locking primitives, use a sequence counter with associated lock
- * (seqcount_LOCKNAME_t) instead.
- *
- * If it's desired to automatically handle the sequence counter writer
- * serialization and non-preemptibility requirements, use a sequential
- * lock (seqlock_t) instead.
- *
- * See Documentation/locking/seqlock.rst
- */
-typedef struct seqcount {
- unsigned sequence;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} seqcount_t;
-
static inline void __seqcount_init(seqcount_t *s, const char *name,
struct lock_class_key *key)
{
@@ -132,28 +102,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*/
/*
- * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
- * disable preemption. It can lead to higher latencies, and the write side
- * sections will not be able to acquire locks which become sleeping locks
- * (e.g. spinlock_t).
- *
- * To remain preemptible while avoiding a possible livelock caused by the
- * reader preempting the writer, use a different technique: let the reader
- * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
- * case, acquire then release the associated LOCKNAME writer serialization
- * lock. This will allow any possibly-preempted writer to make progress
- * until the end of its writer serialization lock critical section.
- *
- * This lock-unlock technique must be implemented for all of PREEMPT_RT
- * sleeping locks. See Documentation/locking/locktypes.rst
- */
-#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
-#define __SEQ_LOCK(expr) expr
-#else
-#define __SEQ_LOCK(expr)
-#endif
-
-/*
* typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
* @seqcount: The real sequence counter
* @lock: Pointer to the associated lock
@@ -194,11 +142,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* @lockbase: prefix for associated lock/unlock
*/
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
-typedef struct seqcount_##lockname { \
- seqcount_t seqcount; \
- __SEQ_LOCK(locktype *lock); \
-} seqcount_##lockname##_t; \
- \
static __always_inline seqcount_t * \
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
@@ -284,6 +227,7 @@ SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
+#undef SEQCOUNT_LOCKNAME
/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@@ -794,25 +738,6 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
smp_wmb(); /* increment "sequence" before following stores */
}
-/*
- * Sequential locks (seqlock_t)
- *
- * Sequence counters with an embedded spinlock for writer serialization
- * and non-preemptibility.
- *
- * For more info, see:
- * - Comments on top of seqcount_t
- * - Documentation/locking/seqlock.rst
- */
-typedef struct {
- /*
- * Make sure that readers don't starve writers on PREEMPT_RT: use
- * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
- */
- seqcount_spinlock_t seqcount;
- spinlock_t lock;
-} seqlock_t;
-
#define __SEQLOCK_UNLOCKED(lockname) \
{ \
.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h
new file mode 100644
index 0000000000..dfdf43e3fa
--- /dev/null
+++ b/include/linux/seqlock_types.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SEQLOCK_TYPES_H
+#define __LINUX_SEQLOCK_TYPES_H
+
+#include <linux/lockdep_types.h>
+#include <linux/mutex_types.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * Sequence counters (seqcount_t)
+ *
+ * This is the raw counting mechanism, without any writer protection.
+ *
+ * Write side critical sections must be serialized and non-preemptible.
+ *
+ * If readers can be invoked from hardirq or softirq contexts,
+ * interrupts or bottom halves must also be respectively disabled before
+ * entering the write section.
+ *
+ * This mechanism can't be used if the protected data contains pointers,
+ * as the writer can invalidate a pointer that a reader is following.
+ *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKNAME_t) instead.
+ *
+ * If it's desired to automatically handle the sequence counter writer
+ * serialization and non-preemptibility requirements, use a sequential
+ * lock (seqlock_t) instead.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+typedef struct seqcount {
+ unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} seqcount_t;
+
+/*
+ * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
+ * disable preemption. It can lead to higher latencies, and the write side
+ * sections will not be able to acquire locks which become sleeping locks
+ * (e.g. spinlock_t).
+ *
+ * To remain preemptible while avoiding a possible livelock caused by the
+ * reader preempting the writer, use a different technique: let the reader
+ * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
+ * case, acquire then release the associated LOCKNAME writer serialization
+ * lock. This will allow any possibly-preempted writer to make progress
+ * until the end of its writer serialization lock critical section.
+ *
+ * This lock-unlock technique must be implemented for all of PREEMPT_RT
+ * sleeping locks. See Documentation/locking/locktypes.rst
+ */
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
+#define __SEQ_LOCK(expr) expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
+typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+} seqcount_##lockname##_t;
+
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
+#undef SEQCOUNT_LOCKNAME
+
+/*
+ * Sequential locks (seqlock_t)
+ *
+ * Sequence counters with an embedded spinlock for writer serialization
+ * and non-preemptibility.
+ *
+ * For more info, see:
+ * - Comments on top of seqcount_t
+ * - Documentation/locking/seqlock.rst
+ */
+typedef struct {
+ /*
+ * Make sure that readers don't starve writers on PREEMPT_RT: use
+ * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
+ */
+ seqcount_spinlock_t seqcount;
+ spinlock_t lock;
+} seqlock_t;
+
+#endif /* __LINUX_SEQLOCK_TYPES_H */
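For reference, the read/retry pattern these types exist to support looks like the following hedged sketch (example_lock and example_data are assumed):

static DEFINE_SEQLOCK(example_lock);
static u64 example_data;

static u64 example_read(void)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqbegin(&example_lock);
		val = example_data;
	} while (read_seqretry(&example_lock, seq));	/* retry if a writer raced */

	return val;
}

static void example_write(u64 val)
{
	write_seqlock(&example_lock);	/* serializes writers, bumps the sequence */
	example_data = val;
	write_sequnlock(&example_lock);
}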
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index f5f97fa25e..3fab88ba26 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -27,7 +27,7 @@ struct serdev_device;
* not sleep.
*/
struct serdev_device_ops {
- int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
+ ssize_t (*receive_buf)(struct serdev_device *, const u8 *, size_t);
void (*write_wakeup)(struct serdev_device *);
};
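Under the new signature a client driver reports how many bytes it actually consumed (or a negative errno) instead of a bare int status; a hedged sketch, where example_proto_rx() and example_write_wakeup() are hypothetical:

static ssize_t example_receive_buf(struct serdev_device *serdev,
				   const u8 *data, size_t count)
{
	size_t used = example_proto_rx(serdev, data, count);

	return used;	/* bytes consumed; may be less than count */
}

static const struct serdev_device_ops example_ops = {
	.receive_buf	= example_receive_buf,
	.write_wakeup	= example_write_wakeup,
};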
@@ -82,7 +82,7 @@ enum serdev_parity {
* serdev controller structures
*/
struct serdev_controller_ops {
- int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t);
+ ssize_t (*write_buf)(struct serdev_controller *, const u8 *, size_t);
void (*write_flush)(struct serdev_controller *);
int (*write_room)(struct serdev_controller *);
int (*open)(struct serdev_controller *);
@@ -99,12 +99,14 @@ struct serdev_controller_ops {
/**
* struct serdev_controller - interface to the serdev controller
* @dev: Driver model representation of the device.
+ * @host: Serial port hardware controller device
* @nr: number identifier for this controller/bus.
* @serdev: Pointer to slave device for this controller.
* @ops: Controller operations.
*/
struct serdev_controller {
struct device dev;
+ struct device *host;
unsigned int nr;
struct serdev_device *serdev;
const struct serdev_controller_ops *ops;
@@ -167,7 +169,9 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *);
int serdev_device_add(struct serdev_device *);
void serdev_device_remove(struct serdev_device *);
-struct serdev_controller *serdev_controller_alloc(struct device *, size_t);
+struct serdev_controller *serdev_controller_alloc(struct device *host,
+ struct device *parent,
+ size_t size);
int serdev_controller_add(struct serdev_controller *);
void serdev_controller_remove(struct serdev_controller *);
@@ -181,9 +185,9 @@ static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl
serdev->ops->write_wakeup(serdev);
}
-static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
- const unsigned char *data,
- size_t count)
+static inline ssize_t serdev_controller_receive_buf(struct serdev_controller *ctrl,
+ const u8 *data,
+ size_t count)
{
struct serdev_device *serdev = ctrl->serdev;
@@ -200,13 +204,13 @@ void serdev_device_close(struct serdev_device *);
int devm_serdev_device_open(struct device *, struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
-int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+int serdev_device_write_buf(struct serdev_device *, const u8 *, size_t);
void serdev_device_wait_until_sent(struct serdev_device *, long);
int serdev_device_get_tiocm(struct serdev_device *);
int serdev_device_set_tiocm(struct serdev_device *, int, int);
int serdev_device_break_ctl(struct serdev_device *serdev, int break_state);
void serdev_device_write_wakeup(struct serdev_device *);
-int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long);
+ssize_t serdev_device_write(struct serdev_device *, const u8 *, size_t, long);
void serdev_device_write_flush(struct serdev_device *);
int serdev_device_write_room(struct serdev_device *);
@@ -244,7 +248,7 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
}
static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
static inline int serdev_device_write_buf(struct serdev_device *serdev,
- const unsigned char *buf,
+ const u8 *buf,
size_t count)
{
return -ENODEV;
@@ -262,8 +266,9 @@ static inline int serdev_device_break_ctl(struct serdev_device *serdev, int brea
{
return -EOPNOTSUPP;
}
-static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
- size_t count, unsigned long timeout)
+static inline ssize_t serdev_device_write(struct serdev_device *sdev,
+ const u8 *buf, size_t count,
+ unsigned long timeout)
{
return -ENODEV;
}
@@ -311,11 +316,13 @@ struct tty_driver;
#ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT
struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *host,
struct device *parent,
struct tty_driver *drv, int idx);
int serdev_tty_port_unregister(struct tty_port *port);
#else
static inline struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *host,
struct device *parent,
struct tty_driver *drv, int idx)
{
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 678409c47b..bb0f2d4ac6 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -875,9 +875,9 @@ static inline unsigned long uart_fifo_timeout(struct uart_port *port)
}
/* Base timer interval for polling */
-static inline int uart_poll_timeout(struct uart_port *port)
+static inline unsigned long uart_poll_timeout(struct uart_port *port)
{
- int timeout = uart_fifo_timeout(port);
+ unsigned long timeout = uart_fifo_timeout(port);
return timeout > 6 ? (timeout / 2 - 2) : 1;
}
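Worked example for the widened helper (values are illustrative): if uart_fifo_timeout() comes back as 20 jiffies for a given port and baud rate, the poll interval becomes 20 / 2 - 2 = 8 jiffies; any FIFO timeout of 6 jiffies or less clamps to the 1-jiffy minimum. Returning unsigned long simply matches uart_fifo_timeout() and avoids the narrowing conversion the old int return implied.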
diff --git a/include/linux/shm.h b/include/linux/shm.h
index d8e69aed3d..c55bef0538 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -2,12 +2,12 @@
#ifndef _LINUX_SHM_H_
#define _LINUX_SHM_H_
-#include <linux/list.h>
+#include <linux/types.h>
#include <asm/page.h>
-#include <uapi/linux/shm.h>
#include <asm/shmparam.h>
struct file;
+struct task_struct;
#ifdef CONFIG_SYSVIPC
struct sysv_shm {
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 66828dfc6e..f0c6bf9828 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -114,8 +114,17 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
struct mm_struct *mm, unsigned long vm_flags);
+#else
+static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+ struct mm_struct *mm, unsigned long vm_flags)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_SHMEM
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
#else
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 3b98e7a285..f19816832f 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -3,6 +3,7 @@
#define _LINUX_SIGNAL_H
#include <linux/bug.h>
+#include <linux/list.h>
#include <linux/signal_types.h>
#include <linux/string.h>
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index a70b2bdbf4..caf4f7a59a 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -6,7 +6,7 @@
* Basic signal handling related data type definitions:
*/
-#include <linux/list.h>
+#include <linux/types.h>
#include <uapi/linux/signal.h>
typedef struct kernel_siginfo {
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
index 84aa448d8b..c3a00b967d 100644
--- a/include/linux/sizes.h
+++ b/include/linux/sizes.h
@@ -47,8 +47,17 @@
#define SZ_8G _AC(0x200000000, ULL)
#define SZ_16G _AC(0x400000000, ULL)
#define SZ_32G _AC(0x800000000, ULL)
+#define SZ_64G _AC(0x1000000000, ULL)
+#define SZ_128G _AC(0x2000000000, ULL)
+#define SZ_256G _AC(0x4000000000, ULL)
+#define SZ_512G _AC(0x8000000000, ULL)
#define SZ_1T _AC(0x10000000000, ULL)
+#define SZ_2T _AC(0x20000000000, ULL)
+#define SZ_4T _AC(0x40000000000, ULL)
+#define SZ_8T _AC(0x80000000000, ULL)
+#define SZ_16T _AC(0x100000000000, ULL)
+#define SZ_32T _AC(0x200000000000, ULL)
#define SZ_64T _AC(0x400000000000, ULL)
#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bd06942757..5bafcfe18b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -566,6 +566,15 @@ struct ubuf_info_msgzc {
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);
+/* Preserve some data across TX submission and completion.
+ *
+ * Note, this state is stored in the driver. Extending the layout
+ * might need some special care.
+ */
+struct xsk_tx_metadata_compl {
+ __u64 *tx_timestamp;
+};
+
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
*/
@@ -578,7 +587,10 @@ struct skb_shared_info {
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
struct sk_buff *frag_list;
- struct skb_shared_hwtstamps hwtstamps;
+ union {
+ struct skb_shared_hwtstamps hwtstamps;
+ struct xsk_tx_metadata_compl xsk_meta;
+ };
unsigned int gso_type;
u32 tskey;
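A hedged sketch of how a zero-copy driver might use the new xsk_meta member of the union above (the helpers and the clock choice are hypothetical): record where the completion timestamp should land at submission, then fill it at TX completion.

static void example_xsk_tx_submit(struct sk_buff *skb, __u64 *ts_slot)
{
	/* overlays hwtstamps, so only valid for XSK-owned skbs */
	skb_shinfo(skb)->xsk_meta.tx_timestamp = ts_slot;
}

static void example_xsk_tx_complete(struct sk_buff *skb)
{
	__u64 *ts = skb_shinfo(skb)->xsk_meta.tx_timestamp;

	if (ts)
		*ts = ktime_get_real_ns();	/* hypothetical clock choice */
}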
@@ -736,13 +748,10 @@ typedef unsigned char *sk_buff_data_t;
* @list: queue head
* @ll_node: anchor in an llist (eg socket defer_list)
* @sk: Socket we are owned by
- * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
- * fragmentation management
* @dev: Device we arrived on/are leaving by
* @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
* @cb: Control buffer. Free for use by every layer. Put private vars here
* @_skb_refdst: destination entry (with norefcount bit)
- * @sp: the security path, used for xfrm
* @len: Length of actual data
* @data_len: Data length
* @mac_len: Length of link layer header
@@ -776,7 +785,6 @@ typedef unsigned char *sk_buff_data_t;
* @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
* @_sk_redir: socket redirection information for skmsg
* @_nfct: Associated connection, if any (with nfctinfo bits)
- * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
* @tc_index: Traffic control index
* @hash: the packet hash
@@ -860,10 +868,7 @@ struct sk_buff {
struct llist_node ll_node;
};
- union {
- struct sock *sk;
- int ip_defrag_offset;
- };
+ struct sock *sk;
union {
ktime_t tstamp;
@@ -1057,7 +1062,7 @@ struct sk_buff {
refcount_t users;
#ifdef CONFIG_SKB_EXTENSIONS
- /* only useable after checking ->active_extensions != 0 */
+ /* only usable after checking ->active_extensions != 0 */
struct skb_ext *extensions;
#endif
};
@@ -2632,6 +2637,8 @@ static inline void skb_put_u8(struct sk_buff *skb, u8 val)
void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->data -= len;
skb->len += len;
return skb->data;
@@ -2640,6 +2647,8 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->len -= len;
if (unlikely(skb->len < skb->data_len)) {
#if defined(CONFIG_DEBUG_NET)
@@ -2664,6 +2673,8 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline enum skb_drop_reason
pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
if (likely(len <= skb_headlen(skb)))
return SKB_NOT_DROPPED_YET;
@@ -2836,6 +2847,11 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
skb->inner_network_header += offset;
}
+static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb)
+{
+ return skb->inner_network_header > 0;
+}
+
static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
return skb->head + skb->inner_mac_header;
@@ -3299,7 +3315,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
unsigned int order)
{
/* This piece of code contains several assumptions.
- * 1. This is for device Rx, therefor a cold page is preferred.
+ * 1. This is for device Rx, therefore a cold page is preferred.
* 2. The expectation is the user wants a compound page.
* 3. If requesting a order 0 page it will not be compound
* due to the check to see if order has a value in prep_new_page
@@ -4007,6 +4023,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
+int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
@@ -4245,10 +4262,13 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
{
const void *a = skb_metadata_end(skb_a);
const void *b = skb_metadata_end(skb_b);
- /* Using more efficient varaiant than plain call to memcmp(). */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 diffs = 0;
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ BITS_PER_LONG != 64)
+ goto slow;
+
+ /* Using more efficient variant than plain call to memcmp(). */
switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
@@ -4268,11 +4288,11 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
fallthrough;
case 4: diffs |= __it_diff(a, b, 32);
break;
+ default:
+slow:
+ return memcmp(a - meta_len, b - meta_len, meta_len);
}
return diffs;
-#else
- return memcmp(a - meta_len, b - meta_len, meta_len);
-#endif
}
static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
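The rewrite above trades the old #if/#else for IS_ENABLED() plus a goto into the memcmp() fallback, so both paths are always compiled and type-checked while the dead one is optimized away. A generic illustration of the same pattern (not from this patch; example_fast_cmp() is hypothetical):

static inline int example_cmp(const void *a, const void *b, size_t len)
{
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		return memcmp(a, b, len);	/* compiled either way */

	return example_fast_cmp(a, b, len);	/* fast unaligned-load path */
}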
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index bd4418377b..e65ec3fd27 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -100,6 +100,11 @@ struct sk_psock {
void (*saved_close)(struct sock *sk, long timeout);
void (*saved_write_space)(struct sock *sk);
void (*saved_data_ready)(struct sock *sk);
+ /* psock_update_sk_prot may be called with restore=false many times
+ * so the handler must be safe for this case. It will be called
+ * exactly once with restore=true when the psock is being destroyed
+ * and psock refcnt is zero, but before an RCU grace period.
+ */
int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
bool restore);
struct proto *sk_proto;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d6d6ffeeb9..b5f5ee8308 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,7 +24,7 @@
/*
* Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+ * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled; otherwise they are no-ops.
*/
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
@@ -302,25 +302,15 @@ static inline unsigned int arch_slab_minalign(void)
* Kmalloc array related definitions
*/
-#ifdef CONFIG_SLAB
/*
- * SLAB and SLUB directly allocates requests fitting in to an order-1 page
+ * SLUB directly allocates requests fitting in to an order-1 page
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW 5
-#endif
-#endif
-
-#ifdef CONFIG_SLUB
-#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX (MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
-#endif
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
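With the common defaults (PAGE_SHIFT = 12, MAX_PAGE_ORDER = 10), the arithmetic works out to KMALLOC_SHIFT_HIGH = 13, so the largest kmalloc slab cache holds 8 KiB objects (an order-1 page), and KMALLOC_SHIFT_MAX = 22, giving KMALLOC_MAX_SIZE = 4 MiB; requests between those two sizes bypass the slab caches and go straight to the page allocator.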
@@ -788,12 +778,4 @@ size_t kmalloc_size_roundup(size_t size);
void __init kmem_cache_init_late(void);
-#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
-int slab_prepare_cpu(unsigned int cpu);
-int slab_dead_cpu(unsigned int cpu);
-#else
-#define slab_prepare_cpu NULL
-#define slab_dead_cpu NULL
-#endif
-
#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
deleted file mode 100644
index a61e7d55d0..0000000000
--- a/include/linux/slab_def.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLAB_DEF_H
-#define _LINUX_SLAB_DEF_H
-
-#include <linux/kfence.h>
-#include <linux/reciprocal_div.h>
-
-/*
- * Definitions unique to the original Linux SLAB allocator.
- */
-
-struct kmem_cache {
- struct array_cache __percpu *cpu_cache;
-
-/* 1) Cache tunables. Protected by slab_mutex */
- unsigned int batchcount;
- unsigned int limit;
- unsigned int shared;
-
- unsigned int size;
- struct reciprocal_value reciprocal_buffer_size;
-/* 2) touched by every alloc & free from the backend */
-
- slab_flags_t flags; /* constant flags */
- unsigned int num; /* # of objs per slab */
-
-/* 3) cache_grow/shrink */
- /* order of pgs per slab (2^n) */
- unsigned int gfporder;
-
- /* force GFP flags, e.g. GFP_DMA */
- gfp_t allocflags;
-
- size_t colour; /* cache colouring range */
- unsigned int colour_off; /* colour offset */
- unsigned int freelist_size;
-
- /* constructor func */
- void (*ctor)(void *obj);
-
-/* 4) cache creation/removal */
- const char *name;
- struct list_head list;
- int refcount;
- int object_size;
- int align;
-
-/* 5) statistics */
-#ifdef CONFIG_DEBUG_SLAB
- unsigned long num_active;
- unsigned long num_allocations;
- unsigned long high_mark;
- unsigned long grown;
- unsigned long reaped;
- unsigned long errors;
- unsigned long max_freeable;
- unsigned long node_allocs;
- unsigned long node_frees;
- unsigned long node_overflow;
- atomic_t allochit;
- atomic_t allocmiss;
- atomic_t freehit;
- atomic_t freemiss;
-
- /*
- * If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. 'size' contains the total
- * object size including these internal fields, while 'obj_offset'
- * and 'object_size' contain the offset to the user object and its
- * size.
- */
- int obj_offset;
-#endif /* CONFIG_DEBUG_SLAB */
-
-#ifdef CONFIG_KASAN_GENERIC
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_HARDENED_USERCOPY
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-#endif
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
- void *x)
-{
- void *object = x - (x - slab->s_mem) % cache->size;
- void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
-
- if (unlikely(object > last_object))
- return last_object;
- else
- return object;
-}
-
-/*
- * We want to avoid an expensive divide : (offset / cache->size)
- * Using the fact that size is a constant for a particular cache,
- * we can replace (offset / cache->size) by
- * reciprocal_divide(offset, cache->reciprocal_buffer_size)
- */
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
-{
- u32 offset = (obj - slab->s_mem);
- return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
- const struct slab *slab)
-{
- if (is_kfence_address(slab_address(slab)))
- return 1;
- return cache->num;
-}
-
-#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
deleted file mode 100644
index deb90cf4bf..0000000000
--- a/include/linux/slub_def.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLUB_DEF_H
-#define _LINUX_SLUB_DEF_H
-
-/*
- * SLUB : A Slab allocator without object queues.
- *
- * (C) 2007 SGI, Christoph Lameter
- */
-#include <linux/kfence.h>
-#include <linux/kobject.h>
-#include <linux/reciprocal_div.h>
-#include <linux/local_lock.h>
-
-enum stat_item {
- ALLOC_FASTPATH, /* Allocation from cpu slab */
- ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
- FREE_FASTPATH, /* Free to cpu slab */
- FREE_SLOWPATH, /* Freeing not to cpu slab */
- FREE_FROZEN, /* Freeing to frozen slab */
- FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
- FREE_REMOVE_PARTIAL, /* Freeing removes last object */
- ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
- ALLOC_SLAB, /* Cpu slab acquired from page allocator */
- ALLOC_REFILL, /* Refill cpu slab from slab freelist */
- ALLOC_NODE_MISMATCH, /* Switching cpu slab */
- FREE_SLAB, /* Slab freed to the page allocator */
- CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
- DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
- DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
- DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
- DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
- DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
- DEACTIVATE_BYPASS, /* Implicit deactivation */
- ORDER_FALLBACK, /* Number of times fallback was necessary */
- CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
- CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
- CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
- CPU_PARTIAL_FREE, /* Refill cpu partial on free */
- CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
- CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
- NR_SLUB_STAT_ITEMS
-};
-
-#ifndef CONFIG_SLUB_TINY
-/*
- * When changing the layout, make sure freelist and tid are still compatible
- * with this_cpu_cmpxchg_double() alignment requirements.
- */
-struct kmem_cache_cpu {
- union {
- struct {
- void **freelist; /* Pointer to next available object */
- unsigned long tid; /* Globally unique transaction id */
- };
- freelist_aba_t freelist_tid;
- };
- struct slab *slab; /* The slab from which we are allocating */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- struct slab *partial; /* Partially allocated frozen slabs */
-#endif
- local_lock_t lock; /* Protects the fields above */
-#ifdef CONFIG_SLUB_STATS
- unsigned stat[NR_SLUB_STAT_ITEMS];
-#endif
-};
-#endif /* CONFIG_SLUB_TINY */
-
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_percpu_partial(c) ((c)->partial)
-
-#define slub_set_percpu_partial(c, p) \
-({ \
- slub_percpu_partial(c) = (p)->next; \
-})
-
-#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
-#else
-#define slub_percpu_partial(c) NULL
-
-#define slub_set_percpu_partial(c, p)
-
-#define slub_percpu_partial_read_once(c) NULL
-#endif // CONFIG_SLUB_CPU_PARTIAL
-
-/*
- * Word size structure that can be atomically updated or read and that
- * contains both the order and the number of objects that a slab of the
- * given order would contain.
- */
-struct kmem_cache_order_objects {
- unsigned int x;
-};
-
-/*
- * Slab cache management.
- */
-struct kmem_cache {
-#ifndef CONFIG_SLUB_TINY
- struct kmem_cache_cpu __percpu *cpu_slab;
-#endif
- /* Used for retrieving partial slabs, etc. */
- slab_flags_t flags;
- unsigned long min_partial;
- unsigned int size; /* The size of an object including metadata */
- unsigned int object_size;/* The size of an object without metadata */
- struct reciprocal_value reciprocal_size;
- unsigned int offset; /* Free pointer offset */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- /* Number of per cpu partial objects to keep around */
- unsigned int cpu_partial;
- /* Number of per cpu partial slabs to keep around */
- unsigned int cpu_partial_slabs;
-#endif
- struct kmem_cache_order_objects oo;
-
- /* Allocation and freeing of slabs */
- struct kmem_cache_order_objects min;
- gfp_t allocflags; /* gfp flags to use on each alloc */
- int refcount; /* Refcount for slab cache destroy */
- void (*ctor)(void *);
- unsigned int inuse; /* Offset to metadata */
- unsigned int align; /* Alignment */
- unsigned int red_left_pad; /* Left redzone padding size */
- const char *name; /* Name (only for display!) */
- struct list_head list; /* List of slab caches */
-#ifdef CONFIG_SYSFS
- struct kobject kobj; /* For sysfs */
-#endif
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
- unsigned long random;
-#endif
-
-#ifdef CONFIG_NUMA
- /*
- * Defragmentation by allocating from a remote node.
- */
- unsigned int remote_node_defrag_ratio;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_KASAN_GENERIC
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_HARDENED_USERCOPY
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-#endif
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
-#define SLAB_SUPPORTS_SYSFS
-void sysfs_slab_unlink(struct kmem_cache *);
-void sysfs_slab_release(struct kmem_cache *);
-#else
-static inline void sysfs_slab_unlink(struct kmem_cache *s)
-{
-}
-static inline void sysfs_slab_release(struct kmem_cache *s)
-{
-}
-#endif
-
-void *fixup_red_left(struct kmem_cache *s, void *p);
-
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
- void *x) {
- void *object = x - (x - slab_address(slab)) % cache->size;
- void *last_object = slab_address(slab) +
- (slab->objects - 1) * cache->size;
- void *result = (unlikely(object > last_object)) ? last_object : object;
-
- result = fixup_red_left(cache, result);
- return result;
-}
-
-/* Determine object index from a given position */
-static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
-{
- return reciprocal_divide(kasan_reset_tag(obj) - addr,
- cache->reciprocal_size);
-}
-
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
-{
- if (is_kfence_address(obj))
- return 0;
- return __obj_to_index(cache, slab_address(slab), obj);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
- const struct slab *slab)
-{
- return slab->objects;
-}
-#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
index fc456f75c1..8c9ca857cc 100644
--- a/include/linux/soc/apple/rtkit.h
+++ b/include/linux/soc/apple/rtkit.h
@@ -161,24 +161,6 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
struct completion *completion, bool atomic);
/*
- * Send a message to the given endpoint and wait until it has been submitted
- * to the hardware FIFO.
- * Will return zero on success and a negative error code on failure
- * (e.g. -ETIME when the message couldn't be written within the given
- * timeout)
- *
- * @rtk: RTKit reference
- * @ep: target endpoint
- * @message: message to be sent
- * @timeout: timeout in milliseconds to allow the message transmission
- * to be completed
- * @atomic: if set to true this function can be called from atomic
- * context.
- */
-int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
- unsigned long timeout, bool atomic);
-
-/*
* Process incoming messages in atomic context.
* This only guarantees that messages arrive as far as the recv_message_early
* callback; drivers expecting to handle incoming messages synchronously
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
index 2475ef9147..4885b065b8 100644
--- a/include/linux/soc/mediatek/mtk-mmsys.h
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -62,6 +62,14 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_OVL_2L1,
DDP_COMPONENT_OVL_2L2,
DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_PADDING0,
+ DDP_COMPONENT_PADDING1,
+ DDP_COMPONENT_PADDING2,
+ DDP_COMPONENT_PADDING3,
+ DDP_COMPONENT_PADDING4,
+ DDP_COMPONENT_PADDING5,
+ DDP_COMPONENT_PADDING6,
+ DDP_COMPONENT_PADDING7,
DDP_COMPONENT_POSTMASK0,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index c383579a00..66f814b63a 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -1042,7 +1042,7 @@ int sdw_compute_params(struct sdw_bus *bus);
int sdw_stream_add_master(struct sdw_bus *bus,
struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
+ const struct sdw_port_config *port_config,
unsigned int num_ports,
struct sdw_stream_runtime *stream);
int sdw_stream_remove_master(struct sdw_bus *bus,
@@ -1064,7 +1064,7 @@ void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id
int sdw_stream_add_slave(struct sdw_slave *slave,
struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
+ const struct sdw_port_config *port_config,
unsigned int num_ports,
struct sdw_stream_runtime *stream);
int sdw_stream_remove_slave(struct sdw_slave *slave,
@@ -1086,7 +1086,7 @@ int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
static inline int sdw_stream_add_slave(struct sdw_slave *slave,
struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
+ const struct sdw_port_config *port_config,
unsigned int num_ports,
struct sdw_stream_runtime *stream)
{
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 6b0a7dc48a..f866d5c8ed 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -233,6 +233,8 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
+ * not all drivers provide supports_op(), so exec_op() can return
+ * -EOPNOTSUPP if the op is not supported by the driver/controller
* @get_name: get a custom name for the SPI mem device from the controller.
* This might be needed if the controller driver has been ported
* to use the SPI mem layer and a custom name is used to keep
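[Note: the interplay described above means a caller can check spi_mem_supports_op()
first and still be prepared for exec_op() to fail with -EOPNOTSUPP when the
controller driver lacks supports_op(). A minimal sketch, using a hypothetical
read-ID op that is not part of this patch:

	#include <linux/spi/spi-mem.h>

	static int example_read_id(struct spi_mem *mem, u8 *id, size_t len)
	{
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(len, id, 1));

		if (!spi_mem_supports_op(mem, &op))
			return -EOPNOTSUPP;	/* caller picks another path */

		/* May still fail with -EOPNOTSUPP if supports_op() is absent */
		return spi_mem_exec_op(mem, &op);
	}]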
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 255a0562ae..600fbd5daf 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -20,6 +20,9 @@
#include <uapi/linux/spi/spi.h>
+/* Max no. of CS supported per spi device */
+#define SPI_CS_CNT_MAX 16
+
struct dma_chan;
struct software_node;
struct ptp_system_timestamp;
@@ -132,7 +135,8 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Chipselect, distinguishing chips handled by @controller.
+ * @chip_select: Array of physical chipselects; spi->chip_select[i] gives
+ * the physical CS for logical CS i.
* @mode: The spi mode defines how data is clocked out and in.
* This may be changed by the device's driver.
* The "active low" default for chipselect mode can be overridden
@@ -157,8 +161,8 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* the device will bind to the named driver and only the named driver.
* Do not set directly, because core frees it; use driver_set_override() to
* set or clear it.
- * @cs_gpiod: GPIO descriptor of the chipselect line (optional, NULL when
- * not using a GPIO line)
+ * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
+ * (optional, NULL when not using a GPIO line)
* @word_delay: delay to be inserted between consecutive
* words of a transfer
* @cs_setup: delay to be introduced by the controller after CS is asserted
@@ -167,6 +171,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* deasserted. If @cs_change_delay is used from @spi_transfer, then the
* two delays will be added up.
* @pcpu_statistics: statistics for the spi_device
+ * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
*
* A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
@@ -182,7 +187,7 @@ struct spi_device {
struct spi_controller *controller;
struct spi_controller *master; /* Compatibility layer */
u32 max_speed_hz;
- u8 chip_select;
+ u8 chip_select[SPI_CS_CNT_MAX];
u8 bits_per_word;
bool rt;
#define SPI_NO_TX BIT(31) /* No transmit wire */
@@ -213,7 +218,7 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- struct gpio_desc *cs_gpiod; /* Chip select GPIO descriptor */
+ struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
struct spi_delay word_delay; /* Inter-word delay */
/* CS delays */
struct spi_delay cs_setup;
@@ -223,6 +228,13 @@ struct spi_device {
/* The statistics */
struct spi_statistics __percpu *pcpu_statistics;
+ /* Bit mask of the chipselect(s) that the driver needs to use from
+ * the chipselect array. When the controller is capable of handling
+ * multiple chip selects and memories are connected in parallel,
+ * more than one bit needs to be set in cs_index_mask.
+ */
+ u32 cs_index_mask : SPI_CS_CNT_MAX;
+
/*
* Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -279,22 +291,33 @@ static inline void *spi_get_drvdata(const struct spi_device *spi)
static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx)
{
- return spi->chip_select;
+ return spi->chip_select[idx];
}
static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect)
{
- spi->chip_select = chipselect;
+ spi->chip_select[idx] = chipselect;
}
static inline struct gpio_desc *spi_get_csgpiod(const struct spi_device *spi, u8 idx)
{
- return spi->cs_gpiod;
+ return spi->cs_gpiod[idx];
}
static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod)
{
- spi->cs_gpiod = csgpiod;
+ spi->cs_gpiod[idx] = csgpiod;
+}
+
+static inline bool spi_is_csgpiod(struct spi_device *spi)
+{
+ u8 idx;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if (spi_get_csgpiod(spi, idx))
+ return true;
+ }
+ return false;
}
/**
@@ -399,6 +422,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for exclusion of multiple callers
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
+ * @multi_cs_cap: indicates that the SPI Controller can assert/de-assert
+ * more than one chip select at once.
* @setup: updates the device mode and clocking records used by a
* device's SPI controller; protocol code may call this. This
* must fail if an unrecognized or unsupported mode is requested.
@@ -461,10 +486,13 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
* call spi_finalize_current_transfer() so the subsystem
- * can issue the next transfer. Note: transfer_one and
- * transfer_one_message are mutually exclusive; when both
- * are set, the generic subsystem does not call your
- * transfer_one callback.
+ * can issue the next transfer. If the transfer fails, the
+ * driver must set the SPI_TRANS_FAIL_IO flag in
+ * spi_transfer->error before calling
+ * spi_finalize_current_transfer().
+ * Note: transfer_one and transfer_one_message are mutually
+ * exclusive; when both are set, the generic subsystem does
+ * not call your transfer_one callback.
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
* @mem_ops: optimized/dedicated operations for interactions with SPI memory.
@@ -567,6 +595,11 @@ struct spi_controller {
#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */
#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
+ /*
+ * The SPI controller has multi-chip-select capability and can
+ * assert/de-assert more than one chip select at once.
+ */
+#define SPI_CONTROLLER_MULTI_CS BIT(7)
/* Flag indicating if the allocation of this struct is devres-managed */
bool devm_allocated;
@@ -677,7 +710,8 @@ struct spi_controller {
bool rt;
bool auto_runtime_pm;
bool cur_msg_mapped;
- char last_cs;
+ char last_cs[SPI_CS_CNT_MAX];
+ char last_cs_index_mask;
bool last_cs_mode_high;
bool fallback;
struct completion xfer_completion;
@@ -1040,6 +1074,7 @@ struct spi_transfer {
unsigned len;
#define SPI_TRANS_FAIL_NO_START BIT(0)
+#define SPI_TRANS_FAIL_IO BIT(1)
u16 error;
dma_addr_t tx_dma;
@@ -1638,8 +1673,6 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
/* Compatibility layer */
#define spi_master spi_controller
-#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
-
#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
#define spi_master_set_devdata(_ctlr, _data) \
spi_controller_set_devdata(_ctlr, _data)
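[Note: taken together, the array-valued chip_select/cs_gpiod fields and
cs_index_mask let one logical device drive several physical chip selects.
A sketch of what a controller-side activation loop could look like; the
example_* helper is hypothetical and the SPI core's real code differs:

	#include <linux/gpio/consumer.h>
	#include <linux/spi/spi.h>

	static void example_set_cs(struct spi_device *spi, bool enable)
	{
		u8 idx;

		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			if (!(spi->cs_index_mask & BIT(idx)))
				continue;	/* CS not active for this device */
			if (spi_is_csgpiod(spi))
				/* NULL-safe if this slot has no GPIO */
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
							 enable);
			/* else: program spi_get_chipselect(spi, idx) into the IP */
		}
	}]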
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 31d3d747a9..3fcd20de6c 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -456,6 +456,37 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
#endif /* CONFIG_PREEMPT_RT */
/*
+ * Does a critical section need to be broken due to another
+ * task waiting? (Technically this does not depend on CONFIG_PREEMPTION,
+ * but reflects a general need for low latency.)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return spin_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically does not depend on CONFIG_PREEMPTION, but a general need
+ * for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return rwlock_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
+/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
*/
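[Note: these helpers exist for the classic lock-break pattern in long list
walks; a hedged sketch, where revalidating the cursor after re-locking is
the caller's responsibility:

	static void example_scan(spinlock_t *lock, struct list_head *head)
	{
		struct list_head *pos;

		spin_lock(lock);
		list_for_each(pos, head) {
			/* ... process one entry ... */
			if (spin_needbreak(lock) || need_resched()) {
				spin_unlock(lock);
				cond_resched();
				spin_lock(lock);
				/* a real walker must revalidate pos here */
			}
		}
		spin_unlock(lock);
	}]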
@@ -507,6 +538,8 @@ DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
raw_spin_lock(_T->lock),
raw_spin_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+
DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
raw_spin_unlock(_T->lock))
@@ -515,23 +548,62 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
raw_spin_lock_irq(_T->lock),
raw_spin_unlock_irq(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
+ raw_spin_trylock_irqsave(_T->lock, _T->flags))
+
DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+
DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
spin_lock_irq(_T->lock),
spin_unlock_irq(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
+ spin_trylock_irq(_T->lock))
+
DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
+ spin_trylock_irqsave(_T->lock, _T->flags))
+
+DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
+ read_lock(_T->lock),
+ read_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
+ read_lock_irq(_T->lock),
+ read_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
+ read_lock_irqsave(_T->lock, _T->flags),
+ read_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
+ write_lock(_T->lock),
+ write_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
+ write_lock_irq(_T->lock),
+ write_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
+ write_lock_irqsave(_T->lock, _T->flags),
+ write_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
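[Note: the new _try variants pair with scoped_cond_guard() from
<linux/cleanup.h>, added in the same series; a minimal sketch of both the
unconditional and conditional forms, assuming those helpers:

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static int example_count;

	static void example_inc(void)
	{
		guard(spinlock_irqsave)(&example_lock);
		example_count++;
	}	/* unlock + irqrestore happen at scope exit */

	static bool example_try_inc(void)
	{
		scoped_cond_guard(spinlock_try, return false, &example_lock) {
			example_count++;
		}
		return true;
	}]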
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 6c46157343..9dec4861d0 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -68,28 +68,37 @@ typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
typedef int (splice_direct_actor)(struct pipe_inode_info *,
struct splice_desc *);
-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int,
- splice_actor *);
-extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
- struct splice_desc *, splice_actor *);
-extern ssize_t splice_to_pipe(struct pipe_inode_info *,
- struct splice_pipe_desc *);
-extern ssize_t add_to_pipe(struct pipe_inode_info *,
- struct pipe_buffer *);
-long vfs_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags);
-extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
- splice_direct_actor *);
-extern long do_splice(struct file *in, loff_t *off_in,
- struct file *out, loff_t *off_out,
- size_t len, unsigned int flags);
+ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags,
+ splice_actor *actor);
+ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
+ struct splice_desc *sd, splice_actor *actor);
+ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf);
+ssize_t vfs_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+ssize_t splice_direct_to_actor(struct file *file, struct splice_desc *sd,
+ splice_direct_actor *actor);
+ssize_t do_splice(struct file *in, loff_t *off_in, struct file *out,
+ loff_t *off_out, size_t len, unsigned int flags);
+ssize_t do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len, unsigned int flags);
+ssize_t splice_file_range(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len);
-extern long do_tee(struct file *in, struct file *out, size_t len,
- unsigned int flags);
-extern ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
- loff_t *ppos, size_t len, unsigned int flags);
+static inline long splice_copy_file_range(struct file *in, loff_t pos_in,
+ struct file *out, loff_t pos_out,
+ size_t len)
+{
+ return splice_file_range(in, &pos_in, out, &pos_out, len);
+}
+
+ssize_t do_tee(struct file *in, struct file *out, size_t len,
+ unsigned int flags);
+ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
/*
* for dynamic pipe sizing
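[Note: splice_copy_file_range() packages the new splice_file_range() in the
copy_file_range() calling convention; a hypothetical filesystem fallback,
with examplefs_* purely illustrative:

	static ssize_t examplefs_copy_file_range(struct file *src, loff_t pos_in,
						 struct file *dst, loff_t pos_out,
						 size_t len, unsigned int flags)
	{
		/* ... attempt a server-side or block-level copy first ... */
		return splice_copy_file_range(src, pos_in, dst, pos_out, len);
	}]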
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 2a4ce4144f..28e8c8bd39 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -120,6 +120,9 @@ static inline void spmi_controller_put(struct spmi_controller *ctrl)
int spmi_controller_add(struct spmi_controller *ctrl);
void spmi_controller_remove(struct spmi_controller *ctrl);
+struct spmi_controller *devm_spmi_controller_alloc(struct device *parent, size_t size);
+int devm_spmi_controller_add(struct device *parent, struct spmi_controller *ctrl);
+
/**
* struct spmi_driver - SPMI slave device driver
* @driver: SPMI device drivers should initialize name and owner field of
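[Note: the devres pair removes explicit teardown from controller drivers; a
probe() sketch under assumed my_* names, not taken from this patch:

	static int my_spmi_probe(struct platform_device *pdev)
	{
		struct spmi_controller *ctrl;
		struct my_ctrl_data *data;

		ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*data));
		if (IS_ERR(ctrl))
			return PTR_ERR(ctrl);

		data = spmi_controller_get_drvdata(ctrl);
		ctrl->read_cmd = my_read_cmd;	/* hypothetical callbacks */
		ctrl->write_cmd = my_write_cmd;

		/* Both the allocation and the add are undone on driver detach */
		return devm_spmi_controller_add(&pdev->dev, ctrl);
	}]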
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index e58306783d..bf0136891a 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -11,8 +11,6 @@
* SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
* stack traces often repeat, using stack depot allows to save about 100x space.
*
- * Stack traces are never removed from the stack depot.
- *
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
@@ -32,6 +30,64 @@ typedef u32 depot_stack_handle_t;
*/
#define STACK_DEPOT_EXTRA_BITS 5
+#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
+
+#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
+#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
+#define DEPOT_STACK_ALIGN 4
+#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
+ STACK_DEPOT_EXTRA_BITS)
+
+#ifdef CONFIG_STACKDEPOT
+/* Compact structure that stores a reference to a stack. */
+union handle_parts {
+ depot_stack_handle_t handle;
+ struct {
+ u32 pool_index_plus_1 : DEPOT_POOL_INDEX_BITS;
+ u32 offset : DEPOT_OFFSET_BITS;
+ u32 extra : STACK_DEPOT_EXTRA_BITS;
+ };
+};
+
+struct stack_record {
+ struct list_head hash_list; /* Links in the hash table */
+ u32 hash; /* Hash in hash table */
+ u32 size; /* Number of stored frames */
+ union handle_parts handle; /* Constant after initialization */
+ refcount_t count;
+ union {
+ unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
+ struct {
+ /*
+ * An important invariant of the implementation is to
+ * only place a stack record onto the freelist iff its
+ * refcount is zero. Because stack records with a zero
+ * refcount are never considered as valid, it is safe to
+ * union @entries and freelist management state below.
+ * Conversely, as soon as an entry is off the freelist
+ * and its refcount becomes non-zero, the below must not
+ * be accessed until being placed back on the freelist.
+ */
+ struct list_head free_list; /* Links in the freelist */
+ unsigned long rcu_state; /* RCU cookie */
+ };
+ };
+};
+#endif
+
+typedef u32 depot_flags_t;
+
+/*
+ * Flags that can be passed to stack_depot_save_flags(); see the comment next
+ * to its declaration for more details.
+ */
+#define STACK_DEPOT_FLAG_CAN_ALLOC ((depot_flags_t)0x0001)
+#define STACK_DEPOT_FLAG_GET ((depot_flags_t)0x0002)
+
+#define STACK_DEPOT_FLAGS_NUM 2
+#define STACK_DEPOT_FLAGS_MASK ((depot_flags_t)((1 << STACK_DEPOT_FLAGS_NUM) - 1))
+
/*
* Using stack depot requires its initialization, which can be done in 3 ways:
*
@@ -69,31 +125,39 @@ static inline int stack_depot_early_init(void) { return 0; }
#endif
/**
- * __stack_depot_save - Save a stack trace to stack depot
+ * stack_depot_save_flags - Save a stack trace to stack depot
*
* @entries: Pointer to the stack trace
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
- * @can_alloc: Allocate stack pools (increased chance of failure if false)
+ * @depot_flags: Stack depot flags
+ *
+ * Saves a stack trace from @entries array of size @nr_entries.
+ *
+ * If STACK_DEPOT_FLAG_CAN_ALLOC is set in @depot_flags, stack depot can
+ * replenish the stack pools in case no space is left (allocates using GFP
+ * flags of @alloc_flags). Otherwise, stack depot avoids any allocations and
+ * fails if no space is left to store the stack trace.
*
- * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
- * %true, stack depot can replenish the stack pools in case no space is left
- * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
- * any allocations and fails if no space is left to store the stack trace.
+ * If STACK_DEPOT_FLAG_GET is set in @depot_flags, stack depot will increment
+ * the refcount on the saved stack trace if it already exists in stack depot.
+ * Users of this flag must also call stack_depot_put() when keeping the stack
+ * trace is no longer required to avoid overflowing the refcount.
*
* If the provided stack trace comes from the interrupt context, only the part
* up to the interrupt entry is saved.
*
- * Context: Any context, but setting @can_alloc to %false is required if
+ * Context: Any context, but leaving STACK_DEPOT_FLAG_CAN_ALLOC unset is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case for contexts where neither %GFP_ATOMIC nor
* %GFP_NOWAIT can be used (NMI, raw_spin_lock).
*
* Return: Handle of the stack struct stored in depot, 0 on failure
*/
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- gfp_t gfp_flags, bool can_alloc);
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t gfp_flags,
+ depot_flags_t depot_flags);
/**
* stack_depot_save - Save a stack trace to stack depot
@@ -102,8 +166,11 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
*
- * Context: Contexts where allocations via alloc_pages() are allowed.
- * See __stack_depot_save() for more details.
+ * Does not increment the refcount on the saved stack trace; see
+ * stack_depot_save_flags() for more details.
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed;
+ * see stack_depot_save_flags() for more details.
*
* Return: Handle of the stack trace stored in depot, 0 on failure
*/
@@ -142,6 +209,18 @@ int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);
/**
+ * stack_depot_put - Drop a reference to a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ *
+ * The stack trace is evicted from stack depot once all references to it have
+ * been dropped (once the number of stack_depot_put() calls matches the
+ * number of stack_depot_save_flags() calls with STACK_DEPOT_FLAG_GET set for
+ * this stack trace).
+ */
+void stack_depot_put(depot_stack_handle_t handle);
+
+/**
* stack_depot_set_extra_bits - Set extra bits in a stack depot handle
*
* @handle: Stack depot handle returned from stack_depot_save()
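[Note: the GET flag and stack_depot_put() form a refcounted lifecycle; a
minimal sketch, where the buffer size and skip count are arbitrary choices:

	#include <linux/stackdepot.h>
	#include <linux/stacktrace.h>

	static depot_stack_handle_t example_record_stack(gfp_t gfp)
	{
		unsigned long entries[16];
		unsigned int nr_entries;

		nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
		/* Takes a reference; may allocate a new pool using @gfp */
		return stack_depot_save_flags(entries, nr_entries, gfp,
					      STACK_DEPOT_FLAG_CAN_ALLOC |
					      STACK_DEPOT_FLAG_GET);
	}

	static void example_release_stack(depot_stack_handle_t handle)
	{
		if (handle)
			stack_depot_put(handle); /* evicted once refcount hits 0 */
	}]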
diff --git a/include/linux/string.h b/include/linux/string.h
index ce137830a0..ab148d8dbf 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -66,9 +66,6 @@ extern char * strcpy(char *,const char *);
#ifndef __HAVE_ARCH_STRNCPY
extern char * strncpy(char *,const char *, __kernel_size_t);
#endif
-#ifndef __HAVE_ARCH_STRLCPY
-size_t strlcpy(char *, const char *, size_t);
-#endif
#ifndef __HAVE_ARCH_STRSCPY
ssize_t strscpy(char *, const char *, size_t);
#endif
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index db30a159f9..f22bf915dc 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -20,7 +20,8 @@
#ifdef CONFIG_SUNRPC_BACKCHANNEL
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
-void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
+void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task,
+ const struct rpc_timeout *to);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index e9d4377d03..5e9d1469c6 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -252,7 +252,6 @@ void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *,
const char *rpc_proc_name(const struct rpc_task *task);
-void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 8ada7dc802..0c77ba488b 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -38,6 +38,17 @@ struct rpc_wait {
};
/*
+ * This describes a timeout strategy
+ */
+struct rpc_timeout {
+ unsigned long to_initval, /* initial timeout */
+ to_maxval, /* max timeout */
+ to_increment; /* if !exponential */
+ unsigned int to_retries; /* max # of retries */
+ unsigned char to_exponential;
+};
+
+/*
* This is the RPC task struct
*/
struct rpc_task {
@@ -186,7 +197,7 @@ struct rpc_wait_queue {
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char nr; /* # tasks remaining for cookie */
- unsigned short qlen; /* total # tasks waiting in queue */
+ unsigned int qlen; /* total # tasks waiting in queue */
struct rpc_timer timer_list;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
const char * name;
@@ -205,7 +216,8 @@ struct rpc_wait_queue {
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ struct rpc_timeout *timeout);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index b10f987509..67cf1c9efd 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -69,7 +69,6 @@ struct svc_serv {
struct svc_program * sv_program; /* RPC program */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
- struct kref sv_refcnt;
unsigned int sv_nrthreads; /* # of server threads */
unsigned int sv_maxconn; /* max connections allowed or
* '0' causing max to be based
@@ -97,31 +96,13 @@ struct svc_serv {
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
-/**
- * svc_get() - increment reference count on a SUNRPC serv
- * @serv: the svc_serv to have count incremented
- *
- * Returns: the svc_serv that was passed in.
- */
-static inline struct svc_serv *svc_get(struct svc_serv *serv)
-{
- kref_get(&serv->sv_refcnt);
- return serv;
-}
-
-void svc_destroy(struct kref *);
+/* This is used by pool_stats to find and lock an svc */
+struct svc_info {
+ struct svc_serv *serv;
+ struct mutex *mutex;
+};
-/**
- * svc_put - decrement reference count on a SUNRPC serv
- * @serv: the svc_serv to have count decremented
- *
- * When the reference count reaches zero, svc_destroy()
- * is called to clean up and free the serv.
- */
-static inline void svc_put(struct svc_serv *serv)
-{
- kref_put(&serv->sv_refcnt, svc_destroy);
-}
+void svc_destroy(struct svc_serv **svcp);
/*
* Maximum payload size supported by a kernel RPC server.
@@ -250,6 +231,8 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+ unsigned long bc_to_initval;
+ unsigned int bc_to_retries;
void ** rq_lease_breaker; /* The v4 client breaking a lease */
unsigned int rq_status_counter; /* RPC processing counter */
};
@@ -260,8 +243,6 @@ enum {
RQ_LOCAL, /* local request */
RQ_USEDEFERRAL, /* use deferral */
RQ_DROPME, /* drop current reply */
- RQ_SPLICE_OK, /* turned off in gss privacy to prevent
- * encrypting page cache pages */
RQ_VICTIM, /* Have agreed to shut down */
RQ_DATA, /* request has data */
};
@@ -433,7 +414,7 @@ void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
int (*threadfn)(void *data));
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
-int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+int svc_pool_stats_open(struct svc_info *si, struct file *file);
void svc_process(struct svc_rqst *rqstp);
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int svc_register(const struct svc_serv *, struct net *, const int,
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index a5ee0af2a3..e7595ae62f 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -65,6 +65,7 @@ extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;
+extern struct workqueue_struct *svcrdma_wq;
extern struct percpu_counter svcrdma_stat_read;
extern struct percpu_counter svcrdma_stat_recv;
@@ -97,6 +98,7 @@ struct svcxprt_rdma {
u32 sc_pending_recvs;
u32 sc_recv_batch;
struct list_head sc_rq_dto_q;
+ struct list_head sc_read_complete_q;
spinlock_t sc_rq_dto_lock;
struct ib_qp *sc_qp;
struct ib_cq *sc_rq_cq;
@@ -115,6 +117,13 @@ struct svcxprt_rdma {
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
+static inline struct svcxprt_rdma *svc_rdma_rqst_rdma(struct svc_rqst *rqstp)
+{
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+
+ return container_of(xprt, struct svcxprt_rdma, sc_xprt);
+}
+
/*
* Default connection parameters
*/
@@ -126,6 +135,43 @@ enum {
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+/**
+ * svc_rdma_recv_cid_init - Initialize a Receive Queue completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ */
+static inline void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
+ struct rpc_rdma_cid *cid)
+{
+ cid->ci_queue_id = rdma->sc_rq_cq->res.id;
+ cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
+/**
+ * svc_rdma_send_cid_init - Initialize a Send Queue completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ */
+static inline void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
+ struct rpc_rdma_cid *cid)
+{
+ cid->ci_queue_id = rdma->sc_sq_cq->res.id;
+ cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
+/*
+ * A chunk context tracks all I/O for moving one Read or Write
+ * chunk. This is a set of rdma_rw's that handle data movement
+ * for all segments of one chunk.
+ */
+struct svc_rdma_chunk_ctxt {
+ struct rpc_rdma_cid cc_cid;
+ struct ib_cqe cc_cqe;
+ struct list_head cc_rwctxts;
+ ktime_t cc_posttime;
+ int cc_sqecount;
+};
+
struct svc_rdma_recv_ctxt {
struct llist_node rc_node;
struct list_head rc_list;
@@ -136,22 +182,33 @@ struct svc_rdma_recv_ctxt {
void *rc_recv_buf;
struct xdr_stream rc_stream;
u32 rc_byte_len;
- unsigned int rc_page_count;
u32 rc_inv_rkey;
__be32 rc_msgtype;
+ /* State for pulling a Read chunk */
+ unsigned int rc_pageoff;
+ unsigned int rc_curpage;
+ unsigned int rc_readbytes;
+ struct xdr_buf rc_saved_arg;
+ struct svc_rdma_chunk_ctxt rc_cc;
+
struct svc_rdma_pcl rc_call_pcl;
struct svc_rdma_pcl rc_read_pcl;
struct svc_rdma_chunk *rc_cur_result_payload;
struct svc_rdma_pcl rc_write_pcl;
struct svc_rdma_pcl rc_reply_pcl;
+
+ unsigned int rc_page_count;
+ struct page *rc_pages[RPCSVC_MAXPAGES];
};
struct svc_rdma_send_ctxt {
struct llist_node sc_node;
struct rpc_rdma_cid sc_cid;
+ struct work_struct sc_work;
+ struct svcxprt_rdma *sc_rdma;
struct ib_send_wr sc_send_wr;
struct ib_cqe sc_cqe;
struct xdr_buf sc_hdrbuf;
@@ -180,6 +237,11 @@ extern int svc_rdma_recvfrom(struct svc_rqst *);
/* svc_rdma_rw.c */
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
+extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
+ struct svc_rdma_chunk_ctxt *cc);
+extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
+ struct svc_rdma_chunk_ctxt *cc,
+ enum dma_data_direction dir);
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
const struct svc_rdma_chunk *chunk,
const struct xdr_buf *xdr);
@@ -200,7 +262,8 @@ extern int svc_rdma_send(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
- const struct svc_rdma_recv_ctxt *rctxt,
+ const struct svc_rdma_pcl *write_pcl,
+ const struct svc_rdma_pcl *reply_pcl,
const struct xdr_buf *xdr);
extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 6f90203edb..61c455f1e1 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -131,8 +131,11 @@ enum svc_auth_status {
* This call releases a domain.
*
* set_client()
- * Givens a pending request (struct svc_rqst), finds and assigns
+ * Given a pending request (struct svc_rqst), finds and assigns
* an appropriate 'auth_domain' as the client.
+ *
+ * pseudoflavor()
+ * Returns RPC_AUTH pseudoflavor in use by @rqstp.
*/
struct auth_ops {
char * name;
@@ -143,11 +146,13 @@ struct auth_ops {
int (*release)(struct svc_rqst *rqstp);
void (*domain_release)(struct auth_domain *dom);
enum svc_auth_status (*set_client)(struct svc_rqst *rqstp);
+ rpc_authflavor_t (*pseudoflavor)(struct svc_rqst *rqstp);
};
struct svc_xprt;
extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
+extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
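[Note: with the new hook, svc_auth_flavor() can ask the flavor module
directly; a sketch of what an auth_ops implementation might add,
illustrative only:

	static rpc_authflavor_t example_pseudoflavor(struct svc_rqst *rqstp)
	{
		return RPC_AUTH_UNIX;	/* GSS would map this per request */
	}

	static struct auth_ops example_auth_ops = {
		.name		= "example",
		.flavour	= RPC_AUTH_UNIX,
		/* .accept, .release, .set_client, ... as before */
		.pseudoflavor	= example_pseudoflavor,
	};]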
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index f85d3a0dac..464f6a9492 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -30,17 +30,6 @@
#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
-/*
- * This describes a timeout strategy
- */
-struct rpc_timeout {
- unsigned long to_initval, /* initial timeout */
- to_maxval, /* max timeout */
- to_increment; /* if !exponential */
- unsigned int to_retries; /* max # of retries */
- unsigned char to_exponential;
-};
-
enum rpc_display_format_t {
RPC_DISPLAY_ADDR = 0,
RPC_DISPLAY_PORT,
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
index 42b249b4c2..8cd8c38cf3 100644
--- a/include/linux/surface_aggregator/device.h
+++ b/include/linux/surface_aggregator/device.h
@@ -193,7 +193,6 @@ struct ssam_device_driver {
#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
-extern struct bus_type ssam_bus_type;
extern const struct device_type ssam_device_type;
/**
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
index 5c4ae1a261..d8dbef6b7f 100644
--- a/include/linux/surface_aggregator/serial_hub.h
+++ b/include/linux/surface_aggregator/serial_hub.h
@@ -12,7 +12,7 @@
#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
-#include <linux/crc-ccitt.h>
+#include <linux/crc-itu-t.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
@@ -188,7 +188,7 @@ static_assert(sizeof(struct ssh_command) == 8);
*/
static inline u16 ssh_crc(const u8 *buf, size_t len)
{
- return crc_ccitt_false(0xffff, buf, len);
+ return crc_itu_t(0xffff, buf, len);
}
/*
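[Note: both helpers implement the same MSB-first CRC-16 over polynomial
0x1021, so with the 0xffff seed crc_itu_t() yields the identical checksum
that crc_ccitt_false() did; only the helper changes, not the SSH frame
format.]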
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e73285e3c8..8d28f6091a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -397,9 +397,6 @@ void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);
-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
- struct vm_area_struct *vma);
-
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
@@ -490,13 +487,12 @@ extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
-extern struct swap_info_struct *page_swap_info(struct page *);
-extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
+struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
-sector_t swap_page_sector(struct page *page);
+sector_t swap_folio_sector(struct folio *folio);
static inline void put_swap_device(struct swap_info_struct *si)
{
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index bff1e8d97d..925c84653a 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -390,6 +390,35 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
}
#endif /* CONFIG_MIGRATION */
+#ifdef CONFIG_MEMORY_FAILURE
+
+/*
+ * Support for hardware poisoned pages
+ */
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+ BUG_ON(!PageLocked(page));
+ return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+}
+
+static inline int is_hwpoison_entry(swp_entry_t entry)
+{
+ return swp_type(entry) == SWP_HWPOISON;
+}
+
+#else
+
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+ return swp_entry(0, 0);
+}
+
+static inline int is_hwpoison_entry(swp_entry_t swp)
+{
+ return 0;
+}
+#endif
+
typedef unsigned long pte_marker;
#define PTE_MARKER_UFFD_WP BIT(0)
@@ -470,8 +499,9 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
/*
* A pfn swap entry is a special type of swap entry that always has a pfn stored
- * in the swap offset. They are used to represent unaddressable device memory
- * and to restrict access to a page undergoing migration.
+ * in the swap offset. They can either be used to represent unaddressable device
+ * memory, to restrict access to a page undergoing migration or to represent a
+ * pfn which has been hwpoisoned and unmapped.
*/
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
@@ -479,7 +509,7 @@ static inline bool is_pfn_swap_entry(swp_entry_t entry)
BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
return is_migration_entry(entry) || is_device_private_entry(entry) ||
- is_device_exclusive_entry(entry);
+ is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
}
struct page_vma_mapped_walk;
@@ -548,35 +578,6 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-#ifdef CONFIG_MEMORY_FAILURE
-
-/*
- * Support for hardware poisoned pages
- */
-static inline swp_entry_t make_hwpoison_entry(struct page *page)
-{
- BUG_ON(!PageLocked(page));
- return swp_entry(SWP_HWPOISON, page_to_pfn(page));
-}
-
-static inline int is_hwpoison_entry(swp_entry_t entry)
-{
- return swp_type(entry) == SWP_HWPOISON;
-}
-
-#else
-
-static inline swp_entry_t make_hwpoison_entry(struct page *page)
-{
- return swp_entry(0, 0);
-}
-
-static inline int is_hwpoison_entry(swp_entry_t swp)
-{
- return 0;
-}
-#endif
-
static inline int non_swap_entry(swp_entry_t entry)
{
return swp_type(entry) >= MAX_SWAPFILES;
diff --git a/include/linux/syscall_user_dispatch.h b/include/linux/syscall_user_dispatch.h
index 641ca88809..3858a6ffdd 100644
--- a/include/linux/syscall_user_dispatch.h
+++ b/include/linux/syscall_user_dispatch.h
@@ -6,16 +6,10 @@
#define _SYSCALL_USER_DISPATCH_H
#include <linux/thread_info.h>
+#include <linux/syscall_user_dispatch_types.h>
#ifdef CONFIG_GENERIC_ENTRY
-struct syscall_user_dispatch {
- char __user *selector;
- unsigned long offset;
- unsigned long len;
- bool on_dispatch;
-};
-
int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
unsigned long len, char __user *selector);
@@ -29,7 +23,6 @@ int syscall_user_dispatch_set_config(struct task_struct *task, unsigned long siz
void __user *data);
#else
-struct syscall_user_dispatch {};
static inline int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
unsigned long len, char __user *selector)
diff --git a/include/linux/syscall_user_dispatch_types.h b/include/linux/syscall_user_dispatch_types.h
new file mode 100644
index 0000000000..3be36b06c7
--- /dev/null
+++ b/include/linux/syscall_user_dispatch_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SYSCALL_USER_DISPATCH_TYPES_H
+#define _SYSCALL_USER_DISPATCH_TYPES_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_GENERIC_ENTRY
+
+struct syscall_user_dispatch {
+ char __user *selector;
+ unsigned long offset;
+ unsigned long len;
+ bool on_dispatch;
+};
+
+#else
+
+struct syscall_user_dispatch {};
+
+#endif
+
+#endif /* _SYSCALL_USER_DISPATCH_TYPES_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 59fdd40740..e619ac10cd 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -71,9 +71,12 @@ struct clone_args;
struct open_how;
struct mount_attr;
struct landlock_ruleset_attr;
+struct lsm_ctx;
enum landlock_rule_type;
struct cachestat_range;
struct cachestat;
+struct statmount;
+struct mnt_id_req;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -408,6 +411,12 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz,
asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
struct statfs64 __user *buf);
+asmlinkage long sys_statmount(const struct mnt_id_req __user *req,
+ struct statmount __user *buf, size_t bufsize,
+ unsigned int flags);
+asmlinkage long sys_listmount(const struct mnt_id_req __user *req,
+ u64 __user *mnt_ids, size_t nr_mnt_ids,
+ unsigned int flags);
asmlinkage long sys_truncate(const char __user *path, long length);
asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
#if BITS_PER_LONG == 32
@@ -950,6 +959,11 @@ asmlinkage long sys_cachestat(unsigned int fd,
struct cachestat_range __user *cstat_range,
struct cachestat __user *cstat, unsigned int flags);
asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags);
+asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ u32 *size, u32 flags);
+asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ u32 size, u32 flags);
+asmlinkage long sys_lsm_list_modules(u64 *ids, u32 *size, u32 flags);
/*
* Architecture-specific system calls
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 61b40ea81f..ee7d33b89e 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -210,11 +210,6 @@ struct ctl_table_root {
int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
};
-/* struct ctl_path describes where in the hierarchy a table is added */
-struct ctl_path {
- const char *procname;
-};
-
#define register_sysctl(path, table) \
register_sysctl_sz(path, table, ARRAY_SIZE(table))
@@ -255,8 +250,6 @@ extern int unaligned_enabled;
extern int unaligned_dump_stack;
extern int no_unaligned_warning;
-#define SYSCTL_PERM_EMPTY_DIR (1 << 0)
-
#else /* CONFIG_SYSCTL */
static inline void register_sysctl_init(const char *path, struct ctl_table *table)
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b646b574b0..a1c47a6d69 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -190,23 +190,123 @@ static inline bool tcp_rsk_used_ao(const struct request_sock *req)
#define TCP_RMEM_TO_WIN_SCALE 8
struct tcp_sock {
+ /* Cacheline organization can be found documented in
+ * Documentation/networking/net_cachelines/tcp_sock.rst.
+ * Please update the document when adding new fields.
+ */
+
/* inet_connection_sock has to be the first member of tcp_sock */
struct inet_connection_sock inet_conn;
- u16 tcp_header_len; /* Bytes of tcp header to send */
+
+ /* TX read-mostly hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_read_tx);
+ u32 max_window; /* Maximal window ever seen from peer */
+ u32 rcv_ssthresh; /* Current window clamp */
+ u32 reordering; /* Packet reordering metric. */
+ u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
u16 gso_segs; /* Max number of segs per GSO packet */
+ /* from STCP, retrans queue hinting */
+ struct sk_buff *lost_skb_hint;
+ struct sk_buff *retransmit_skb_hint;
+ __cacheline_group_end(tcp_sock_read_tx);
+
+ /* TXRX read-mostly hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_read_txrx);
+ u32 tsoffset; /* timestamp offset */
+ u32 snd_wnd; /* The window we expect to receive */
+ u32 mss_cache; /* Cached effective mss, not including SACKS */
+ u32 snd_cwnd; /* Sending congestion window */
+ u32 prr_out; /* Total number of pkts sent during Recovery. */
+ u32 lost_out; /* Lost packets */
+ u32 sacked_out; /* SACK'd packets */
+ u16 tcp_header_len; /* Bytes of tcp header to send */
+ u8 scaling_ratio; /* see tcp_win_from_space() */
+ u8 chrono_type : 2, /* current chronograph type */
+ repair : 1,
+ tcp_usec_ts : 1, /* TSval values in usec */
+ is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
+ is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
+ __cacheline_group_end(tcp_sock_read_txrx);
+
+ /* RX read-mostly hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_read_rx);
+ u32 copied_seq; /* Head of yet unread data */
+ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
+ u32 snd_wl1; /* Sequence for window update */
+ u32 tlp_high_seq; /* snd_nxt at the time of TLP */
+ u32 rttvar_us; /* smoothed mdev_max */
+ u32 retrans_out; /* Retransmitted packets out */
+ u16 advmss; /* Advertised MSS */
+ u16 urg_data; /* Saved octet of OOB data and control flags */
+ u32 lost; /* Total data packets lost incl. rexmits */
+ struct minmax rtt_min;
+ /* OOO segments go in this rbtree. Socket lock must be held. */
+ struct rb_root out_of_order_queue;
+ u32 snd_ssthresh; /* Slow start size threshold */
+ __cacheline_group_end(tcp_sock_read_rx);
+
+ /* TX read-write hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_write_tx) ____cacheline_aligned;
+ u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
+ u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
+ * total number of data segments sent.
+ */
+ u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
+ * total number of data bytes sent.
+ */
+ u32 snd_sml; /* Last byte of the most recently transmitted small packet */
+ u32 chrono_start; /* Start time in jiffies of a TCP chrono */
+ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
+ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
+ u32 pushed_seq; /* Last pushed seq, required to talk to windows */
+ u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
+ u32 mdev_us; /* medium deviation */
+ u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+ u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
+ u64 tcp_mstamp; /* most recent packet received/sent */
+ u32 rtt_seq; /* sequence number to update rttvar */
+ struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
+ struct sk_buff *highest_sack; /* skb just after the highest
+ * skb with SACKed bit set
+ * (validity guaranteed only if
+ * sacked_out > 0)
+ */
+ u8 ecn_flags; /* ECN status bits. */
+ __cacheline_group_end(tcp_sock_write_tx);
+ /* TXRX read-write hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_write_txrx);
/*
* Header prediction flags
* 0x5?10 << 16 + snd_wnd in net byte order
*/
__be32 pred_flags;
-
+ u32 rcv_nxt; /* What we want to receive next */
+ u32 snd_nxt; /* Next sequence we send */
+ u32 snd_una; /* First byte we want an ack for */
+ u32 window_clamp; /* Maximal window to advertise */
+ u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ u32 packets_out; /* Packets which are "in flight" */
+ u32 snd_up; /* Urgent pointer */
+ u32 delivered; /* Total data packets delivered incl. rexmits */
+ u32 delivered_ce; /* Like the above but only ECE marked packets */
+ u32 app_limited; /* limited until "delivered" reaches this val */
+ u32 rcv_wnd; /* Current receiver window */
/*
- * RFC793 variables by their proper names. This means you can
- * read the code and the spec side by side (and laugh ...)
- * See RFC793 and RFC1122. The RFC writes these in capitals.
+ * Options received (usually on last packet, some only on SYN packets).
*/
- u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ struct tcp_options_received rx_opt;
+ u8 nonagle : 4,/* Disable Nagle algorithm? */
+ rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
+ __cacheline_group_end(tcp_sock_write_txrx);
+
+ /* RX read-write hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_write_rx);
+ u64 bytes_received;
+ /* RFC4898 tcpEStatsAppHCThruOctetsReceived
* sum(delta(rcv_nxt)), or how many bytes
* were acked.
*/
@@ -216,46 +316,44 @@ struct tcp_sock {
u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
* total number of data segments in.
*/
- u32 rcv_nxt; /* What we want to receive next */
- u32 copied_seq; /* Head of yet unread data */
u32 rcv_wup; /* rcv_nxt on last window update sent */
- u32 snd_nxt; /* Next sequence we send */
- u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
- * The total number of segments sent.
- */
- u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
- * total number of data segments sent.
- */
- u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
- * total number of data bytes sent.
- */
+ u32 max_packets_out; /* max packets_out in last window */
+ u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */
+ u32 rate_delivered; /* saved rate sample: packets delivered */
+ u32 rate_interval_us; /* saved rate sample: time elapsed */
+ u32 rcv_rtt_last_tsecr;
+ u64 first_tx_mstamp; /* start of window send phase */
+ u64 delivered_mstamp; /* time we reached "delivered" */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
+ struct {
+ u32 rtt_us;
+ u32 seq;
+ u64 time;
+ } rcv_rtt_est;
+/* Receiver queue space */
+ struct {
+ u32 space;
+ u32 seq;
+ u64 time;
+ } rcvq_space;
+ __cacheline_group_end(tcp_sock_write_rx);
+ /* End of Hot Path */
+
+/*
+ * RFC793 variables by their proper names. This means you can
+ * read the code and the spec side by side (and laugh ...)
+ * See RFC793 and RFC1122. The RFC writes these in capitals.
+ */
u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
* total number of DSACK blocks received
*/
- u32 snd_una; /* First byte we want an ack for */
- u32 snd_sml; /* Last byte of the most recently transmitted small packet */
- u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
- u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
u32 compressed_ack_rcv_nxt;
-
- u32 tsoffset; /* timestamp offset */
-
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
- struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
-
- u32 snd_wl1; /* Sequence for window update */
- u32 snd_wnd; /* The window we expect to receive */
- u32 max_window; /* Maximal window ever seen from peer */
- u32 mss_cache; /* Cached effective mss, not including SACKS */
- u32 window_clamp; /* Maximal window to advertise */
- u32 rcv_ssthresh; /* Current window clamp */
- u8 scaling_ratio; /* see tcp_win_from_space() */
/* Information of the most recently (s)acked skb */
struct tcp_rack {
u64 mstamp; /* (Re)sent time of the skb */
@@ -268,24 +366,15 @@ struct tcp_sock {
dsack_seen:1, /* Whether DSACK seen after last adj */
advanced:1; /* mstamp advanced since last lost marking */
} rack;
- u16 advmss; /* Advertised MSS */
u8 compressed_ack;
u8 dup_ack_counter:2,
tlp_retrans:1, /* TLP is a retransmission */
- tcp_usec_ts:1, /* TSval values in usec */
- unused:4;
- u32 chrono_start; /* Start time in jiffies of a TCP chrono */
- u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
- u8 chrono_type:2, /* current chronograph type */
- rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
+ unused:5;
+ u8 thin_lto : 1,/* Use linear timeouts for thin streams */
+ recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
- is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
- fastopen_client_fail:2; /* reason why fastopen failed */
- u8 nonagle : 4,/* Disable Nagle algorithm? */
- thin_lto : 1,/* Use linear timeouts for thin streams */
- recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
- repair : 1,
+ fastopen_client_fail:2, /* reason why fastopen failed */
frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
u8 repair_queue;
u8 save_syn:2, /* Save headers of SYN packet */
@@ -293,45 +382,19 @@ struct tcp_sock {
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_fastopen_ch:1, /* Active TFO re-enabling probe */
- syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
- is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
- u32 tlp_high_seq; /* snd_nxt at the time of TLP */
+ syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
- u64 tcp_wstamp_ns; /* departure time for next sent data packet */
- u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
/* RTT measurement */
- u64 tcp_mstamp; /* most recent packet received/sent */
- u32 srtt_us; /* smoothed round trip time << 3 in usecs */
- u32 mdev_us; /* medium deviation */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
- u32 rttvar_us; /* smoothed mdev_max */
- u32 rtt_seq; /* sequence number to update rttvar */
- struct minmax rtt_min;
- u32 packets_out; /* Packets which are "in flight" */
- u32 retrans_out; /* Retransmitted packets out */
- u32 max_packets_out; /* max packets_out in last window */
- u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */
-
- u16 urg_data; /* Saved octet of OOB data and control flags */
- u8 ecn_flags; /* ECN status bits. */
u8 keepalive_probes; /* num of allowed keep alive probes */
- u32 reordering; /* Packet reordering metric. */
u32 reord_seen; /* number of data packet reordering events */
- u32 snd_up; /* Urgent pointer */
-
-/*
- * Options received (usually on last packet, some only on SYN packets).
- */
- struct tcp_options_received rx_opt;
/*
* Slow start and congestion control (see also Nagle, and Karn & Partridge)
*/
- u32 snd_ssthresh; /* Slow start size threshold */
- u32 snd_cwnd; /* Sending congestion window */
u32 snd_cwnd_cnt; /* Linear increase counter */
u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
u32 snd_cwnd_used;
@@ -339,32 +402,10 @@ struct tcp_sock {
u32 prior_cwnd; /* cwnd right before starting loss recovery */
u32 prr_delivered; /* Number of newly delivered packets to
* receiver in Recovery. */
- u32 prr_out; /* Total number of pkts sent during Recovery. */
- u32 delivered; /* Total data packets delivered incl. rexmits */
- u32 delivered_ce; /* Like the above but only ECE marked packets */
- u32 lost; /* Total data packets lost incl. rexmits */
- u32 app_limited; /* limited until "delivered" reaches this val */
- u64 first_tx_mstamp; /* start of window send phase */
- u64 delivered_mstamp; /* time we reached "delivered" */
- u32 rate_delivered; /* saved rate sample: packets delivered */
- u32 rate_interval_us; /* saved rate sample: time elapsed */
-
- u32 rcv_wnd; /* Current receiver window */
- u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
- u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
- u32 pushed_seq; /* Last pushed seq, required to talk to windows */
- u32 lost_out; /* Lost packets */
- u32 sacked_out; /* SACK'd packets */
struct hrtimer pacing_timer;
struct hrtimer compressed_ack_timer;
- /* from STCP, retrans queue hinting */
- struct sk_buff* lost_skb_hint;
- struct sk_buff *retransmit_skb_hint;
-
- /* OOO segments go in this rbtree. Socket lock must be held. */
- struct rb_root out_of_order_queue;
struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
/* SACKs data, these 2 need to be together (see tcp_options_write) */
@@ -373,12 +414,6 @@ struct tcp_sock {
struct tcp_sack_block recv_sack_cache[4];
- struct sk_buff *highest_sack; /* skb just after the highest
- * skb with SACKed bit set
- * (validity guaranteed only if
- * sacked_out > 0)
- */
-
int lost_cnt_hint;
u32 prior_ssthresh; /* ssthresh saved at recovery start */
@@ -429,21 +464,6 @@ struct tcp_sock {
u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */
-/* Receiver side RTT estimation */
- u32 rcv_rtt_last_tsecr;
- struct {
- u32 rtt_us;
- u32 seq;
- u64 time;
- } rcv_rtt_est;
-
-/* Receiver queue space */
- struct {
- u32 space;
- u32 seq;
- u64 time;
- } rcvq_space;
-
/* TCP-specific MTU probe information. */
struct {
u32 probe_seq_start;
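[Note: the __cacheline_group_*() markers come from <linux/cache.h> and
compile to zero-size members; a toy example of the pattern, including the
build-time assertion the TCP code pairs with it, assuming the v6.8 cache.h
helpers:

	#include <linux/cache.h>
	#include <linux/types.h>

	struct example_sock {
		__cacheline_group_begin(example_read_hot);
		u32	a;		/* read-mostly fast path */
		u32	b;
		__cacheline_group_end(example_read_hot);
		u32	rarely_used;	/* cold, outside the hot group */
	};

	static void __maybe_unused example_layout_check(void)
	{
		CACHELINE_ASSERT_GROUP_MEMBER(struct example_sock,
					      example_read_hot, a);
		CACHELINE_ASSERT_GROUP_MEMBER(struct example_sock,
					      example_read_hot, b);
	}]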
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 17eb1c5205..911ddf92dc 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -84,6 +84,7 @@ struct tee_param {
* @release: release this open file
* @open_session: open a new session
* @close_session: close a session
+ * @system_session: declare session as a system session
* @invoke_func: invoke a trusted function
* @cancel_req: request cancel of an ongoing invoke or open
* @supp_recv: called for supplicant to get a command
@@ -100,6 +101,7 @@ struct tee_driver_ops {
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param);
int (*close_session)(struct tee_context *ctx, u32 session);
+ int (*system_session)(struct tee_context *ctx, u32 session);
int (*invoke_func)(struct tee_context *ctx,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
@@ -430,6 +432,20 @@ int tee_client_open_session(struct tee_context *ctx,
int tee_client_close_session(struct tee_context *ctx, u32 session);
/**
+ * tee_client_system_session() - Declare session as a system session
+ * @ctx: TEE Context
+ * @session: Session id
+ *
+ * This function requests TEE to provision an entry context ready to use for
+ * that session only. The provisioned entry context is used for command
+ * invocation and session closure, not for command cancelling requests.
+ * TEE releases the provisioned context upon session closure.
+ *
+ * Return: < 0 on error, else 0 if an entry context has been provisioned.
+ */
+int tee_client_system_session(struct tee_context *ctx, u32 session);
+
+/**
* tee_client_invoke_func() - Invoke a function in a Trusted Application
* @ctx: TEE Context
* @arg: Invoke arguments, see description of
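As a usage sketch of the new op (not part of the patch; the UUID and login
fields would be filled in by a real client, and error paths are trimmed):

#include <linux/tee_drv.h>

static int open_system_session(struct tee_context *ctx, u32 *session)
{
        struct tee_ioctl_open_session_arg arg = { };
        int rc;

        /* A real driver fills arg.uuid / arg.clnt_login here. */
        rc = tee_client_open_session(ctx, &arg, NULL);
        if (rc || arg.ret)
                return rc ? rc : -EINVAL;

        /* Ask the TEE to provision a dedicated entry context. */
        rc = tee_client_system_session(ctx, arg.session);
        if (rc) {
                tee_client_close_session(ctx, arg.session);
                return rc;
        }

        *session = arg.session;
        return 0;
}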
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 1da1739d75..b7a3deb372 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -32,6 +32,7 @@
struct thermal_zone_device;
struct thermal_cooling_device;
struct thermal_instance;
+struct thermal_debugfs;
struct thermal_attr;
enum thermal_trend {
@@ -51,18 +52,23 @@ enum thermal_notify_event {
THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */
THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */
THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */
+ THERMAL_TZ_BIND_CDEV, /* Cooling dev is bound to the thermal zone */
+ THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbound from the thermal zone */
+ THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */
};
/**
* struct thermal_trip - representation of a point in temperature domain
* @temperature: temperature value in milliCelsius
* @hysteresis: relative hysteresis in milliCelsius
+ * @threshold: trip crossing notification threshold in milliCelsius
* @type: trip point type
* @priv: pointer to driver data associated with this trip
*/
struct thermal_trip {
int temperature;
int hysteresis;
+ int threshold;
enum thermal_trip_type type;
void *priv;
};
@@ -97,7 +103,7 @@ struct thermal_cooling_device_ops {
struct thermal_cooling_device {
int id;
- char *type;
+ const char *type;
unsigned long max_state;
struct device device;
struct device_node *np;
@@ -108,6 +114,9 @@ struct thermal_cooling_device {
struct mutex lock; /* protect thermal_instances list */
struct list_head thermal_instances;
struct list_head node;
+#ifdef CONFIG_THERMAL_DEBUGFS
+ struct thermal_debugfs *debugfs;
+#endif
};
/**
@@ -115,6 +124,7 @@ struct thermal_cooling_device {
* @id: unique id number for each thermal zone
* @type: the thermal zone device type
* @device: &struct device for this thermal zone
+ * @removal: removal completion
* @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
* @trip_type_attrs: attributes for trip points for sysfs: trip type
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
@@ -155,6 +165,7 @@ struct thermal_zone_device {
int id;
char type[THERMAL_NAME_LENGTH];
struct device device;
+ struct completion removal;
struct attribute_group trips_attribute_group;
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
@@ -182,6 +193,9 @@ struct thermal_zone_device {
struct list_head node;
struct delayed_work poll_queue;
enum thermal_notify_event notify_event;
+#ifdef CONFIG_THERMAL_DEBUGFS
+ struct thermal_debugfs *debugfs;
+#endif
bool suspended;
};
@@ -195,6 +209,8 @@ struct thermal_zone_device {
* thermal zone.
* @throttle: callback called for every trip point even if temperature is
* below the trip point temperature
+ * @update_tz: callback called when thermal zone internals have changed, e.g.
+ * a thermal cooling instance was added or removed
* @governor_list: node in thermal_governor_list (in thermal_core.c)
*/
struct thermal_governor {
@@ -203,6 +219,8 @@ struct thermal_governor {
void (*unbind_from_tz)(struct thermal_zone_device *tz);
int (*throttle)(struct thermal_zone_device *tz,
const struct thermal_trip *trip);
+ void (*update_tz)(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason);
struct list_head governor_list;
};
@@ -282,10 +300,6 @@ int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip);
int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip);
-
-int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
- const struct thermal_trip *trip);
-
int for_each_thermal_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
@@ -293,16 +307,11 @@ int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
int thermal_zone_get_num_trips(struct thermal_zone_device *tz);
+void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int temp);
int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp);
-#ifdef CONFIG_THERMAL_ACPI
-int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
-int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
-int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
-int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
-#endif
-
#ifdef CONFIG_THERMAL
struct thermal_zone_device *thermal_zone_device_register_with_trips(
const char *type,
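A sketch of the replacement flow for the dropped thermal_zone_set_trip():
iterate the trips and use the new setter, which in this release also emits
the trip-change notification that a bare assignment would skip. The driver
code is hypothetical, and the zone lock is assumed held by the iteration
context:

static int bump_trip(struct thermal_trip *trip, void *data)
{
        struct thermal_zone_device *tz = data;

        /* Raise the trip by 1 degree C (values are milliCelsius). */
        thermal_zone_set_trip_temp(tz, trip, trip->temperature + 1000);
        return 0;
}

static void bump_all_trips(struct thermal_zone_device *tz)
{
        for_each_thermal_trip(tz, bump_trip, tz);
}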
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 6151c210d9..2c835e5c41 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -86,7 +86,7 @@ struct tb {
unsigned long privdata[];
};
-extern struct bus_type tb_bus_type;
+extern const struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
index 03d9c5ac01..876e31b446 100644
--- a/include/linux/time_namespace.h
+++ b/include/linux/time_namespace.h
@@ -7,10 +7,13 @@
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
+#include <linux/time64.h>
struct user_namespace;
extern struct user_namespace init_user_ns;
+struct vm_area_struct;
+
struct timens_offsets {
struct timespec64 monotonic;
struct timespec64 boottime;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index fe1e467ba0..7c43e98cf2 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -4,6 +4,7 @@
#include <linux/errno.h>
#include <linux/clocksource_ids.h>
+#include <linux/ktime.h>
/* Included from linux/ktime.h */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 26a545bb01..f18a2f1eb7 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -7,21 +7,7 @@
#include <linux/stddef.h>
#include <linux/debugobjects.h>
#include <linux/stringify.h>
-
-struct timer_list {
- /*
- * All fields that change during normal runtime grouped to the
- * same cacheline
- */
- struct hlist_node entry;
- unsigned long expires;
- void (*function)(struct timer_list *);
- u32 flags;
-
-#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
-#endif
-};
+#include <linux/timer_types.h>
#ifdef CONFIG_LOCKDEP
/*
diff --git a/include/linux/timer_types.h b/include/linux/timer_types.h
new file mode 100644
index 0000000000..fae5a388f9
--- /dev/null
+++ b/include/linux/timer_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TIMER_TYPES_H
+#define _LINUX_TIMER_TYPES_H
+
+#include <linux/lockdep_types.h>
+#include <linux/types.h>
+
+struct timer_list {
+ /*
+ * All fields that change during normal runtime grouped to the
+ * same cacheline
+ */
+ struct hlist_node entry;
+ unsigned long expires;
+ void (*function)(struct timer_list *);
+ u32 flags;
+
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
+};
+
+#endif /* _LINUX_TIMER_TYPES_H */
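The split follows the <name>_types.h pattern applied across this release:
a header that only embeds a timer can now include the lightweight types
header rather than all of timer.h, trimming the include graph. A minimal
(hypothetical) consumer:

#include <linux/timer_types.h>

struct my_device {
        struct timer_list poll_timer;   /* definition only; the timer API
                                         * still requires <linux/timer.h> */
        int state;
};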
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index adc80e2916..62973f7d46 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -3,18 +3,7 @@
#define _LINUX_TIMERQUEUE_H
#include <linux/rbtree.h>
-#include <linux/ktime.h>
-
-
-struct timerqueue_node {
- struct rb_node node;
- ktime_t expires;
-};
-
-struct timerqueue_head {
- struct rb_root_cached rb_root;
-};
-
+#include <linux/timerqueue_types.h>
extern bool timerqueue_add(struct timerqueue_head *head,
struct timerqueue_node *node);
diff --git a/include/linux/timerqueue_types.h b/include/linux/timerqueue_types.h
new file mode 100644
index 0000000000..dc298d0923
--- /dev/null
+++ b/include/linux/timerqueue_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TIMERQUEUE_TYPES_H
+#define _LINUX_TIMERQUEUE_TYPES_H
+
+#include <linux/rbtree_types.h>
+#include <linux/types.h>
+
+struct timerqueue_node {
+ struct rb_node node;
+ ktime_t expires;
+};
+
+struct timerqueue_head {
+ struct rb_root_cached rb_root;
+};
+
+#endif /* _LINUX_TIMERQUEUE_TYPES_H */
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index 1c3948a1d6..3c13240077 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -106,6 +106,10 @@ int tnum_sbin(char *str, size_t size, struct tnum a);
struct tnum tnum_subreg(struct tnum a);
/* Returns the tnum with the lower 32-bit subreg cleared */
struct tnum tnum_clear_subreg(struct tnum a);
+/* Returns the tnum with the lower 32-bit subreg in *reg* set to the lower
+ * 32-bit subreg in *subreg*
+ */
+struct tnum tnum_with_subreg(struct tnum reg, struct tnum subreg);
/* Returns the tnum with the lower 32-bit subreg set to value */
struct tnum tnum_const_subreg(struct tnum a, u32 value);
/* Returns true if 32-bit subreg @a is a known constant */
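A sketch of what the new helper computes, using the existing tnum_const()
constructor (values illustrative):

static struct tnum subreg_demo(void)
{
        struct tnum reg = tnum_const(0xdeadbeef00000000ULL);
        struct tnum sub = tnum_const(0x1234ULL);

        /* Upper 32 bits come from reg, lower 32 bits from sub; the
         * result represents the constant 0xdeadbeef00001234.
         */
        return tnum_with_subreg(reg, sub);
}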
diff --git a/include/linux/torture.h b/include/linux/torture.h
index c98d0c83d1..1541454da0 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -21,6 +21,7 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/hrtimer.h>
/* Definitions for a non-string torture-test module parameter. */
#define torture_param(type, name, init, msg) \
diff --git a/include/linux/trace.h b/include/linux/trace.h
index 2a70a44718..fdcd76b7be 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -51,7 +51,7 @@ int trace_array_printk(struct trace_array *tr, unsigned long ip,
const char *fmt, ...);
int trace_array_init_printk(struct trace_array *tr);
void trace_array_put(struct trace_array *tr);
-struct trace_array *trace_array_get_by_name(const char *name);
+struct trace_array *trace_array_get_by_name(const char *name, const char *systems);
int trace_array_destroy(struct trace_array *tr);
/* For osnoise tracer */
@@ -84,7 +84,7 @@ static inline int trace_array_init_printk(struct trace_array *tr)
static inline void trace_array_put(struct trace_array *tr)
{
}
-static inline struct trace_array *trace_array_get_by_name(const char *name)
+static inline struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
{
return NULL;
}
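The new @systems argument restricts which event systems are created for the
instance; NULL keeps the old create-everything behaviour. A hedged sketch:

static void trace_instance_demo(void)
{
        /* Instance that only carries the "sched" event system. */
        struct trace_array *tr = trace_array_get_by_name("my_instance",
                                                         "sched");

        if (tr)
                trace_array_put(tr);    /* drop the reference when done */
}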
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index d68ff9b124..fc6d0af56b 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -103,13 +103,16 @@ struct trace_iterator {
unsigned int temp_size;
char *fmt; /* modified format holder */
unsigned int fmt_size;
- long wait_index;
+ atomic_t wait_index;
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
cpumask_var_t started;
+ /* Set when the file is closed to prevent new waiters */
+ bool closed;
+
/* it's true when current open file is snapshot */
bool snapshot;
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 3691e0e76a..1ef95c0287 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -8,11 +8,20 @@
/*
* Trace sequences are used to allow a function to call several other functions
- * to create a string of data to use (up to a max of PAGE_SIZE).
+ * to create a string of data to use.
+ *
+ * Have the trace seq be 8K, which is typically PAGE_SIZE * 2 on
+ * most architectures. TRACE_SEQ_BUFFER_SIZE (TRACE_SEQ_SIZE minus
+ * the other fields of trace_seq) is the max size the output of a
+ * trace event may be.
*/
+#define TRACE_SEQ_SIZE 8192
+#define TRACE_SEQ_BUFFER_SIZE (TRACE_SEQ_SIZE - \
+ (sizeof(struct seq_buf) + sizeof(size_t) + sizeof(int)))
+
struct trace_seq {
- char buffer[PAGE_SIZE];
+ char buffer[TRACE_SEQ_BUFFER_SIZE];
struct seq_buf seq;
size_t readpos;
int full;
@@ -21,7 +30,7 @@ struct trace_seq {
static inline void
trace_seq_init(struct trace_seq *s)
{
- seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
+ seq_buf_init(&s->seq, s->buffer, TRACE_SEQ_BUFFER_SIZE);
s->full = 0;
s->readpos = 0;
}
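Back-of-the-envelope, for a typical LP64 build (not normative; the formula
ignores struct padding):

/* sizeof(struct seq_buf) == 24 (a pointer plus two size_t),
 * sizeof(size_t) == 8, sizeof(int) == 4, so:
 *
 *      TRACE_SEQ_BUFFER_SIZE = 8192 - (24 + 8 + 4) = 8156 bytes
 *
 * i.e. a single trace event may emit roughly 8K of text; alignment
 * padding can push sizeof(struct trace_seq) slightly past 8192.
 */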
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 4b6340ac2a..8c76fd97d4 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -242,7 +242,7 @@ struct tty_struct {
void *driver_data;
spinlock_t files_lock;
int write_cnt;
- unsigned char *write_buf;
+ u8 *write_buf;
struct list_head tty_files;
@@ -393,8 +393,10 @@ extern const struct class tty_class;
* tty_kref_get - get a tty reference
* @tty: tty device
*
- * Returns: a new reference to a tty object. The caller must hold sufficient
- * locks/counts to ensure that their existing reference cannot go away
+ * Returns: a new reference to a tty object
+ *
+ * Locking: The caller must hold sufficient locks/counts to ensure that their
+ * existing reference cannot go away.
*/
static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
{
@@ -408,8 +410,8 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout);
void stop_tty(struct tty_struct *tty);
void start_tty(struct tty_struct *tty);
void tty_write_message(struct tty_struct *tty, char *msg);
-int tty_send_xchar(struct tty_struct *tty, char ch);
-int tty_put_char(struct tty_struct *tty, unsigned char c);
+int tty_send_xchar(struct tty_struct *tty, u8 ch);
+int tty_put_char(struct tty_struct *tty, u8 c);
unsigned int tty_chars_in_buffer(struct tty_struct *tty);
unsigned int tty_write_room(struct tty_struct *tty);
void tty_driver_flush_buffer(struct tty_struct *tty);
@@ -419,6 +421,7 @@ bool tty_unthrottle_safe(struct tty_struct *tty);
int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
int tty_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount);
+int tty_get_tiocm(struct tty_struct *tty);
int is_current_pgrp_orphaned(void);
void tty_hangup(struct tty_struct *tty);
void tty_vhangup(struct tty_struct *tty);
@@ -436,12 +439,11 @@ void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud,
* tty_get_baud_rate - get tty bit rates
* @tty: tty to query
*
- * Returns: the baud rate as an integer for this terminal. The termios lock
- * must be held by the caller and the terminal bit flags may be updated.
+ * Returns: the baud rate as an integer for this terminal
*
- * Locking: none
+ * Locking: The termios lock must be held by the caller.
*/
-static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
+static inline speed_t tty_get_baud_rate(const struct tty_struct *tty)
{
return tty_termios_baud_rate(&tty->termios);
}
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 18beff0cec..7372124fbf 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -72,8 +72,7 @@ struct serial_struct;
* is closed for the last time freeing up the resources. This is
* actually the second part of shutdown for routines that might sleep.
*
- * @write: ``ssize_t ()(struct tty_struct *tty, const unsigned char *buf,
- * size_t count)``
+ * @write: ``ssize_t ()(struct tty_struct *tty, const u8 *buf, size_t count)``
*
* This routine is called by the kernel to write a series (@count) of
* characters (@buf) to the @tty device. The characters may come from
@@ -85,7 +84,7 @@ struct serial_struct;
*
* Optional: Required for writable devices. May not sleep.
*
- * @put_char: ``int ()(struct tty_struct *tty, unsigned char ch)``
+ * @put_char: ``int ()(struct tty_struct *tty, u8 ch)``
*
* This routine is called by the kernel to write a single character @ch to
* the @tty device. If the kernel uses this routine, it must call the
@@ -243,7 +242,7 @@ struct serial_struct;
* Optional: If not provided, the device is assumed to have no FIFO.
* Usually correct to invoke via tty_wait_until_sent(). May sleep.
*
- * @send_xchar: ``void ()(struct tty_struct *tty, char ch)``
+ * @send_xchar: ``void ()(struct tty_struct *tty, u8 ch)``
*
* This routine is used to send a high-priority XON/XOFF character (@ch)
* to the @tty device.
@@ -375,7 +374,7 @@ struct tty_operations {
void (*flush_buffer)(struct tty_struct *tty);
void (*set_ldisc)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, int timeout);
- void (*send_xchar)(struct tty_struct *tty, char ch);
+ void (*send_xchar)(struct tty_struct *tty, u8 ch);
int (*tiocmget)(struct tty_struct *tty);
int (*tiocmset)(struct tty_struct *tty,
unsigned int set, unsigned int clear);
diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h
index 6b367eb179..1b861f2100 100644
--- a/include/linux/tty_port.h
+++ b/include/linux/tty_port.h
@@ -114,8 +114,8 @@ struct tty_port {
unsigned char console:1;
struct mutex mutex;
struct mutex buf_mutex;
- unsigned char *xmit_buf;
- DECLARE_KFIFO_PTR(xmit_fifo, unsigned char);
+ u8 *xmit_buf;
+ DECLARE_KFIFO_PTR(xmit_fifo, u8);
unsigned int close_delay;
unsigned int closing_wait;
int drain_delay;
@@ -149,10 +149,10 @@ struct device *tty_port_register_device_attr(struct tty_port *port,
const struct attribute_group **attr_grp);
struct device *tty_port_register_device_serdev(struct tty_port *port,
struct tty_driver *driver, unsigned index,
- struct device *device);
+ struct device *host, struct device *parent);
struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
struct tty_driver *driver, unsigned index,
- struct device *device, void *drvdata,
+ struct device *host, struct device *parent, void *drvdata,
const struct attribute_group **attr_grp);
void tty_port_unregister_device(struct tty_port *port,
struct tty_driver *driver, unsigned index);
diff --git a/include/linux/types.h b/include/linux/types.h
index 253168bb3f..2bc8766ba2 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -120,6 +120,9 @@ typedef s64 int64_t;
#define aligned_be64 __aligned_be64
#define aligned_le64 __aligned_le64
+/* Nanosecond scalar representation for kernel time values */
+typedef s64 ktime_t;
+
/**
* The type used for indexing onto a disc or disc partition.
*
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index ffe48e69b3..457879938f 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -135,10 +135,11 @@ static inline void u64_stats_inc(u64_stats_t *p)
p->v++;
}
-static inline void u64_stats_init(struct u64_stats_sync *syncp)
-{
- seqcount_init(&syncp->seq);
-}
+#define u64_stats_init(syncp) \
+ do { \
+ struct u64_stats_sync *__s = (syncp); \
+ seqcount_init(&__s->seq); \
+ } while (0)
static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
{
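The function-to-macro conversion matters for lockdep: seqcount_init() is
itself a macro with a static key, so expanding it at each u64_stats_init()
call site gives every user its own lock class instead of one class shared
through the old inline. Usage is unchanged; a minimal sketch:

struct pcpu_counters {
        u64_stats_t packets;
        struct u64_stats_sync syncp;
};

static void counters_setup(struct pcpu_counters *c)
{
        u64_stats_init(&c->syncp);      /* unique lockdep key per call site */
}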
diff --git a/include/linux/udp.h b/include/linux/udp.h
index d04188714d..00790bb5cb 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -105,7 +105,7 @@ struct udp_sock {
#define udp_assign_bit(nr, sk, val) \
assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
-#define UDP_MAX_SEGMENTS (1 << 6UL)
+#define UDP_MAX_SEGMENTS (1 << 7UL)
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
@@ -140,6 +140,24 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
}
}
+DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
+#if IS_ENABLED(CONFIG_IPV6)
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+#endif
+
+static inline bool udp_encap_needed(void)
+{
+ if (static_branch_unlikely(&udp_encap_needed_key))
+ return true;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (static_branch_unlikely(&udpv6_encap_needed_key))
+ return true;
+#endif
+
+ return false;
+}
+
static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
if (!skb_is_gso(skb))
@@ -153,6 +171,16 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
!udp_test_bit(ACCEPT_FRAGLIST, sk))
return true;
+ /* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
+ * land in a tunnel as the socket check in udp_gro_receive cannot be
+ * foolproof.
+ */
+ if (udp_encap_needed() &&
+ READ_ONCE(udp_sk(sk)->encap_rcv) &&
+ !(skb_shinfo(skb)->gso_type &
+ (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
+ return true;
+
return false;
}
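Doubling UDP_MAX_SEGMENTS raises the per-send GSO budget from 64 to 128
segments; the userspace knob itself is unchanged. A sketch (userspace C,
assuming the 6.8 uapi headers):

#include <netinet/in.h>         /* IPPROTO_UDP */
#include <sys/socket.h>
#include <linux/udp.h>          /* UDP_SEGMENT */

static int enable_udp_gso(int fd, int gso_size)
{
        /* Each gso_size-byte chunk of a send becomes one segment; a
         * single call may now carry up to 128 of them.
         */
        return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
                          &gso_size, sizeof(gso_size));
}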
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index b0542cd11a..f85ec56137 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -12,20 +12,12 @@
* to detect when we overlook these differences.
*
*/
-#include <linux/types.h>
+#include <linux/uidgid_types.h>
#include <linux/highuid.h>
struct user_namespace;
extern struct user_namespace init_user_ns;
-
-typedef struct {
- uid_t val;
-} kuid_t;
-
-
-typedef struct {
- gid_t val;
-} kgid_t;
+struct uid_gid_map;
#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }
@@ -138,6 +130,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
return from_kgid(ns, gid) != (gid_t) -1;
}
+u32 map_id_down(struct uid_gid_map *map, u32 id);
+u32 map_id_up(struct uid_gid_map *map, u32 id);
+
#else
static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid)
@@ -186,6 +181,15 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
return gid_valid(gid);
}
+static inline u32 map_id_down(struct uid_gid_map *map, u32 id)
+{
+ return id;
+}
+
+static inline u32 map_id_up(struct uid_gid_map *map, u32 id)
+{
+ return id;
+}
#endif /* CONFIG_USER_NS */
#endif /* _LINUX_UIDGID_H */
diff --git a/include/linux/uidgid_types.h b/include/linux/uidgid_types.h
new file mode 100644
index 0000000000..b35ac4955a
--- /dev/null
+++ b/include/linux/uidgid_types.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UIDGID_TYPES_H
+#define _LINUX_UIDGID_TYPES_H
+
+#include <linux/types.h>
+
+typedef struct {
+ uid_t val;
+} kuid_t;
+
+typedef struct {
+ gid_t val;
+} kgid_t;
+
+#endif /* _LINUX_UIDGID_TYPES_H */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index b6214cbf2a..00cebe2b70 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -40,7 +40,6 @@ struct iov_iter_state {
struct iov_iter {
u8 iter_type;
- bool copy_mc;
bool nofault;
bool data_source;
size_t iov_offset;
@@ -248,22 +247,8 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
-static inline void iov_iter_set_copy_mc(struct iov_iter *i)
-{
- i->copy_mc = true;
-}
-
-static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
-{
- return i->copy_mc;
-}
#else
#define _copy_mc_to_iter _copy_to_iter
-static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
-static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
-{
- return false;
-}
#endif
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
@@ -347,8 +332,6 @@ ssize_t import_iovec(int type, const struct iovec __user *uvec,
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
struct iov_iter *i, bool compat);
-int import_single_range(int type, void __user *buf, size_t len,
- struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
@@ -357,7 +340,6 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter) {
.iter_type = ITER_UBUF,
- .copy_mc = false,
.data_source = direction,
.ubuf = buf,
.count = count,
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 8c61643acd..9e52179872 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -632,7 +632,6 @@ struct usb3_lpm_parameters {
* @reset_resume: needs reset instead of resume
* @port_is_suspended: the upstream port is suspended (L2 or U3)
* @slot_id: Slot ID assigned by xHCI
- * @removable: Device can be physically removed from this port
* @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout.
* @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
* @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout.
@@ -1145,16 +1144,6 @@ extern ssize_t usb_store_new_id(struct usb_dynids *dynids,
extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf);
/**
- * struct usbdrv_wrap - wrapper for driver-model structure
- * @driver: The driver-model core driver structure.
- * @for_devices: Non-zero for device drivers, 0 for interface drivers.
- */
-struct usbdrv_wrap {
- struct device_driver driver;
- int for_devices;
-};
-
-/**
* struct usb_driver - identifies USB interface driver to usbcore
* @name: The driver name should be unique among USB drivers,
* and should normally be the same as the module name.
@@ -1194,7 +1183,7 @@ struct usbdrv_wrap {
* is bound to the driver.
* @dynids: used internally to hold the list of dynamically added device
* ids for this driver.
- * @drvwrap: Driver-model core structure wrapper.
+ * @driver: The driver-model core driver structure.
* @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be
* added to this driver by preventing the sysfs file from being created.
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
@@ -1242,13 +1231,13 @@ struct usb_driver {
const struct attribute_group **dev_groups;
struct usb_dynids dynids;
- struct usbdrv_wrap drvwrap;
+ struct device_driver driver;
unsigned int no_dynamic_id:1;
unsigned int supports_autosuspend:1;
unsigned int disable_hub_initiated_lpm:1;
unsigned int soft_unbind:1;
};
-#define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver)
+#define to_usb_driver(d) container_of(d, struct usb_driver, driver)
/**
* struct usb_device_driver - identifies USB device driver to usbcore
@@ -1264,9 +1253,12 @@ struct usb_driver {
* module is being unloaded.
* @suspend: Called when the device is going to be suspended by the system.
* @resume: Called when the device is being resumed by the system.
+ * @choose_configuration: If non-NULL, called instead of the default
+ * usb_choose_configuration(). If this returns an error then we'll go
+ * on to call the normal usb_choose_configuration().
* @dev_groups: Attributes attached to the device that will be created once it
* is bound to the driver.
- * @drvwrap: Driver-model core structure wrapper.
+ * @driver: The driver-model core driver structure.
* @id_table: used with @match() to select better matching driver at
* probe() time.
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
@@ -1275,7 +1267,7 @@ struct usb_driver {
* resume and suspend functions will be called in addition to the driver's
* own, so this part of the setup does not need to be replicated.
*
- * USB drivers must provide all the fields listed above except drvwrap,
+ * USB drivers must provide all the fields listed above except driver,
* match, and id_table.
*/
struct usb_device_driver {
@@ -1287,14 +1279,17 @@ struct usb_device_driver {
int (*suspend) (struct usb_device *udev, pm_message_t message);
int (*resume) (struct usb_device *udev, pm_message_t message);
+
+ int (*choose_configuration) (struct usb_device *udev);
+
const struct attribute_group **dev_groups;
- struct usbdrv_wrap drvwrap;
+ struct device_driver driver;
const struct usb_device_id *id_table;
unsigned int supports_autosuspend:1;
unsigned int generic_subclass:1;
};
#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
- drvwrap.driver)
+ driver)
/**
* struct usb_class_driver - identifies a USB driver that wants to use the USB major number
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 00724b4f6e..cd77fc6095 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -372,8 +372,9 @@ struct hc_driver {
* or bandwidth constraints.
*/
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
- /* Returns the hardware-chosen device address */
- int (*address_device)(struct usb_hcd *, struct usb_device *udev);
+ /* Set the hardware-chosen device address */
+ int (*address_device)(struct usb_hcd *, struct usb_device *udev,
+ unsigned int timeout_ms);
/* prepares the hardware to send commands to the device */
int (*enable_device)(struct usb_hcd *, struct usb_device *udev);
/* Notifies the HCD after a hub descriptor is fetched.
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index eeb7c2157c..59409c1fc3 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -72,4 +72,7 @@
/* device has endpoints that should be ignored */
#define USB_QUIRK_ENDPOINT_IGNORE BIT(15)
+/* short SET_ADDRESS request timeout */
+#define USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT BIT(16)
+
#endif /* __LINUX_USB_QUIRKS_H */
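How hub code might plumb the quirk into the new address_device() timeout
parameter; the 5000/500 ms figures are illustrative defaults, not mandated
by this header:

static int set_address_sketch(struct usb_hcd *hcd, struct usb_device *udev)
{
        unsigned int timeout_ms = 5000; /* usual SET_ADDRESS budget */

        if (udev->quirks & USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT)
                timeout_ms = 500;       /* such devices answer quickly or not at all */

        return hcd->driver->address_device(hcd, udev, timeout_ms);
}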
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
index 83376473ac..467e8045e9 100644
--- a/include/linux/usb/tcpci.h
+++ b/include/linux/usb/tcpci.h
@@ -36,7 +36,9 @@
#define TCPC_ALERT_MASK 0x12
#define TCPC_POWER_STATUS_MASK 0x14
-#define TCPC_FAULT_STATUS_MASK 0x15
+
+#define TCPC_FAULT_STATUS_MASK 0x15
+#define TCPC_FAULT_STATUS_MASK_VCONN_OC BIT(1)
#define TCPC_EXTENDED_STATUS_MASK 0x16
#define TCPC_EXTENDED_STATUS_MASK_VSAFE0V BIT(0)
@@ -104,6 +106,7 @@
#define TCPC_FAULT_STATUS 0x1f
#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7)
+#define TCPC_FAULT_STATUS_VCONN_OC BIT(1)
#define TCPC_ALERT_EXTENDED 0x21
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index ab7ca87295..65fac5e1f3 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -173,5 +173,6 @@ void tcpm_pd_hard_reset(struct tcpm_port *port);
void tcpm_tcpc_reset(struct tcpm_port *port);
void tcpm_port_clean(struct tcpm_port *port);
bool tcpm_port_is_toggling(struct tcpm_port *port);
+void tcpm_port_error_recovery(struct tcpm_port *port);
#endif /* __LINUX_USB_TCPM_H */
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index f2dc19f40d..e4056547fb 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -93,6 +93,17 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm,
extern long uffd_wp_range(struct vm_area_struct *vma,
unsigned long start, unsigned long len, bool enable_wp);
+/* move_pages */
+void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
+void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
+ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
+ unsigned long dst_start, unsigned long src_start,
+ unsigned long len, __u64 flags);
+int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr);
+
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx vm_ctx)
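These kernel helpers back the new UFFDIO_MOVE ioctl; seen from userspace
(a sketch against the 6.8 uapi, error handling elided):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int uffd_move(int uffd, unsigned long src, unsigned long dst,
                     unsigned long len)
{
        struct uffdio_move mv = {
                .dst  = dst,
                .src  = src,
                .len  = len,
                .mode = 0,      /* or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES */
        };

        return ioctl(uffd, UFFDIO_MOVE, &mv);
}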
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 5ac5f182ce..8b1a298204 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -69,6 +69,13 @@ struct vfio_device {
u8 iommufd_attached:1;
#endif
u8 cdev_opened:1;
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * debug_root is a static property of the vfio_device
+ * which must be set prior to registering the vfio_device.
+ */
+ struct dentry *debug_root;
+#endif
};
/**
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index 562e875486..85e84b9275 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -127,7 +127,27 @@ int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
+int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar);
pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
pci_channel_state_t state);
+#define VFIO_IOWRITE_DECLATION(size) \
+int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev, \
+ bool test_mem, u##size val, void __iomem *io);
+
+VFIO_IOWRITE_DECLATION(8)
+VFIO_IOWRITE_DECLATION(16)
+VFIO_IOWRITE_DECLATION(32)
+#ifdef iowrite64
+VFIO_IOWRITE_DECLATION(64)
+#endif
+
+#define VFIO_IOREAD_DECLATION(size) \
+int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev, \
+ bool test_mem, u##size *val, void __iomem *io);
+
+VFIO_IOREAD_DECLATION(8)
+VFIO_IOREAD_DECLATION(16)
+VFIO_IOREAD_DECLATION(32)
+
#endif /* VFIO_PCI_CORE_H */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 4cc614a383..b0201747a2 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -103,6 +103,14 @@ int virtqueue_resize(struct virtqueue *vq, u32 num,
int virtqueue_reset(struct virtqueue *vq,
void (*recycle)(struct virtqueue *vq, void *buf));
+struct virtio_admin_cmd {
+ __le16 opcode;
+ __le16 group_type;
+ __le64 group_member_id;
+ struct scatterlist *data_sg;
+ struct scatterlist *result_sg;
+};
+
/**
* struct virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 2b3438de2c..da9b271b54 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -93,6 +93,8 @@ typedef void vq_callback_t(struct virtqueue *);
* Returns 0 on success or error status
* If disable_vq_and_reset is set, then enable_vq_after_reset must also be
* set.
+ * @create_avq: create admin virtqueue resource.
+ * @destroy_avq: destroy admin virtqueue resource.
*/
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -120,6 +122,8 @@ struct virtio_config_ops {
struct virtio_shm_region *region, u8 id);
int (*disable_vq_and_reset)(struct virtqueue *vq);
int (*enable_vq_after_reset)(struct virtqueue *vq);
+ int (*create_avq)(struct virtio_device *vdev);
+ void (*destroy_avq)(struct virtio_device *vdev);
};
/* If driver didn't advertise the feature, it will never appear. */
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
deleted file mode 100644
index d2e2785af6..0000000000
--- a/include/linux/virtio_console.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
- * anyone can use the definitions to implement compatible drivers/servers:
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of IBM nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Copyright (C) Red Hat, Inc., 2009, 2010, 2011
- * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011
- */
-#ifndef _LINUX_VIRTIO_CONSOLE_H
-#define _LINUX_VIRTIO_CONSOLE_H
-
-#include <uapi/linux/virtio_console.h>
-
-int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
-#endif /* _LINUX_VIRTIO_CONSOLE_H */
diff --git a/include/linux/virtio_pci_admin.h b/include/linux/virtio_pci_admin.h
new file mode 100644
index 0000000000..f4a100a0fe
--- /dev/null
+++ b/include/linux/virtio_pci_admin.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_ADMIN_H
+#define _LINUX_VIRTIO_PCI_ADMIN_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
+bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev);
+int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
+ u8 req_bar_flags, u8 *bar,
+ u64 *bar_offset);
+#endif
+
+#endif /* _LINUX_VIRTIO_PCI_ADMIN_H */
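A sketch of a consumer probing legacy I/O over the owner device's admin
queue (the in-tree user is the virtio-vfio-pci variant driver; req_flags is
left illustrative rather than naming a specific notify-info flag value):

#include <linux/virtio_pci_admin.h>

static int probe_legacy_notify(struct pci_dev *pdev)
{
        u8 req_flags = 0;       /* illustrative BAR-preference flags */
        u8 bar;
        u64 bar_offset;

        if (!virtio_pci_admin_has_legacy_io(pdev))
                return -ENODEV;

        /* Where should legacy notifications be issued? */
        return virtio_pci_admin_legacy_io_notify_info(pdev, req_flags,
                                                      &bar, &bar_offset);
}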
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
index a09e13a577..c0b1b1ca11 100644
--- a/include/linux/virtio_pci_modern.h
+++ b/include/linux/virtio_pci_modern.h
@@ -125,4 +125,6 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev);
void vp_modern_remove(struct virtio_pci_modern_device *mdev);
int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
+u16 vp_modern_avq_num(struct virtio_pci_modern_device *mdev);
+u16 vp_modern_avq_index(struct virtio_pci_modern_device *mdev);
#endif
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 8abfa12400..747943bc8c 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -41,9 +41,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGSTEAL_KHUGEPAGED,
- PGDEMOTE_KSWAPD,
- PGDEMOTE_DIRECT,
- PGDEMOTE_KHUGEPAGED,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_KHUGEPAGED,
@@ -145,6 +142,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_ZSWAP
ZSWPIN,
ZSWPOUT,
+ ZSWPWB,
#endif
#ifdef CONFIG_X86
DIRECT_MAP_LEVEL2_SPLIT,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index fed855bae6..343906a98d 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -556,19 +556,25 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
local_irq_restore(flags);
}
-void __mod_lruvec_page_state(struct page *page,
+void __lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val);
-static inline void mod_lruvec_page_state(struct page *page,
+static inline void lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
unsigned long flags;
local_irq_save(flags);
- __mod_lruvec_page_state(page, idx, val);
+ __lruvec_stat_mod_folio(folio, idx, val);
local_irq_restore(flags);
}
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ lruvec_stat_mod_folio(page_folio(page), idx, val);
+}
+
#else
static inline void __mod_lruvec_state(struct lruvec *lruvec,
@@ -583,37 +589,25 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- __mod_node_page_state(page_pgdat(page), idx, val);
-}
-
-static inline void mod_lruvec_page_state(struct page *page,
+static inline void __lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
- mod_node_page_state(page_pgdat(page), idx, val);
+ __mod_node_page_state(folio_pgdat(folio), idx, val);
}
-#endif /* CONFIG_MEMCG */
-
-static inline void __inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
{
- __mod_lruvec_page_state(page, idx, 1);
+ mod_node_page_state(folio_pgdat(folio), idx, val);
}
-static inline void __dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
{
- __mod_lruvec_page_state(page, idx, -1);
+ mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void __lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- __mod_lruvec_page_state(&folio->page, idx, val);
-}
+#endif /* CONFIG_MEMCG */
static inline void __lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
@@ -627,24 +621,6 @@ static inline void __lruvec_stat_sub_folio(struct folio *folio,
__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
-static inline void inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, 1);
-}
-
-static inline void dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, -1);
-}
-
-static inline void lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- mod_lruvec_page_state(&folio->page, idx, val);
-}
-
static inline void lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
{
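The removed page-based helpers translate mechanically to the folio API; a
sketch of the before/after for one stat item:

static void account_file_folio(struct folio *folio)
{
        /* Was: __inc_lruvec_page_state(page, NR_FILE_PAGES);
         * __lruvec_stat_add_folio(folio, NR_FILE_PAGES) is shorthand
         * for the call below.
         */
        __lruvec_stat_mod_folio(folio, NR_FILE_PAGES,
                                folio_nr_pages(folio));
}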
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
deleted file mode 100644
index 3495fd0dc7..0000000000
--- a/include/linux/w1-gpio.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * w1-gpio interface to platform code
- *
- * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
- */
-#ifndef _LINUX_W1_GPIO_H
-#define _LINUX_W1_GPIO_H
-
-struct gpio_desc;
-
-/**
- * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
- */
-struct w1_gpio_platform_data {
- struct gpio_desc *gpiod;
- struct gpio_desc *pullup_gpiod;
- void (*enable_external_pullup)(int enable);
- unsigned int pullup_duration;
-};
-
-#endif /* _LINUX_W1_GPIO_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 3473b66317..8aa3372f21 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -9,7 +9,6 @@
#include <linux/spinlock.h>
#include <asm/current.h>
-#include <uapi/linux/wait.h>
typedef struct wait_queue_entry wait_queue_entry_t;
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 763bd382cf..686291b878 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
-#include <uapi/linux/wmi.h>
/**
* struct wmi_device - WMI device structure
@@ -22,11 +21,17 @@
*/
struct wmi_device {
struct device dev;
-
- /* private: used by the WMI driver core */
bool setable;
};
+/**
+ * to_wmi_device() - Helper macro to cast a device to a wmi_device
+ * @device: device struct
+ *
+ * Cast a struct device to a struct wmi_device.
+ */
+#define to_wmi_device(device) container_of(device, struct wmi_device, dev)
+
extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev,
u8 instance, u32 method_id,
const struct acpi_buffer *in,
@@ -35,9 +40,9 @@ extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev,
extern union acpi_object *wmidev_block_query(struct wmi_device *wdev,
u8 instance);
-u8 wmidev_instance_count(struct wmi_device *wdev);
+acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct acpi_buffer *in);
-extern int set_required_buffer_size(struct wmi_device *wdev, u64 length);
+u8 wmidev_instance_count(struct wmi_device *wdev);
/**
* struct wmi_driver - WMI driver structure
@@ -47,11 +52,8 @@ extern int set_required_buffer_size(struct wmi_device *wdev, u64 length);
* @probe: Callback for device binding
* @remove: Callback for device unbinding
* @notify: Callback for receiving WMI events
- * @filter_callback: Callback for filtering device IOCTLs
*
* This represents WMI drivers which handle WMI devices.
- * @filter_callback is only necessary for drivers which
- * want to set up a WMI IOCTL interface.
*/
struct wmi_driver {
struct device_driver driver;
@@ -61,8 +63,6 @@ struct wmi_driver {
int (*probe)(struct wmi_device *wdev, const void *context);
void (*remove)(struct wmi_device *wdev);
void (*notify)(struct wmi_device *device, union acpi_object *data);
- long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd,
- struct wmi_ioctl_buffer *arg);
};
extern int __must_check __wmi_driver_register(struct wmi_driver *driver,
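wmidev_block_set() replaces the removed IOCTL plumbing for writes; a hedged
sketch of a driver setting data block instance 0 (payload illustrative):

static acpi_status write_block(struct wmi_device *wdev, u32 *payload)
{
        struct acpi_buffer in = {
                .length  = sizeof(*payload),
                .pointer = payload,
        };

        return wmidev_block_set(wdev, 0, &in);
}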
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ad97453e7c..2cc0a96061 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -14,12 +14,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
-
-struct workqueue_struct;
-
-struct work_struct;
-typedef void (*work_func_t)(struct work_struct *work);
-void delayed_work_timer_fn(struct timer_list *t);
+#include <linux/workqueue_types.h>
/*
* The first word is the work queue pointer and the flags rolled into
@@ -95,15 +90,6 @@ enum {
#define WORK_STRUCT_FLAG_MASK ((1ul << WORK_STRUCT_FLAG_BITS) - 1)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
-struct work_struct {
- atomic_long_t data;
- struct list_head entry;
- work_func_t func;
-#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
-#endif
-};
-
#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT() \
ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))
@@ -405,13 +391,6 @@ enum {
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
-
- /*
- * Per-node default cap on min_active. Unless explicitly set, min_active
- * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
- * workqueue_struct->min_active definition.
- */
- WQ_DFL_MIN_ACTIVE = 8,
};
/*
@@ -454,33 +433,11 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* alloc_workqueue - allocate a workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
- * @max_active: max in-flight work items, 0 for default
+ * @max_active: max in-flight work items per CPU, 0 for default
* remaining args: args for @fmt
*
- * For a per-cpu workqueue, @max_active limits the number of in-flight work
- * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
- * executing at most one work item for the workqueue.
- *
- * For unbound workqueues, @max_active limits the number of in-flight work items
- * for the whole system. e.g. @max_active of 16 indicates that that there can be
- * at most 16 work items executing for the workqueue in the whole system.
- *
- * As sharing the same active counter for an unbound workqueue across multiple
- * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
- * according to the proportion of the number of online CPUs and enforced
- * independently.
- *
- * Depending on online CPU distribution, a node may end up with per-node
- * max_active which is significantly lower than @max_active, which can lead to
- * deadlocks if the per-node concurrency limit is lower than the maximum number
- * of interdependent work items for the workqueue.
- *
- * To guarantee forward progress regardless of online CPU distribution, the
- * concurrency limit on every node is guaranteed to be equal to or greater than
- * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
- * that the sum of per-node max_active's may be larger than @max_active.
- *
- * For detailed information on %WQ_* flags, please refer to
+ * Allocate a workqueue with the specified parameters. For detailed
+ * information on WQ_* flags, please refer to
* Documentation/core-api/workqueue.rst.
*
* RETURNS:
@@ -520,7 +477,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
-int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
+extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
diff --git a/include/linux/workqueue_types.h b/include/linux/workqueue_types.h
new file mode 100644
index 0000000000..4c38824f3a
--- /dev/null
+++ b/include/linux/workqueue_types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_WORKQUEUE_TYPES_H
+#define _LINUX_WORKQUEUE_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/lockdep_types.h>
+#include <linux/timer_types.h>
+#include <linux/types.h>
+
+struct workqueue_struct;
+
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(struct timer_list *t);
+
+struct work_struct {
+ atomic_long_t data;
+ struct list_head entry;
+ work_func_t func;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
+};
+
+#endif /* _LINUX_WORKQUEUE_TYPES_H */
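With the struct definitions split out, embedding a work item only needs
workqueue_types.h, while the API keeps its documented per-CPU max_active
semantics. A minimal sketch of the latter:

static void ex_fn(struct work_struct *work) { }
static DECLARE_WORK(ex_work, ex_fn);

static int ex_init(void)
{
        /* At most one in-flight item per CPU: max_active == 1. */
        struct workqueue_struct *wq = alloc_workqueue("ex_wq",
                                                      WQ_FREEZABLE, 1);

        if (!wq)
                return -ENOMEM;

        queue_work(wq, &ex_work);
        destroy_workqueue(wq);  /* drains pending work first */
        return 0;
}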
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 083387c00f..453736fd1d 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -60,7 +60,7 @@ struct writeback_control {
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
- unsigned unpinned_fscache_wb:1; /* Cleared I_PINNING_FSCACHE_WB */
+ unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */
/*
* When writeback IOs are bounced through async layers, only the
@@ -193,7 +193,6 @@ void inode_io_list_del(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
- might_sleep();
wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 2a60ce39cf..0b709f5bc6 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -5,19 +5,41 @@
#include <linux/types.h>
#include <linux/mm_types.h>
+struct lruvec;
+
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#ifdef CONFIG_ZSWAP
+struct zswap_lruvec_state {
+ /*
+ * Number of pages in zswap that should be protected from the shrinker.
+ * This number is an estimate of the following counts:
+ *
+ * a) Recent page faults.
+ * b) Recent insertion to the zswap LRU. This includes new zswap stores,
+ * as well as recent zswap LRU rotations.
+ *
+ * These pages are likely to be warm, and might incur IO if they are
+ * written to swap.
+ */
+ atomic_long_t nr_zswap_protected;
+};
+
bool zswap_store(struct folio *folio);
bool zswap_load(struct folio *folio);
void zswap_invalidate(int type, pgoff_t offset);
void zswap_swapon(int type);
void zswap_swapoff(int type);
-
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
+void zswap_lruvec_state_init(struct lruvec *lruvec);
+void zswap_folio_swapin(struct folio *folio);
+bool is_zswap_enabled(void);
#else
+struct zswap_lruvec_state {};
+
static inline bool zswap_store(struct folio *folio)
{
return false;
@@ -31,6 +53,14 @@ static inline bool zswap_load(struct folio *folio)
static inline void zswap_invalidate(int type, pgoff_t offset) {}
static inline void zswap_swapon(int type) {}
static inline void zswap_swapoff(int type) {}
+static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
+static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
+static inline void zswap_folio_swapin(struct folio *folio) {}
+
+static inline bool is_zswap_enabled(void)
+{
+ return false;
+}
#endif
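How the new hooks fit together, roughly (the real call sites live under
mm/, not in this header): lruvecs initialize their zswap state once, and
each swap-in credits the estimate that holds the shrinker back.

static void on_lruvec_init(struct lruvec *lruvec)
{
        /* Zeroes nr_zswap_protected for this lruvec. */
        zswap_lruvec_state_init(lruvec);
}

static void on_swapin(struct folio *folio)
{
        /* Marks the folio's lruvec as holding recently-warm pages. */
        zswap_folio_swapin(folio);
}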
diff --git a/include/media/cec.h b/include/media/cec.h
index 53e4b2eb2b..d779826851 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -207,7 +207,20 @@ struct cec_adap_ops {
* passthrough mode.
* @log_addrs: current logical addresses
* @conn_info: current connector info
- * @tx_timeouts: number of transmit timeouts
+ * @tx_timeout_cnt: count the number of Timed Out transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_low_drive_cnt: count the number of Low Drive transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_error_cnt: count the number of Error transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_arb_lost_cnt: count the number of Arb Lost transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_low_drive_log_cnt: number of logged Low Drive transmits since the
+ * adapter was enabled. Used to avoid flooding the kernel
+ * log if this happens a lot.
+ * @tx_error_log_cnt: number of logged Error transmits since the adapter was
+ * enabled. Used to avoid flooding the kernel log if this
+ * happens a lot.
* @notifier: CEC notifier
* @pin: CEC pin status struct
* @cec_dir: debugfs cec directory
@@ -262,7 +275,12 @@ struct cec_adapter {
struct cec_log_addrs log_addrs;
struct cec_connector_info conn_info;
- u32 tx_timeouts;
+ u32 tx_timeout_cnt;
+ u32 tx_low_drive_cnt;
+ u32 tx_error_cnt;
+ u32 tx_arb_lost_cnt;
+ u32 tx_low_drive_log_cnt;
+ u32 tx_error_log_cnt;
#ifdef CONFIG_CEC_NOTIFIER
struct cec_notifier *notifier;
diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h
index 8b0b361b46..4e96e90ee6 100644
--- a/include/media/v4l2-cci.h
+++ b/include/media/v4l2-cci.h
@@ -34,6 +34,11 @@ struct cci_reg_sequence {
#define CCI_REG_ADDR_MASK GENMASK(15, 0)
#define CCI_REG_WIDTH_SHIFT 16
#define CCI_REG_WIDTH_MASK GENMASK(19, 16)
+/*
+ * Private CCI register flags, for the use of drivers.
+ */
+#define CCI_REG_PRIVATE_SHIFT 28U
+#define CCI_REG_PRIVATE_MASK GENMASK(31U, CCI_REG_PRIVATE_SHIFT)
#define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x)
#define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3)
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index d278836fd9..acf5be24a5 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -425,7 +425,7 @@ __v4l2_find_nearest_size(const void *array, size_t array_size,
/**
* v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by
- * calling the g_frame_interval op of the given subdev. It only works
+ * calling the get_frame_interval op of the given subdev. It only works
* for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the
* function name.
*
@@ -438,7 +438,7 @@ int v4l2_g_parm_cap(struct video_device *vdev,
/**
* v4l2_s_parm_cap - helper routine for vidioc_s_parm to fill this in by
- * calling the s_frame_interval op of the given subdev. It only works
+ * calling the set_frame_interval op of the given subdev. It only works
* for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the
* function name.
*
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index d6c8eb2b52..7f1af1f7f9 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -84,6 +84,12 @@ struct v4l2_m2m_queue_ctx {
* @last_src_buf: indicate the last source buffer for draining
* @next_buf_last: next capture queued buffer will be tagged as last
* @has_stopped: indicate the device has been stopped
+ * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE
+ * queue is not streaming. This allows firmware to
+ * analyze the bitstream header which arrives on the
+ * OUTPUT queue. The driver must implement the job_ready
+ * callback correctly to make sure that the requirements
+ * for actual decoding are met.
* @m2m_dev: opaque pointer to the internal data to handle M2M context
* @cap_q_ctx: Capture (output to memory) queue context
* @out_q_ctx: Output (input from memory) queue context
@@ -106,6 +112,7 @@ struct v4l2_m2m_ctx {
struct vb2_v4l2_buffer *last_src_buf;
bool next_buf_last;
bool has_stopped;
+ bool ignore_cap_streaming;
/* internal use only */
struct v4l2_m2m_dev *m2m_dev;
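A rough sketch of how a decoder driver might use the new @ignore_cap_streaming flag documented above: the flag is set at context setup so that job_ready runs for header parsing, and job_ready itself enforces the requirements for actual decoding (the mydec names and the headers_parsed field are illustrative):

static int mydec_job_ready(void *priv)
{
	struct mydec_ctx *ctx = priv;

	/* Bitstream headers can be parsed from OUTPUT buffers alone. */
	if (!ctx->headers_parsed)
		return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0;

	/* Actual decoding still requires CAPTURE buffers. */
	return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0 &&
	       v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0;
}

/* At context setup, after v4l2_m2m_ctx_init() succeeded: */
ctx->m2m_ctx->ignore_cap_streaming = true;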
@@ -661,7 +668,7 @@ v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
/**
- * v4l2_m2m_last_src_buf() - return last destination buffer from the list of
+ * v4l2_m2m_last_src_buf() - return last source buffer from the list of
* ready buffers
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index c1f90c1223..a9e6b81462 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -452,12 +452,6 @@ enum v4l2_subdev_pre_streamon_flags {
*
* @g_pixelaspect: callback to return the pixelaspect ratio.
*
- * @g_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL()
- * ioctl handler code.
- *
- * @s_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL()
- * ioctl handler code.
- *
* @s_dv_timings: Set custom dv timings in the sub device. This is used
* when sub device is capable of setting detailed timing information
* in the hardware to generate/detect the video signal.
@@ -496,10 +490,6 @@ struct v4l2_subdev_video_ops {
int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
int (*s_stream)(struct v4l2_subdev *sd, int enable);
int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect);
- int (*g_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval);
- int (*s_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval);
int (*s_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
int (*g_dv_timings)(struct v4l2_subdev *sd,
@@ -688,21 +678,16 @@ struct v4l2_subdev_ir_ops {
/**
* struct v4l2_subdev_pad_config - Used for storing subdev pad information.
*
- * @try_fmt: &struct v4l2_mbus_framefmt
- * @try_crop: &struct v4l2_rect to be used for crop
- * @try_compose: &struct v4l2_rect to be used for compose
- *
- * This structure only needs to be passed to the pad op if the 'which' field
- * of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For
- * %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL.
- *
- * Note: This struct is also used in active state, and the 'try' prefix is
- * historical and to be removed.
+ * @format: &struct v4l2_mbus_framefmt
+ * @crop: &struct v4l2_rect to be used for crop
+ * @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
*/
struct v4l2_subdev_pad_config {
- struct v4l2_mbus_framefmt try_fmt;
- struct v4l2_rect try_crop;
- struct v4l2_rect try_compose;
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ struct v4l2_fract interval;
};
/**
@@ -714,6 +699,7 @@ struct v4l2_subdev_pad_config {
* @fmt: &struct v4l2_mbus_framefmt
* @crop: &struct v4l2_rect to be used for crop
* @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
*
* This structure stores configuration for a stream.
*/
@@ -725,6 +711,7 @@ struct v4l2_subdev_stream_config {
struct v4l2_mbus_framefmt fmt;
struct v4l2_rect crop;
struct v4l2_rect compose;
+ struct v4l2_fract interval;
};
/**
@@ -756,6 +743,7 @@ struct v4l2_subdev_krouting {
*
* @_lock: default for 'lock'
* @lock: mutex for the state. May be replaced by the user.
+ * @sd: the sub-device which the state is related to
* @pads: &struct v4l2_subdev_pad_config array
* @routing: routing table for the subdev
* @stream_configs: stream configurations (only for V4L2_SUBDEV_FL_STREAMS)
@@ -768,6 +756,7 @@ struct v4l2_subdev_state {
/* lock for the struct v4l2_subdev_state fields */
struct mutex _lock;
struct mutex *lock;
+ struct v4l2_subdev *sd;
struct v4l2_subdev_pad_config *pads;
struct v4l2_subdev_krouting routing;
struct v4l2_subdev_stream_configs stream_configs;
@@ -776,7 +765,6 @@ struct v4l2_subdev_state {
/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
*
- * @init_cfg: initialize the pad config to default values
* @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE() ioctl handler
* code.
* @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE() ioctl handler
@@ -793,6 +781,12 @@ struct v4l2_subdev_state {
*
* @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION() ioctl handler code.
*
+ * @get_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL()
+ * ioctl handler code.
+ *
+ * @set_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL()
+ * ioctl handler code.
+ *
* @get_edid: callback for VIDIOC_SUBDEV_G_EDID() ioctl handler code.
*
* @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code.
@@ -841,8 +835,6 @@ struct v4l2_subdev_state {
* directly, use v4l2_subdev_disable_streams() instead.
*/
struct v4l2_subdev_pad_ops {
- int (*init_cfg)(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state);
int (*enum_mbus_code)(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code);
@@ -864,6 +856,12 @@ struct v4l2_subdev_pad_ops {
int (*set_selection)(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel);
+ int (*get_frame_interval)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *interval);
+ int (*set_frame_interval)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *interval);
int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*dv_timings_cap)(struct v4l2_subdev *sd,
@@ -919,6 +917,8 @@ struct v4l2_subdev_ops {
/**
* struct v4l2_subdev_internal_ops - V4L2 subdev internal ops
*
+ * @init_state: initialize the subdev state to default values
+ *
* @registered: called when this subdev is registered. When called the v4l2_dev
* field is set to the correct v4l2_device.
*
@@ -944,6 +944,8 @@ struct v4l2_subdev_ops {
* these ops.
*/
struct v4l2_subdev_internal_ops {
+ int (*init_state)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state);
int (*registered)(struct v4l2_subdev *sd);
void (*unregistered)(struct v4l2_subdev *sd);
int (*open)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
@@ -1142,83 +1144,6 @@ struct v4l2_subdev_fh {
#define to_v4l2_subdev_fh(fh) \
container_of(fh, struct v4l2_subdev_fh, vfh)
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
-
-/**
- * v4l2_subdev_get_pad_format - ancillary routine to call
- * &struct v4l2_subdev_pad_config->try_fmt
- *
- * @sd: pointer to &struct v4l2_subdev
- * @state: pointer to &struct v4l2_subdev_state
- * @pad: index of the pad in the &struct v4l2_subdev_state->pads array
- */
-static inline struct v4l2_mbus_framefmt *
-v4l2_subdev_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state,
- unsigned int pad)
-{
- if (WARN_ON(!state))
- return NULL;
- if (WARN_ON(pad >= sd->entity.num_pads))
- pad = 0;
- return &state->pads[pad].try_fmt;
-}
-
-/**
- * v4l2_subdev_get_pad_crop - ancillary routine to call
- * &struct v4l2_subdev_pad_config->try_crop
- *
- * @sd: pointer to &struct v4l2_subdev
- * @state: pointer to &struct v4l2_subdev_state.
- * @pad: index of the pad in the &struct v4l2_subdev_state->pads array.
- */
-static inline struct v4l2_rect *
-v4l2_subdev_get_pad_crop(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state,
- unsigned int pad)
-{
- if (WARN_ON(!state))
- return NULL;
- if (WARN_ON(pad >= sd->entity.num_pads))
- pad = 0;
- return &state->pads[pad].try_crop;
-}
-
-/**
- * v4l2_subdev_get_pad_compose - ancillary routine to call
- * &struct v4l2_subdev_pad_config->try_compose
- *
- * @sd: pointer to &struct v4l2_subdev
- * @state: pointer to &struct v4l2_subdev_state.
- * @pad: index of the pad in the &struct v4l2_subdev_state->pads array.
- */
-static inline struct v4l2_rect *
-v4l2_subdev_get_pad_compose(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state,
- unsigned int pad)
-{
- if (WARN_ON(!state))
- return NULL;
- if (WARN_ON(pad >= sd->entity.num_pads))
- pad = 0;
- return &state->pads[pad].try_compose;
-}
-
-/*
- * Temprary helpers until uses of v4l2_subdev_get_try_* functions have been
- * renamed
- */
-#define v4l2_subdev_get_try_format(sd, state, pad) \
- v4l2_subdev_get_pad_format(sd, state, pad)
-
-#define v4l2_subdev_get_try_crop(sd, state, pad) \
- v4l2_subdev_get_pad_crop(sd, state, pad)
-
-#define v4l2_subdev_get_try_compose(sd, state, pad) \
- v4l2_subdev_get_pad_compose(sd, state, pad)
-
-#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
-
extern const struct v4l2_file_operations v4l2_subdev_fops;
/**
@@ -1392,88 +1317,105 @@ int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
*/
void v4l2_subdev_cleanup(struct v4l2_subdev *sd);
-/**
- * v4l2_subdev_lock_state() - Locks the subdev state
- * @state: The subdevice state
- *
- * Locks the given subdev state.
- *
- * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+/*
+ * A macro to generate the macro or function name for sub-devices state access
+ * wrapper macros below.
*/
-static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state)
-{
- mutex_lock(state->lock);
-}
+#define __v4l2_subdev_state_gen_call(NAME, _1, ARG, ...) \
+ __v4l2_subdev_state_get_ ## NAME ## ARG
/**
- * v4l2_subdev_unlock_state() - Unlocks the subdev state
- * @state: The subdevice state
+ * v4l2_subdev_state_get_format() - Get pointer to a stream format
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
*
- * Unlocks the given subdev state.
+ * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad +
+ * stream in the subdev state.
+ *
+ * For stream-unaware drivers the format for the corresponding pad is returned.
+ * If the pad does not exist, NULL is returned.
*/
-static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state)
-{
- mutex_unlock(state->lock);
-}
+/*
+ * Wrap v4l2_subdev_state_get_format(), allowing the function to be called with
+ * two or three arguments. The purpose of the __v4l2_subdev_state_gen_call()
+ * macro above is to come up with the name of the function or macro to call,
+ * using the last two arguments (_stream and _pad). The selected function or
+ * macro is then called using the arguments specified by the caller. A similar
+ * arrangement is used for v4l2_subdev_state_get_crop() and
+ * v4l2_subdev_state_get_compose() below.
+ */
+#define v4l2_subdev_state_get_format(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(format, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_format_pad(state, pad) \
+ __v4l2_subdev_state_get_format(state, pad, 0)
+struct v4l2_mbus_framefmt *
+__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
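In practice the variadic wrapper lets stream-aware and stream-unaware drivers share one accessor name; a short usage sketch (pad and stream values are illustrative):

struct v4l2_mbus_framefmt *fmt;

/* Two arguments: stream defaults to 0, for stream-unaware drivers. */
fmt = v4l2_subdev_state_get_format(state, 0);

/* Three arguments: explicit pad + stream, for streams-aware drivers. */
fmt = v4l2_subdev_state_get_format(state, 1, stream_id);
if (!fmt)
	return -EINVAL;	/* no such pad/stream in this state */
fmt->code = MEDIA_BUS_FMT_SRGGB10_1X10;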
/**
- * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state
- * is unlocked and returns it
- * @sd: The subdevice
+ * v4l2_subdev_state_get_crop() - Get pointer to a stream crop rectangle
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
*
- * Returns the active state for the subdevice, or NULL if the subdev does not
- * support active state. If the state is not NULL, calls
- * lockdep_assert_not_held() to issue a warning if the state is locked.
+ * This returns a pointer to crop rectangle for the given pad + stream in the
+ * subdev state.
*
- * This function is to be used e.g. when getting the active state for the sole
- * purpose of passing it forward, without accessing the state fields.
+ * For stream-unaware drivers the crop rectangle for the corresponding pad is
+ * returned. If the pad does not exist, NULL is returned.
*/
-static inline struct v4l2_subdev_state *
-v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd)
-{
- if (sd->active_state)
- lockdep_assert_not_held(sd->active_state->lock);
- return sd->active_state;
-}
+#define v4l2_subdev_state_get_crop(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(crop, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_crop_pad(state, pad) \
+ __v4l2_subdev_state_get_crop(state, pad, 0)
+struct v4l2_rect *
+__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
+ u32 stream);
/**
- * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state
- * is locked and returns it
- *
- * @sd: The subdevice
+ * v4l2_subdev_state_get_compose() - Get pointer to a stream compose rectangle
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
*
- * Returns the active state for the subdevice, or NULL if the subdev does not
- * support active state. If the state is not NULL, calls lockdep_assert_held()
- * to issue a warning if the state is not locked.
+ * This returns a pointer to compose rectangle for the given pad + stream in the
+ * subdev state.
*
- * This function is to be used when the caller knows that the active state is
- * already locked.
+ * For stream-unaware drivers the compose rectangle for the corresponding pad is
+ * returned. If the pad does not exist, NULL is returned.
*/
-static inline struct v4l2_subdev_state *
-v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd)
-{
- if (sd->active_state)
- lockdep_assert_held(sd->active_state->lock);
- return sd->active_state;
-}
+#define v4l2_subdev_state_get_compose(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(compose, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_compose_pad(state, pad) \
+ __v4l2_subdev_state_get_compose(state, pad, 0)
+struct v4l2_rect *
+__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
/**
- * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev
- * state for the subdevice
- * @sd: The subdevice
+ * v4l2_subdev_state_get_interval() - Get pointer to a stream frame interval
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
*
- * Returns the locked active state for the subdevice, or NULL if the subdev
- * does not support active state.
+ * This returns a pointer to the frame interval for the given pad + stream in
+ * the subdev state.
*
- * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+ * For stream-unaware drivers the frame interval for the corresponding pad is
+ * returned. If the pad does not exist, NULL is returned.
*/
-static inline struct v4l2_subdev_state *
-v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd)
-{
- if (sd->active_state)
- v4l2_subdev_lock_state(sd->active_state);
- return sd->active_state;
-}
+#define v4l2_subdev_state_get_interval(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(interval, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_interval_pad(state, pad) \
+ __v4l2_subdev_state_get_interval(state, pad, 0)
+struct v4l2_fract *
+__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
@@ -1495,6 +1437,24 @@ int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format);
/**
+ * v4l2_subdev_get_frame_interval() - Fill frame interval based on state
+ * @sd: subdevice
+ * @state: subdevice state
+ * @fi: pointer to &struct v4l2_subdev_frame_interval
+ *
+ * Fill the @fi->interval field based on the information in the @fi struct.
+ *
+ * This function can be used by subdev drivers that support the active state
+ * to implement v4l2_subdev_pad_ops.get_frame_interval when the driver does
+ * not need to do anything special in its get_frame_interval op.
+ *
+ * Returns 0 on success, error value otherwise.
+ */
+int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi);
+
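A driver that keeps its frame interval in the active state can plug the helper straight into its pad operations; a sketch assuming a hypothetical mysensor driver:

static const struct v4l2_subdev_pad_ops mysensor_pad_ops = {
	.get_frame_interval = v4l2_subdev_get_frame_interval,
	.set_frame_interval = mysensor_set_frame_interval,
	/* ... remaining pad ops ... */
};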
+/**
* v4l2_subdev_set_routing() - Set given routing to subdev state
* @sd: The subdevice
* @state: The subdevice state
@@ -1540,52 +1500,6 @@ int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
const struct v4l2_mbus_framefmt *fmt);
/**
- * v4l2_subdev_state_get_stream_format() - Get pointer to a stream format
- * @state: subdevice state
- * @pad: pad id
- * @stream: stream id
- *
- * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad +
- * stream in the subdev state.
- *
- * If the state does not contain the given pad + stream, NULL is returned.
- */
-struct v4l2_mbus_framefmt *
-v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
- unsigned int pad, u32 stream);
-
-/**
- * v4l2_subdev_state_get_stream_crop() - Get pointer to a stream crop rectangle
- * @state: subdevice state
- * @pad: pad id
- * @stream: stream id
- *
- * This returns a pointer to crop rectangle for the given pad + stream in the
- * subdev state.
- *
- * If the state does not contain the given pad + stream, NULL is returned.
- */
-struct v4l2_rect *
-v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
- unsigned int pad, u32 stream);
-
-/**
- * v4l2_subdev_state_get_stream_compose() - Get pointer to a stream compose
- * rectangle
- * @state: subdevice state
- * @pad: pad id
- * @stream: stream id
- *
- * This returns a pointer to compose rectangle for the given pad + stream in the
- * subdev state.
- *
- * If the state does not contain the given pad + stream, NULL is returned.
- */
-struct v4l2_rect *
-v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
- unsigned int pad, u32 stream);
-
-/**
* v4l2_subdev_routing_find_opposite_end() - Find the opposite stream
* @routing: routing used to find the opposite side
* @pad: pad id
@@ -1787,6 +1701,89 @@ int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable);
#endif /* CONFIG_MEDIA_CONTROLLER */
/**
+ * v4l2_subdev_lock_state() - Locks the subdev state
+ * @state: The subdevice state
+ *
+ * Locks the given subdev state.
+ *
+ * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+ */
+static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state)
+{
+ mutex_lock(state->lock);
+}
+
+/**
+ * v4l2_subdev_unlock_state() - Unlocks the subdev state
+ * @state: The subdevice state
+ *
+ * Unlocks the given subdev state.
+ */
+static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state)
+{
+ mutex_unlock(state->lock);
+}
+
+/**
+ * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state
+ * is unlocked and returns it
+ * @sd: The subdevice
+ *
+ * Returns the active state for the subdevice, or NULL if the subdev does not
+ * support active state. If the state is not NULL, calls
+ * lockdep_assert_not_held() to issue a warning if the state is locked.
+ *
+ * This function is to be used e.g. when getting the active state for the sole
+ * purpose of passing it forward, without accessing the state fields.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ lockdep_assert_not_held(sd->active_state->lock);
+ return sd->active_state;
+}
+
+/**
+ * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state
+ * is locked and returns it
+ *
+ * @sd: The subdevice
+ *
+ * Returns the active state for the subdevice, or NULL if the subdev does not
+ * support active state. If the state is not NULL, calls lockdep_assert_held()
+ * to issue a warning if the state is not locked.
+ *
+ * This function is to be used when the caller knows that the active state is
+ * already locked.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ lockdep_assert_held(sd->active_state->lock);
+ return sd->active_state;
+}
+
+/**
+ * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev
+ * state for the subdevice
+ * @sd: The subdevice
+ *
+ * Returns the locked active state for the subdevice, or NULL if the subdev
+ * does not support active state.
+ *
+ * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ v4l2_subdev_lock_state(sd->active_state);
+ return sd->active_state;
+}
+
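The helpers above (moved here unchanged) support the usual pattern of holding the active state for the duration of a hardware update; a sketch assuming a subdev embedded in a hypothetical mysensor driver struct:

static int mysensor_update_hw(struct mysensor *sensor)
{
	struct v4l2_subdev_state *state;
	const struct v4l2_mbus_framefmt *fmt;
	int ret;

	state = v4l2_subdev_lock_and_get_active_state(&sensor->sd);
	fmt = v4l2_subdev_state_get_format(state, 0);
	ret = mysensor_program_format(sensor, fmt);	/* illustrative */
	v4l2_subdev_unlock_state(state);

	return ret;
}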
+/**
* v4l2_subdev_init - initializes the sub-device struct
*
* @sd: pointer to the &struct v4l2_subdev to be initialized
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 4b6a9d2ea3..bec8c3a1ed 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -72,6 +72,10 @@ struct vb2_buffer;
* argument to other ops in this structure.
* @put_userptr: inform the allocator that a USERPTR buffer will no longer
* be used.
+ * @prepare: called every time the buffer is passed from userspace to the
+ * driver, useful for cache synchronisation, optional.
+ * @finish: called every time the buffer is passed back from the driver
+ * to the userspace, also optional.
* @attach_dmabuf: attach a shared &struct dma_buf for a hardware operation;
* used for DMABUF memory types; dev is the alloc device
* dbuf is the shared dma_buf; returns ERR_PTR() on failure;
@@ -86,10 +90,6 @@ struct vb2_buffer;
* dmabuf.
* @unmap_dmabuf: releases access control to the dmabuf - allocator is notified
* that this driver is done using the dmabuf for now.
- * @prepare: called every time the buffer is passed from userspace to the
- * driver, useful for cache synchronisation, optional.
- * @finish: called every time the buffer is passed back from the driver
- * to the userspace, also optional.
* @vaddr: return a kernel virtual address to a given memory buffer
* associated with the passed private structure or NULL if no
* such mapping exists.
@@ -402,7 +402,7 @@ struct vb2_buffer {
* by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED.
* If you need a minimum number of buffers before you can
* start streaming, then set
- * &vb2_queue->min_buffers_needed. If that is non-zero
+ * &vb2_queue->min_queued_buffers. If that is non-zero
* then @start_streaming won't be called until at least
* that many buffers have been queued up by userspace.
* @stop_streaming: called when 'streaming' state must be disabled; driver
@@ -484,7 +484,6 @@ struct vb2_buf_ops {
* caller. For example, for V4L2, it should match
* the types defined on &enum v4l2_buf_type.
* @io_modes: supported io methods (see &enum vb2_io_modes).
- * @alloc_devs: &struct device memory type/allocator-specific per-plane device
* @dev: device to use for the default allocation context if the driver
* doesn't fill in the @alloc_devs array.
* @dma_attrs: DMA attributes to use for the DMA.
@@ -546,10 +545,14 @@ struct vb2_buf_ops {
* @gfp_flags: additional gfp flags used when allocating the buffers.
* Typically this is 0, but it may be e.g. %GFP_DMA or %__GFP_DMA32
* to force the buffer allocation to a specific memory zone.
- * @min_buffers_needed: the minimum number of buffers needed before
+ * @min_queued_buffers: the minimum number of queued buffers needed before
* @start_streaming can be called. Used when a DMA engine
* cannot be started unless at least this number of buffers
* have been queued into the driver.
+ * VIDIOC_REQBUFS will ensure that at least @min_queued_buffers
+ * buffers are allocated. Note that VIDIOC_CREATE_BUFS will not
+ * modify the requested buffer count.
+ * @alloc_devs: &struct device memory type/allocator-specific per-plane device
*/
/*
* Private elements (won't appear at the uAPI book):
@@ -558,6 +561,9 @@ struct vb2_buf_ops {
* @dma_dir: DMA mapping direction.
* @bufs: videobuf2 buffer structures
* @num_buffers: number of allocated/used buffers
+ * @max_num_buffers: upper limit of number of allocated/used buffers.
+ * If set to 0, the v4l2 core will change it to VB2_MAX_FRAME
+ * for backward compatibility.
* @queued_list: list of buffers currently queued from userspace
* @queued_count: number of buffers queued and ready for streaming.
* @owned_by_drv_count: number of buffers owned by the driver
@@ -571,6 +577,9 @@ struct vb2_buf_ops {
* @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
* buffers. Only set for capture queues if qbuf has not yet been
* called since poll() needs to return %EPOLLERR in that situation.
+ * @waiting_in_dqbuf: set by the core for the duration of a blocking DQBUF, when
+ * it has to wait for a buffer to become available with vb2_queue->lock
+ * released. Used to prevent other threads from destroying the queue.
* @is_multiplanar: set if buffer type is multiplanar
* @is_output: set if buffer type is output
* @copy_timestamp: set if vb2-core should set timestamps
@@ -611,7 +620,7 @@ struct vb2_queue {
unsigned int buf_struct_size;
u32 timestamp_flags;
gfp_t gfp_flags;
- u32 min_buffers_needed;
+ u32 min_queued_buffers;
struct device *alloc_devs[VB2_MAX_PLANES];
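A sketch of a capture driver's queue setup using the renamed field together with the new @max_num_buffers limit (the values are illustrative; per the documentation above, leaving @max_num_buffers at 0 keeps the historical VB2_MAX_FRAME limit):

q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_DMABUF;
q->min_queued_buffers = 3;	/* DMA engine needs 3 buffers to start */
q->max_num_buffers = 64;	/* opt in to more than the default limit */
ret = vb2_queue_init(q);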
@@ -619,8 +628,9 @@ struct vb2_queue {
struct mutex mmap_lock;
unsigned int memory;
enum dma_data_direction dma_dir;
- struct vb2_buffer *bufs[VB2_MAX_FRAME];
+ struct vb2_buffer **bufs;
unsigned int num_buffers;
+ unsigned int max_num_buffers;
struct list_head queued_list;
unsigned int queued_count;
@@ -747,7 +757,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q);
/**
* vb2_core_querybuf() - query video buffer information.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
- * @index: id number of the buffer.
+ * @vb: pointer to &struct vb2_buffer.
* @pb: buffer struct passed from userspace.
*
* Videobuf2 core helper to implement VIDIOC_QUERYBUF() operation. It is called
@@ -759,7 +769,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q);
*
* Return: returns zero on success; an error code otherwise.
*/
-void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
+void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb);
/**
* vb2_core_reqbufs() - Initiate streaming.
@@ -823,7 +833,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
* vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
* to the kernel.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
- * @index: id number of the buffer.
+ * @vb: pointer to &struct vb2_buffer.
* @pb: buffer structure passed from userspace to
* &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver.
*
@@ -839,13 +849,13 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
*
* Return: returns zero on success; an error code otherwise.
*/
-int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
+int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb);
/**
* vb2_core_qbuf() - Queue a buffer from userspace
*
* @q: pointer to &struct vb2_queue with videobuf2 queue.
- * @index: id number of the buffer
+ * @vb: pointer to &struct vb2_buffer.
* @pb: buffer structure passed from userspace to
* v4l2_ioctl_ops->vidioc_qbuf handler in driver
* @req: pointer to &struct media_request, may be NULL.
@@ -867,7 +877,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
*
* Return: returns zero on success; an error code otherwise.
*/
-int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb,
struct media_request *req);
/**
@@ -931,7 +941,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
* @fd: pointer to the file descriptor associated with DMABUF
* (set by driver).
* @type: buffer type.
- * @index: id number of the buffer.
+ * @vb: pointer to &struct vb2_buffer.
* @plane: index of the plane to be exported, 0 for single plane queues
* @flags: file flags for newly created file, as defined at
* include/uapi/asm-generic/fcntl.h.
@@ -945,7 +955,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
* Return: returns zero on success; an error code otherwise.
*/
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
- unsigned int index, unsigned int plane, unsigned int flags);
+ struct vb2_buffer *vb, unsigned int plane, unsigned int flags);
/**
* vb2_core_queue_init() - initialize a videobuf2 queue
@@ -1140,6 +1150,15 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q)
}
/**
+ * vb2_get_num_buffers() - get the number of buffers in a queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ */
+static inline unsigned int vb2_get_num_buffers(struct vb2_queue *q)
+{
+ return q->num_buffers;
+}
+
+/**
* vb2_is_busy() - return busy status of the queue.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
*
@@ -1147,7 +1166,7 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q)
*/
static inline bool vb2_is_busy(struct vb2_queue *q)
{
- return (q->num_buffers > 0);
+ return vb2_get_num_buffers(q) > 0;
}
/**
@@ -1239,6 +1258,12 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q,
unsigned int index)
{
+ if (!q->bufs)
+ return NULL;
+
+ if (index >= q->max_num_buffers)
+ return NULL;
+
if (index < q->num_buffers)
return q->bufs[index];
return NULL;
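With @bufs now dynamically sized, callers translate a userspace index into a buffer pointer up front and pass that pointer to the reworked core helpers; a sketch:

struct vb2_buffer *vb;

vb = vb2_get_buffer(q, index);
if (!vb)
	return -EINVAL;	/* index out of range, or queue not set up */

return vb2_core_prepare_buf(q, vb, pb);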
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 4ae0580b63..e1e5e72b90 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -137,6 +137,7 @@ struct tc_action_ops {
#ifdef CONFIG_NET_CLS_ACT
+#define ACT_P_BOUND 0
#define ACT_P_CREATED 1
#define ACT_P_DELETED 1
@@ -191,7 +192,7 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
struct nlattr *est, struct tc_action **a,
const struct tc_action_ops *ops, int bind,
u32 flags);
-void tcf_idr_insert_many(struct tc_action *actions[]);
+void tcf_idr_insert_many(struct tc_action *actions[], int init_res[]);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
@@ -207,8 +208,7 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est,
struct tc_action *actions[], int init_res[], size_t *attr_size,
u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
-struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
- bool rtnl_held,
+struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags,
struct netlink_ext_ack *extack);
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 61ebe723ee..facb7a469e 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -437,6 +437,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
refcount_inc(&ifp->refcnt);
}
+static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
+{
+ return refcount_inc_not_zero(&ifp->refcnt);
+}
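The _safe variant only takes a reference if the count has not already dropped to zero, which matters when the address was found via an RCU-protected lookup; a sketch:

/* Under rcu_read_lock(), after finding @ifp in a hash walk: */
if (!in6_ifa_hold_safe(ifp))
	ifp = NULL;	/* concurrently being freed, do not use */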
/*
* compute link-local solicited-node multicast address
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index afd40dce40..77bf30203d 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -55,7 +55,7 @@ struct unix_sock {
struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
- atomic_long_t inflight;
+ unsigned long inflight;
spinlock_t lock;
unsigned long gc_flags;
#define UNIX_GC_CANDIDATE 0
@@ -77,6 +77,9 @@ enum unix_socket_lock_class {
U_LOCK_NORMAL,
U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+ U_LOCK_GC_LISTENER, /* used for a listening socket while determining
+ * gc candidates, to close a small race window.
+ */
};
static inline void unix_state_lock_nested(struct sock *sk,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 7ffa8c192c..eaec5d6caa 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -164,6 +164,8 @@ struct bt_voice {
#define BT_ISO_QOS_BIG_UNSET 0xff
#define BT_ISO_QOS_BIS_UNSET 0xff
+#define BT_ISO_SYNC_TIMEOUT 0x07d0 /* 20 secs */
+
struct bt_iso_io_qos {
__u32 interval;
__u16 latency;
@@ -583,6 +585,15 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
return skb;
}
+static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
+ sockptr_t src, size_t src_size)
+{
+ if (dst_size > src_size)
+ return -EINVAL;
+
+ return copy_from_sockptr(dst, src, dst_size);
+}
+
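A sketch of a Bluetooth setsockopt handler using the new helper, which rejects option buffers shorter than the destination instead of reading past them (the handler name is illustrative):

static int my_bt_setsockopt(struct sock *sk, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	u32 opt;
	int err;

	err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
	if (err)
		return err;	/* -EINVAL if optlen < sizeof(opt) */

	/* ... apply opt ... */
	return 0;
}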
int bt_to_errno(u16 code);
__u8 bt_status(int err);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 0d23102457..2cfd8d8626 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -176,6 +176,15 @@ enum {
*/
HCI_QUIRK_USE_BDADDR_PROPERTY,
+ /* When this quirk is set, the Bluetooth Device Address provided by
+ * the 'local-bd-address' fwnode property is incorrectly specified in
+ * big-endian order.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BDADDR_PROPERTY_BROKEN,
+
/* When this quirk is set, the duplicate filtering during
 * scanning is based on Bluetooth device addresses. To allow
* RSSI based updates, restart scanning if needed.
@@ -330,6 +339,14 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_BROKEN_LE_CODED,
+
+ /*
+ * When this quirk is set, the HCI_OP_READ_ENC_KEY_SIZE command is
+ * skipped during an HCI_EV_ENCRYPT_CHANGE event. This is required
+ * for Actions Semiconductor ATS2851 based controllers, which erroneously
+ * claim to support it.
+ */
+ HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,
};
/* HCI device flags */
@@ -436,6 +453,7 @@ enum {
#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
@@ -651,6 +669,7 @@ enum {
#define HCI_ERROR_PIN_OR_KEY_MISSING 0x06
#define HCI_ERROR_MEMORY_EXCEEDED 0x07
#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
+#define HCI_ERROR_COMMAND_DISALLOWED 0x0c
#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
#define HCI_ERROR_REJ_BAD_ADDR 0x0f
#define HCI_ERROR_INVALID_PARAMETERS 0x12
@@ -659,6 +678,7 @@ enum {
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
#define HCI_ERROR_LOCAL_HOST_TERM 0x16
#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
+#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
@@ -2033,6 +2053,7 @@ struct hci_cp_le_set_per_adv_params {
} __packed;
#define HCI_MAX_PER_AD_LENGTH 252
+#define HCI_MAX_PER_AD_TOT_LEN 1650
#define HCI_OP_LE_SET_PER_ADV_DATA 0x203f
struct hci_cp_le_set_per_adv_data {
@@ -2793,6 +2814,10 @@ struct hci_ev_le_per_adv_report {
__u8 data[];
} __packed;
+#define LE_PA_DATA_COMPLETE 0x00
+#define LE_PA_DATA_MORE_TO_COME 0x01
+#define LE_PA_DATA_TRUNCATED 0x02
+
#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
struct hci_evt_le_ext_adv_set_term {
__u8 status;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 24446038d8..fe9e1524d3 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -539,7 +539,6 @@ struct hci_dev {
struct work_struct tx_work;
struct delayed_work le_scan_disable;
- struct delayed_work le_scan_restart;
struct sk_buff_head rx_q;
struct sk_buff_head raw_q;
@@ -736,8 +735,11 @@ struct hci_conn {
__u16 le_supv_timeout;
__u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH];
__u8 le_adv_data_len;
- __u8 le_per_adv_data[HCI_MAX_PER_AD_LENGTH];
- __u8 le_per_adv_data_len;
+ __u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN];
+ __u16 le_per_adv_data_len;
+ __u16 le_per_adv_data_offset;
+ __u8 le_adv_phy;
+ __u8 le_adv_sec_phy;
__u8 le_tx_phy;
__u8 le_rx_phy;
__s8 rssi;
@@ -1085,6 +1087,24 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num;
}
+static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c == conn) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
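hci_conn_valid() is meant for deferred work that holds a raw connection pointer which may have been freed in the meantime; a sketch of a sync-command callback revalidating it under the device lock:

static void my_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	hci_dev_lock(hdev);

	if (!hci_conn_valid(hdev, conn))
		goto unlock;	/* conn was deleted while we were queued */

	/* ... conn is safe to dereference here ... */

unlock:
	hci_dev_unlock(hdev);
}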
static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -1298,6 +1318,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
}
static inline struct hci_conn *
+hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK ||
+ c->state != state)
+ continue;
+
+ if (handle == c->iso_qos.bcast.big) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -1458,7 +1502,6 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
bdaddr_t *dst, u8 role);
void hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
-void hci_conn_check_pending(struct hci_dev *hdev);
struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
@@ -1471,12 +1514,14 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
- u16 conn_timeout, u8 role);
+ u16 conn_timeout, u8 role, u8 phy, u8 sec_phy);
+void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type,
- enum conn_reasons conn_reason);
+ enum conn_reasons conn_reason, u16 timeout);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
- __u16 setting, struct bt_codec *codec);
+ __u16 setting, struct bt_codec *codec,
+ u16 timeout);
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, struct bt_iso_qos *qos);
struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
@@ -1862,6 +1907,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
(hdev->commands[39] & 0x04))
+#define read_key_size_capable(dev) \
+ ((dev)->commands[20] & 0x10 && \
+ !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &(dev)->quirks))
+
/* Use enhanced synchronous connection if command is supported and its quirk
* has not been set.
*/
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index e2582c2425..6a9d063e9f 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -48,6 +48,18 @@ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+ struct hci_cmd_sync_work_entry *entry);
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+ hci_cmd_sync_work_func_t func, void *data,
+ hci_cmd_sync_work_destroy_t destroy);
int hci_update_eir_sync(struct hci_dev *hdev);
int hci_update_class_sync(struct hci_dev *hdev);
@@ -127,8 +139,6 @@ struct hci_conn;
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
-
int hci_le_create_cis_sync(struct hci_dev *hdev);
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
@@ -138,3 +148,9 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason);
int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle);
int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle);
+
+int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
+
+int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 92d7197f9a..a4278aa618 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -939,7 +939,7 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid);
struct l2cap_chan *l2cap_chan_create(void);
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
- bdaddr_t *dst, u8 dst_type);
+ bdaddr_t *dst, u8 dst_type, u16 timeout);
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index e90596e21c..0180eaec45 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -52,7 +52,7 @@
* such wiphy can have zero, one, or many virtual interfaces associated with
* it, which need to be identified as such by pointing the network interface's
* @ieee80211_ptr pointer to a &struct wireless_dev which further describes
- * the wireless part of the interface, normally this struct is embedded in the
+ * the wireless part of the interface. Normally this struct is embedded in the
* network interface's private data area. Drivers can optionally allow creating
* or destroying virtual interfaces on the fly, but without at least one or the
 * ability to create some, the wireless device isn't useful.
@@ -117,6 +117,11 @@ struct wiphy;
* This may be due to the driver or due to regulatory bandwidth
* restrictions.
* @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
+ * @IEEE80211_CHAN_DFS_CONCURRENT: See %NL80211_RRF_DFS_CONCURRENT
+ * @IEEE80211_CHAN_NO_UHB_VLP_CLIENT: Client connection with VLP AP
+ * not permitted using this channel
+ * @IEEE80211_CHAN_NO_UHB_AFC_CLIENT: Client connection with AFC AP
+ * not permitted using this channel
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
@@ -140,6 +145,9 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_16MHZ = 1<<18,
IEEE80211_CHAN_NO_320MHZ = 1<<19,
IEEE80211_CHAN_NO_EHT = 1<<20,
+ IEEE80211_CHAN_DFS_CONCURRENT = 1<<21,
+ IEEE80211_CHAN_NO_UHB_VLP_CLIENT= 1<<22,
+ IEEE80211_CHAN_NO_UHB_AFC_CLIENT= 1<<23,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -977,6 +985,15 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1,
const struct cfg80211_chan_def *chandef2);
/**
+ * nl80211_chan_width_to_mhz - get the channel width in MHz
+ * @chan_width: the channel width from &enum nl80211_chan_width
+ *
+ * Return: channel width in MHz if the chan_width from &enum nl80211_chan_width
+ * is valid. -1 otherwise.
+ */
+int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width);
+
+/**
* cfg80211_chandef_valid - check if a channel definition is valid
* @chandef: the channel definition to check
* Return: %true if the channel definition is valid. %false otherwise.
@@ -1665,6 +1682,21 @@ struct link_station_del_parameters {
};
/**
+ * struct cfg80211_ttlm_params: TID to link mapping parameters
+ *
+ * Used for setting a TID to link mapping.
+ *
+ * @dlink: Downlink TID to link mapping, as defined in section 9.4.2.314
+ * (TID-To-Link Mapping element) in Draft P802.11be_D4.0.
+ * @ulink: Uplink TID to link mapping, as defined in section 9.4.2.314
+ * (TID-To-Link Mapping element) in Draft P802.11be_D4.0.
+ */
+struct cfg80211_ttlm_params {
+ u16 dlink[8];
+ u16 ulink[8];
+};
+
+/**
* struct station_parameters - station parameters
*
* Used to change and create a new station.
@@ -2560,7 +2592,7 @@ struct cfg80211_scan_info {
* @short_ssid: short ssid to scan for
* @bssid: bssid to scan for
* @channel_idx: idx of the channel in the channel array in the scan request
- * which the above info relvant to
+ * which the above info is relevant to
* @unsolicited_probe: the AP transmits unsolicited probe response every 20 TU
* @short_ssid_valid: @short_ssid is valid and can be used
* @psc_no_listen: when set, and the channel is a PSC channel, no need to wait
@@ -2608,6 +2640,8 @@ struct cfg80211_scan_6ghz_params {
* @n_6ghz_params: number of 6 GHz params
* @scan_6ghz_params: 6 GHz params
* @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
+ * @tsf_report_link_id: for MLO, indicates the link ID of the BSS that should be
+ * used for TSF reporting. Can be set to -1 to indicate no preference.
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
@@ -2636,6 +2670,7 @@ struct cfg80211_scan_request {
bool scan_6ghz;
u32 n_6ghz_params;
struct cfg80211_scan_6ghz_params *scan_6ghz_params;
+ s8 tsf_report_link_id;
/* keep last */
struct ieee80211_channel *channels[] __counted_by(n_channels);
@@ -2816,6 +2851,13 @@ enum cfg80211_signal_type {
* the BSS that requested the scan in which the beacon/probe was received.
* @chains: bitmask for filled values in @chain_signal.
* @chain_signal: per-chain signal strength of last received BSS in dBm.
+ * @restrict_use: restrict usage, if not set, assume @use_for is
+ * %NL80211_BSS_USE_FOR_NORMAL.
+ * @use_for: bitmap of possible usage for this BSS, see
+ * &enum nl80211_bss_use_for
+ * @cannot_use_reasons: the reasons (bitmap) for not being able to connect,
+ * if @restrict_use is set and @use_for is zero (empty); may be 0 for
+ * unspecified reasons; see &enum nl80211_bss_cannot_use_reasons
* @drv_data: Data to be passed through to @inform_bss
*/
struct cfg80211_inform_bss {
@@ -2827,6 +2869,9 @@ struct cfg80211_inform_bss {
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
+ u8 restrict_use:1, use_for:7;
+ u8 cannot_use_reasons;
+
void *drv_data;
};
@@ -2880,6 +2925,11 @@ struct cfg80211_bss_ies {
* @chain_signal: per-chain signal strength of last received BSS in dBm.
* @bssid_index: index in the multiple BSS set
* @max_bssid_indicator: max number of members in the BSS set
+ * @use_for: bitmap of possible usage for this BSS, see
+ * &enum nl80211_bss_use_for
+ * @cannot_use_reasons: the reasons (bitmap) for not being able to connect,
+ * if @restrict_use is set and @use_for is zero (empty); may be 0 for
+ * unspecified reasons; see &enum nl80211_bss_cannot_use_reasons
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
struct cfg80211_bss {
@@ -2907,6 +2957,9 @@ struct cfg80211_bss {
u8 bssid_index;
u8 max_bssid_indicator;
+ u8 use_for;
+ u8 cannot_use_reasons;
+
u8 priv[] __aligned(sizeof(void *));
};
@@ -3184,8 +3237,8 @@ struct cfg80211_ibss_params {
*
* @behaviour: requested BSS selection behaviour.
 * @param: parameters for requested behaviour.
- * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
- * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
+ * @param.band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
+ * @param.adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
*/
struct cfg80211_bss_selection {
enum nl80211_bss_select_attr behaviour;
@@ -4497,6 +4550,7 @@ struct mgmt_frame_regs {
* @del_link_station: Remove a link of a station.
*
* @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames.
+ * @set_ttlm: set the TID to link mapping.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4856,6 +4910,8 @@ struct cfg80211_ops {
struct link_station_del_parameters *params);
int (*set_hw_timestamp)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_set_hw_timestamp *hwts);
+ int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ttlm_params *params);
};
/*
@@ -4914,6 +4970,8 @@ struct cfg80211_ops {
* NL80211_REGDOM_SET_BY_DRIVER.
* @WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON: reg_call_notifier() is called if driver
* set this flag to update channels on beacon hints.
+ * @WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY: support connection to non-primary link
+ * of an NSTR mobile AP MLD.
* @WIPHY_FLAG_DISABLE_WEXT: disable wireless extensions for this device
*/
enum wiphy_flags {
@@ -4929,7 +4987,7 @@ enum wiphy_flags {
WIPHY_FLAG_DISABLE_WEXT = BIT(9),
WIPHY_FLAG_MESH_AUTH = BIT(10),
WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),
- /* use hole at 12 */
+ WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY = BIT(12),
WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
WIPHY_FLAG_AP_UAPSD = BIT(14),
WIPHY_FLAG_SUPPORTS_TDLS = BIT(15),
@@ -6019,7 +6077,6 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
* wireless device if it has no netdev
* @u: union containing data specific to @iftype
* @connected: indicates if connected or not (STA mode)
- * @bssid: (private) Used by the internal configuration code
* @wext: (private) Used by the internal wireless extensions compat code
* @wext.ibss: (private) IBSS data part of wext handling
* @wext.connect: (private) connection handling data
@@ -6039,8 +6096,6 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
* @mgmt_registrations: list of registrations for management frames
* @mgmt_registrations_need_update: mgmt registrations were updated,
* need to propagate the update to the driver
- * @beacon_interval: beacon interval used on this device for transmitting
- * beacons, 0 when not valid
* @address: The address for this device, valid only if @netdev is %NULL
* @is_running: true if this is a non-netdev device that has been started, e.g.
* the P2P Device.
@@ -7122,6 +7177,23 @@ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
enum nl80211_band band);
/**
+ * cfg80211_ssid_eq - compare two SSIDs
+ * @a: first SSID
+ * @b: second SSID
+ *
+ * Return: %true if SSIDs are equal, %false otherwise.
+ */
+static inline bool
+cfg80211_ssid_eq(struct cfg80211_ssid *a, struct cfg80211_ssid *b)
+{
+ if (WARN_ON(!a || !b))
+ return false;
+ if (a->ssid_len != b->ssid_len)
+ return false;
+ return memcmp(a->ssid, b->ssid, a->ssid_len) ? false : true;
+}
+
+/**
* cfg80211_inform_bss_data - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
@@ -7168,6 +7240,25 @@ cfg80211_inform_bss(struct wiphy *wiphy,
}
/**
+ * __cfg80211_get_bss - get a BSS reference
+ * @wiphy: the wiphy this BSS struct belongs to
+ * @channel: the channel to search on (or %NULL)
+ * @bssid: the desired BSSID (or %NULL)
+ * @ssid: the desired SSID (or %NULL)
+ * @ssid_len: length of the SSID (or 0)
+ * @bss_type: type of BSS, see &enum ieee80211_bss_type
+ * @privacy: privacy filter, see &enum ieee80211_privacy
+ * @use_for: indicates which use is intended
+ */
+struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ const u8 *bssid,
+ const u8 *ssid, size_t ssid_len,
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy privacy,
+ u32 use_for);
+
+/**
* cfg80211_get_bss - get a BSS reference
* @wiphy: the wiphy this BSS struct belongs to
* @channel: the channel to search on (or %NULL)
@@ -7176,13 +7267,20 @@ cfg80211_inform_bss(struct wiphy *wiphy,
* @ssid_len: length of the SSID (or 0)
* @bss_type: type of BSS, see &enum ieee80211_bss_type
* @privacy: privacy filter, see &enum ieee80211_privacy
+ *
+ * This version implies regular usage, %NL80211_BSS_USE_FOR_NORMAL.
*/
-struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- const u8 *bssid,
- const u8 *ssid, size_t ssid_len,
- enum ieee80211_bss_type bss_type,
- enum ieee80211_privacy privacy);
+static inline struct cfg80211_bss *
+cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel,
+ const u8 *bssid, const u8 *ssid, size_t ssid_len,
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy privacy)
+{
+ return __cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len,
+ bss_type, privacy,
+ NL80211_BSS_USE_FOR_NORMAL);
+}
+
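A sketch of a lookup restricted to a non-default usage, assuming the NL80211_BSS_USE_FOR_MLD_LINK flag from this release's nl80211 additions; regular callers keep using cfg80211_get_bss() as before:

struct cfg80211_bss *bss;

bss = __cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
			 IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY,
			 NL80211_BSS_USE_FOR_MLD_LINK);
if (bss) {
	/* ... use the BSS as an MLO link candidate ... */
	cfg80211_put_bss(wiphy, bss);
}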
static inline struct cfg80211_bss *
cfg80211_get_ibss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
@@ -7276,8 +7374,6 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
/**
* struct cfg80211_rx_assoc_resp_data - association response data
- * @bss: the BSS that association was requested with, ownership of the pointer
- * moves to cfg80211 in the call to cfg80211_rx_assoc_resp()
* @buf: (Re)Association Response frame (header + body)
* @len: length of the frame data
* @uapsd_queues: bitmap of queues configured for uapsd. Same format
@@ -7287,6 +7383,8 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
* @ap_mld_addr: AP MLD address (in case of MLO)
* @links: per-link information indexed by link ID, use links[0] for
* non-MLO connections
+ * @links.bss: the BSS that association was requested with, ownership of the
+ * pointer moves to cfg80211 in the call to cfg80211_rx_assoc_resp()
* @links.status: Set this (along with a BSS pointer) for links that
* were rejected by the AP.
*/
@@ -7315,7 +7413,7 @@ struct cfg80211_rx_assoc_resp_data {
* This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
void cfg80211_rx_assoc_resp(struct net_device *dev,
- struct cfg80211_rx_assoc_resp_data *data);
+ const struct cfg80211_rx_assoc_resp_data *data);
/**
* struct cfg80211_assoc_failure - association failure data
@@ -7434,7 +7532,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev,
* RFkill integration in cfg80211 is almost invisible to drivers,
* as cfg80211 automatically registers an rfkill instance for each
* wireless device it knows about. Soft kill is also translated
- * into disconnecting and turning all interfaces off, drivers are
+ * into disconnecting and turning all interfaces off. Drivers are
* expected to turn off the device when all interfaces are down.
*
* However, devices may have a hard RFkill line, in which case they
@@ -7482,7 +7580,7 @@ static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
* the configuration mechanism.
*
* A driver supporting vendor commands must register them as an array
- * in struct wiphy, with handlers for each one, each command has an
+ * in struct wiphy, with handlers for each one. Each command has an
* OUI and sub command ID to identify it.
*
* Note that this feature should not be (ab)used to implement protocol
@@ -7646,7 +7744,7 @@ static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp)
* interact with driver-specific tools to aid, for instance,
* factory programming.
*
- * This chapter describes how drivers interact with it, for more
+ * This chapter describes how drivers interact with it. For more
* information see the nl80211 book's chapter on it.
*/
@@ -9305,6 +9403,16 @@ bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
*/
void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
+/**
+ * cfg80211_schedule_channels_check - schedule regulatory check if needed
+ * @wdev: the wireless device to check
+ *
+ * In case the device supports NO_IR or DFS relaxations, schedule a regulatory
+ * channels check, as the previous concurrent operation conditions may no
+ * longer hold.
+ */
+void cfg80211_schedule_channels_check(struct wireless_dev *wdev);
+
#ifdef CONFIG_CFG80211_DEBUGFS
/**
* wiphy_locked_debugfs_read - do a locked read in debugfs
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 519d23941b..76d2cd2e2b 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -20,6 +20,7 @@ struct wpan_phy;
struct wpan_phy_cca;
struct cfg802154_scan_request;
struct cfg802154_beacon_request;
+struct ieee802154_addr;
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
struct ieee802154_llsec_device_key;
@@ -77,6 +78,12 @@ struct cfg802154_ops {
struct cfg802154_beacon_request *request);
int (*stop_beacons)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev);
+ int (*associate)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *coord);
+ int (*disassociate)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *target);
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
void (*get_llsec_table)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
@@ -304,6 +311,22 @@ struct ieee802154_coord_desc {
};
/**
+ * struct ieee802154_pan_device - PAN device information
+ * @pan_id: the PAN ID of this device
+ * @mode: the preferred mode to reach the device
+ * @short_addr: the short address of this device
+ * @extended_addr: the extended address of this device
+ * @node: the list node
+ */
+struct ieee802154_pan_device {
+ __le16 pan_id;
+ u8 mode;
+ __le16 short_addr;
+ __le64 extended_addr;
+ struct list_head node;
+};
+
+/**
* struct cfg802154_scan_request - Scan request
*
* @type: type of scan to be performed
@@ -479,6 +502,13 @@ struct wpan_dev {
/* fallback for acknowledgment bit setting */
bool ackreq;
+
+ /* Associations */
+ struct mutex association_lock;
+ struct ieee802154_pan_device *parent;
+ struct list_head children;
+ unsigned int max_associations;
+ unsigned int nchildren;
};
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
@@ -530,4 +560,46 @@ static inline const char *wpan_phy_name(struct wpan_phy *phy)
void ieee802154_configure_durations(struct wpan_phy *phy,
unsigned int page, unsigned int channel);
+/**
+ * cfg802154_device_is_associated - Checks whether we are associated to any device
+ * @wpan_dev: the wpan device
+ * @return: true if we are associated
+ */
+bool cfg802154_device_is_associated(struct wpan_dev *wpan_dev);
+
+/**
+ * cfg802154_device_is_parent - Checks if a device is our coordinator
+ * @wpan_dev: the wpan device
+ * @target: the expected parent
+ * @return: true if @target is our coordinator
+ */
+bool cfg802154_device_is_parent(struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *target);
+
+/**
+ * cfg802154_device_is_child - Checks whether a device is associated to us
+ * @wpan_dev: the wpan device
+ * @target: the expected child
+ * @return: the PAN device entry of the child, or NULL if @target is not
+ * associated to us
+ */
+struct ieee802154_pan_device *
+cfg802154_device_is_child(struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *target);
+
+/**
+ * cfg802154_set_max_associations - Limit the number of future associations
+ * @wpan_dev: the wpan device
+ * @max: the maximum number of devices we accept to associate with
+ * @return: the old maximum value
+ */
+unsigned int cfg802154_set_max_associations(struct wpan_dev *wpan_dev,
+ unsigned int max);
+
+/**
+ * cfg802154_get_free_short_addr - Get a free address among the known devices
+ * @wpan_dev: the wpan device
+ * @return: a random short address expected to be unused on our PAN
+ */
+__le16 cfg802154_get_free_short_addr(struct wpan_dev *wpan_dev);
+
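A sketch of combining these helpers when a disassociation notification arrives; the callsite, the teardown handling and the locking context are hypothetical:

/* Hypothetical example: classify the peer that asked to disassociate,
 * using the parent/child lookups declared above.
 */
static void example_handle_disassoc(struct wpan_dev *wpan_dev,
				    struct ieee802154_addr *teardown)
{
	struct ieee802154_pan_device *child;

	if (cfg802154_device_is_parent(wpan_dev, teardown)) {
		/* Our coordinator asked us to leave the PAN */
		return;
	}

	child = cfg802154_device_is_child(wpan_dev, teardown);
	if (child) {
		/* One of our children is leaving: unlink and free the
		 * entry, and decrement wpan_dev->nchildren.
		 */
	}
}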
#endif /* __NET_CFG802154_H */
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 3c70ad53a4..6d3a201632 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -85,7 +85,10 @@
FN(IPV6_NDISC_BAD_OPTIONS) \
FN(IPV6_NDISC_NS_OTHERHOST) \
FN(QUEUE_PURGE) \
- FN(TC_ERROR) \
+ FN(TC_COOKIE_ERROR) \
+ FN(PACKET_SOCK_ERROR) \
+ FN(TC_CHAIN_NOTFOUND) \
+ FN(TC_RECLASSIFY_LOOP) \
FNe(MAX)
/**
@@ -376,8 +379,23 @@ enum skb_drop_reason {
SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST,
/** @SKB_DROP_REASON_QUEUE_PURGE: bulk free. */
SKB_DROP_REASON_QUEUE_PURGE,
- /** @SKB_DROP_REASON_TC_ERROR: generic internal tc error. */
- SKB_DROP_REASON_TC_ERROR,
+ /**
+ * @SKB_DROP_REASON_TC_COOKIE_ERROR: An error occurred whilst
+ * processing a tc ext cookie.
+ */
+ SKB_DROP_REASON_TC_COOKIE_ERROR,
+ /**
+ * @SKB_DROP_REASON_PACKET_SOCK_ERROR: generic packet socket errors
+ * raised after the socket's filter matched an incoming packet.
+ */
+ SKB_DROP_REASON_PACKET_SOCK_ERROR,
+ /** @SKB_DROP_REASON_TC_CHAIN_NOTFOUND: tc chain lookup failed. */
+ SKB_DROP_REASON_TC_CHAIN_NOTFOUND,
+ /**
+ * @SKB_DROP_REASON_TC_RECLASSIFY_LOOP: tc exceeded max reclassify loop
+ * iterations.
+ */
+ SKB_DROP_REASON_TC_RECLASSIFY_LOOP,
/**
* @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
* shouldn't be used as a real 'reason' - only for tracing code gen
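The new reasons replace the removed catch-all at their respective drop sites; a minimal sketch of a hypothetical callsite:

/* Hypothetical example: report the precise tc drop cause rather than
 * the old generic SKB_DROP_REASON_TC_ERROR.
 */
static void example_tc_chain_miss(struct sk_buff *skb)
{
	kfree_skb_reason(skb, SKB_DROP_REASON_TC_CHAIN_NOTFOUND);
}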
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 82da359bca..d17855c52e 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -172,8 +172,7 @@ void fib_rules_unregister(struct fib_rules_ops *);
int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
struct fib_lookup_arg *);
-int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
- u32 flags);
+int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table);
bool fib_rule_matchall(const struct fib_rule *rule);
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
struct netlink_ext_ack *extack);
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index c53244f204..e614691294 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -8,16 +8,19 @@
#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN)
+/* Binding to multicast group requires %CAP_NET_ADMIN */
+#define GENL_MCAST_CAP_NET_ADMIN BIT(0)
+/* Binding to multicast group requires %CAP_SYS_ADMIN */
+#define GENL_MCAST_CAP_SYS_ADMIN BIT(1)
+
/**
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
- * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
- * @cap_sys_admin: whether %CAP_SYS_ADMIN is required for binding
+ * @flags: GENL_MCAST_* flags
*/
struct genl_multicast_group {
char name[GENL_NAMSIZ];
u8 flags;
- u8 cap_sys_admin:1;
};
struct genl_split_ops;
@@ -51,6 +54,9 @@ struct genl_info;
* @split_ops: the split do/dump form of operation definition
* @n_split_ops: number of entries in @split_ops, note that with split do/dump
* ops the number of entries is not the same as number of commands
+ * @sock_priv_size: the size of per-socket private memory
+ * @sock_priv_init: the per-socket private memory initializer
+ * @sock_priv_destroy: the per-socket private memory destructor
*
* Attribute policies (the combination of @policy and @maxattr fields)
* can be attached at the family level or at the operation level.
@@ -84,11 +90,17 @@ struct genl_family {
const struct genl_multicast_group *mcgrps;
struct module *module;
+ size_t sock_priv_size;
+ void (*sock_priv_init)(void *priv);
+ void (*sock_priv_destroy)(void *priv);
+
/* private: internal use only */
/* protocol family identifier */
int id;
/* starting number of multicast group IDs in this family */
unsigned int mcgrp_offset;
+ /* list of per-socket privs */
+ struct xarray *sock_privs;
};
/**
@@ -298,6 +310,8 @@ static inline bool genl_info_is_ntf(const struct genl_info *info)
return !info->nlhdr;
}
+void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk);
+void *genl_sk_priv_get(struct genl_family *family, struct sock *sk);
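A sketch of a family opting into the per-socket storage; the family name and priv layout are hypothetical, and the init hook is assumed optional:

/* Hypothetical example: one small private struct per socket, allocated
 * lazily by genl_sk_priv_get() and set up by ->sock_priv_init().
 */
struct example_sk_priv {
	bool wants_events;
};

static void example_sock_priv_init(void *priv)
{
	struct example_sk_priv *sp = priv;

	sp->wants_events = false;
}

static struct genl_family example_family = {
	.name		= "example",
	.sock_priv_size	= sizeof(struct example_sk_priv),
	.sock_priv_init	= example_sock_priv_init,
};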
int genl_register_family(struct genl_family *family);
int genl_unregister_family(const struct genl_family *family);
void genl_notify(const struct genl_family *family, struct sk_buff *skb,
@@ -438,6 +452,35 @@ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
}
/**
+ * genlmsg_multicast_netns_filtered - multicast a netlink message to a
+ * specific netns with filter function
+ * @family: the generic netlink family
+ * @net: the net namespace
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
+ * @flags: allocation flags
+ * @filter: filter function
+ * @filter_data: filter function private data
+ *
+ * Return: 0 on success, negative error code for failure.
+ */
+static inline int
+genlmsg_multicast_netns_filtered(const struct genl_family *family,
+ struct net *net, struct sk_buff *skb,
+ u32 portid, unsigned int group, gfp_t flags,
+ netlink_filter_fn filter,
+ void *filter_data)
+{
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+ group = family->mcgrp_offset + group;
+ return nlmsg_multicast_filtered(net->genl_sock, skb, portid, group,
+ flags, filter, filter_data);
+}
+
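The filter can then consult per-socket state such as the hypothetical example_sk_priv sketched earlier; the nonzero-return-skips convention is assumed from netlink_broadcast_filtered():

/* Hypothetical example: deliver the notification only to sockets that
 * opted in. A nonzero return value skips the candidate socket.
 */
static int example_filter(struct sock *dsk, struct sk_buff *skb, void *data)
{
	struct example_sk_priv *sp = __genl_sk_priv_get(&example_family, dsk);

	if (IS_ERR_OR_NULL(sp))
		return 1;	/* no private state allocated: skip */

	return !sp->wants_events;
}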
+/**
* genlmsg_multicast_netns - multicast a netlink message to a specific netns
* @family: the generic netlink family
* @net: the net namespace
@@ -450,10 +493,8 @@ static inline int genlmsg_multicast_netns(const struct genl_family *family,
struct net *net, struct sk_buff *skb,
u32 portid, unsigned int group, gfp_t flags)
{
- if (WARN_ON_ONCE(group >= family->n_mcgrps))
- return -EINVAL;
- group = family->mcgrp_offset + group;
- return nlmsg_multicast(net->genl_sock, skb, portid, group, flags);
+ return genlmsg_multicast_netns_filtered(family, net, skb, portid,
+ group, flags, NULL, NULL);
}
/**
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 063313df44..4de858f992 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -125,6 +125,35 @@ struct ieee802154_hdr_fc {
#endif
};
+struct ieee802154_assoc_req_pl {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 reserved1:1,
+ device_type:1,
+ power_source:1,
+ rx_on_when_idle:1,
+ assoc_type:1,
+ reserved2:1,
+ security_cap:1,
+ alloc_addr:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 alloc_addr:1,
+ security_cap:1,
+ reserved2:1,
+ assoc_type:1,
+ rx_on_when_idle:1,
+ power_source:1,
+ device_type:1,
+ reserved1:1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct ieee802154_assoc_resp_pl {
+ __le16 short_addr;
+ u8 status;
+} __packed;
+
enum ieee802154_frame_version {
IEEE802154_2003_STD,
IEEE802154_2006_STD,
@@ -140,6 +169,19 @@ enum ieee802154_addressing_mode {
IEEE802154_EXTENDED_ADDRESSING,
};
+enum ieee802154_association_status {
+ IEEE802154_ASSOCIATION_SUCCESSFUL = 0x00,
+ IEEE802154_PAN_AT_CAPACITY = 0x01,
+ IEEE802154_PAN_ACCESS_DENIED = 0x02,
+ IEEE802154_HOPPING_SEQUENCE_OFFSET_DUP = 0x03,
+ IEEE802154_FAST_ASSOCIATION_SUCCESSFUL = 0x80,
+};
+
+enum ieee802154_disassociation_reason {
+ IEEE802154_COORD_WISHES_DEVICE_TO_LEAVE = 0x1,
+ IEEE802154_DEVICE_WISHES_TO_LEAVE = 0x2,
+};
+
struct ieee802154_hdr {
struct ieee802154_hdr_fc fc;
u8 seq;
@@ -163,6 +205,24 @@ struct ieee802154_beacon_req_frame {
struct ieee802154_mac_cmd_pl mac_pl;
};
+struct ieee802154_association_req_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+ struct ieee802154_assoc_req_pl assoc_req_pl;
+};
+
+struct ieee802154_association_resp_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+ struct ieee802154_assoc_resp_pl assoc_resp_pl;
+};
+
+struct ieee802154_disassociation_notif_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+ u8 disassoc_pl;
+};
+
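A coordinator-side sketch of filling the association response payload defined above; the helper is hypothetical and cfg802154_get_free_short_addr() comes from <net/cfg802154.h>:

/* Hypothetical example: accept an association request unless the PAN
 * is full, handing out a free short address on success.
 */
static void example_fill_assoc_resp(struct wpan_dev *wpan_dev,
				    struct ieee802154_assoc_resp_pl *pl)
{
	if (wpan_dev->nchildren >= wpan_dev->max_associations) {
		pl->status = IEEE802154_PAN_AT_CAPACITY;
		pl->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST);
		return;
	}

	pl->short_addr = cfg802154_get_free_short_addr(wpan_dev);
	pl->status = IEEE802154_ASSOCIATION_SUCCESSFUL;
}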
/* pushes hdr onto the skb. fields of hdr->fc that can be calculated from
* the contents of hdr will be, and the actual value of those bits in
* hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 9ab4bf704e..ccf171f7eb 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -175,6 +175,7 @@ void inet_csk_init_xmit_timers(struct sock *sk,
void (*delack_handler)(struct timer_list *),
void (*keepalive_handler)(struct timer_list *));
void inet_csk_clear_xmit_timers(struct sock *sk);
+void inet_csk_clear_xmit_timers_sync(struct sock *sk);
static inline void inet_csk_schedule_ack(struct sock *sk)
{
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 3ecfeadbfa..7f1b384587 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -88,7 +88,7 @@ struct inet_bind_bucket {
unsigned short fast_sk_family;
bool fast_ipv6_only;
struct hlist_node node;
- struct hlist_head owners;
+ struct hlist_head bhash2;
};
struct inet_bind2_bucket {
@@ -96,22 +96,17 @@ struct inet_bind2_bucket {
int l3mdev;
unsigned short port;
#if IS_ENABLED(CONFIG_IPV6)
- unsigned short family;
-#endif
- union {
-#if IS_ENABLED(CONFIG_IPV6)
- struct in6_addr v6_rcv_saddr;
+ unsigned short addr_type;
+ struct in6_addr v6_rcv_saddr;
+#define rcv_saddr v6_rcv_saddr.s6_addr32[3]
+#else
+ __be32 rcv_saddr;
#endif
- __be32 rcv_saddr;
- };
/* Node in the bhash2 inet_bind_hashbucket chain */
struct hlist_node node;
+ struct hlist_node bhash_node;
/* List of sockets hashed to this bucket */
struct hlist_head owners;
- /* bhash has twsk in owners, but bhash2 has twsk in
- * deathrow not to add a member in struct sock_common.
- */
- struct hlist_head deathrow;
};
static inline struct net *ib_net(const struct inet_bind_bucket *ib)
@@ -241,7 +236,7 @@ bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
struct inet_bind2_bucket *
inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
struct inet_bind_hashbucket *head,
- unsigned short port, int l3mdev,
+ struct inet_bind_bucket *tb,
const struct sock *sk);
void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 8d5fe15b0f..d94c242eb3 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -234,10 +234,7 @@ struct inet_sock {
int uc_index;
int mc_index;
__be32 mc_addr;
- struct {
- __u16 lo;
- __u16 hi;
- } local_port_range;
+ u32 local_port_range; /* high << 16 | low */
struct ip_mc_socklist __rcu *mc_list;
struct inet_cork_full cork;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index b14999ff55..f28da08a37 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -75,13 +75,9 @@ struct inet_timewait_sock {
struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
struct inet_bind2_bucket *tw_tb2;
- struct hlist_node tw_bind2_node;
};
#define tw_tclass tw_tos
-#define twsk_for_each_bound_bhash2(__tw, list) \
- hlist_for_each_entry(__tw, list, tw_bind2_node)
-
static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
return (struct inet_timewait_sock *)sk;
diff --git a/include/net/ip.h b/include/net/ip.h
index 6390dd08da..25cb688bdc 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -349,8 +349,14 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
} \
}
-void inet_get_local_port_range(const struct net *net, int *low, int *high);
-void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
+static inline void inet_get_local_port_range(const struct net *net, int *low, int *high)
+{
+ u32 range = READ_ONCE(net->ipv4.ip_local_ports.range);
+
+ *low = range & 0xffff;
+ *high = range >> 16;
+}
+bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index f346b4efbc..6690939f24 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -360,6 +360,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
return pskb_network_may_pull(skb, nhlen);
}
+/* Variant of pskb_inet_may_pull() that also handles a VLAN-tagged payload:
+ * it pulls the possibly VLAN-encapsulated network header into the linear
+ * area and sets the network header offset accordingly.
+ */
+static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+{
+ int nhlen = 0, maclen = ETH_HLEN;
+ __be16 type = skb->protocol;
+
+ /* Essentially this is skb_protocol(skb, true)
+ * And we get MAC len.
+ */
+ if (eth_type_vlan(type))
+ type = __vlan_get_protocol(skb, type, &maclen);
+
+ switch (type) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ nhlen = sizeof(struct ipv6hdr);
+ break;
+#endif
+ case htons(ETH_P_IP):
+ nhlen = sizeof(struct iphdr);
+ break;
+ }
+ /* For ETH_P_IPV6/ETH_P_IP we make sure to pull
+ * a base network header in skb->head.
+ */
+ if (!pskb_may_pull(skb, maclen + nhlen))
+ return false;
+
+ skb_set_network_header(skb, maclen);
+ return true;
+}
+
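A hypothetical tunnel transmit path using the helper; on success the network header offset is already set by skb_vlan_inet_prepare():

/* Hypothetical example: validate that the possibly VLAN-tagged inner
 * headers are present in the linear area before reading them.
 */
static netdev_tx_t example_tunnel_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	if (!skb_vlan_inet_prepare(skb)) {
		dev_core_stats_tx_dropped_inc(dev);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* ... encapsulate and transmit ... */
	return NETDEV_TX_OK;
}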
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
const struct ip_tunnel_encap_ops *ops;
@@ -416,6 +449,17 @@ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
return 0;
}
+static inline __be32 ip_tunnel_get_flowlabel(const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IPV6))
+ return ip6_flowlabel((const struct ipv6hdr *)iph);
+ else
+ return 0;
+}
+
static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
const struct sk_buff *skb)
{
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 78d38dd88a..cf25ea21d7 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -784,11 +784,6 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
cpu_to_be32(0x0000ffff))) == 0UL;
}
-static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
-{
- return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
-}
-
static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
{
return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index f9e88401d7..8b2055d64a 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -80,7 +80,7 @@ struct iucv_array {
u32 length;
} __attribute__ ((aligned (8)));
-extern struct bus_type iucv_bus;
+extern const struct bus_type iucv_bus;
extern struct device *iucv_root;
/*
@@ -489,7 +489,7 @@ struct iucv_interface {
int (*path_sever)(struct iucv_path *path, u8 userdata[16]);
int (*iucv_register)(struct iucv_handler *handler, int smp);
void (*iucv_unregister)(struct iucv_handler *handler, int smp);
- struct bus_type *bus;
+ const struct bus_type *bus;
struct device *root;
};
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 580781ff9d..d400fe2e86 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -476,9 +476,9 @@ struct ieee80211_ba_event {
/**
* struct ieee80211_event - event to be sent to the driver
* @type: The event itself. See &enum ieee80211_event_type.
- * @rssi: relevant if &type is %RSSI_EVENT
- * @mlme: relevant if &type is %AUTH_EVENT
- * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
+ * @u.rssi: relevant if &type is %RSSI_EVENT
+ * @u.mlme: relevant if &type is %AUTH_EVENT
+ * @u.ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
* @u: union holding the fields above
*/
struct ieee80211_event {
@@ -541,8 +541,6 @@ struct ieee80211_fils_discovery {
* @link_id: link ID, or 0 for non-MLO
* @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
* @uora_exists: is the UORA element advertised by AP
- * @ack_enabled: indicates support to receive a multi-TID that solicits either
- * ACK, BACK or both
* @uora_ocw_range: UORA element's OCW Range field
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @he_support: does this BSS support HE
@@ -1150,11 +1148,6 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @ack: union part for pure ACK data
* @ack.cookie: cookie for the ACK
* @driver_data: array of driver_data pointers
- * @ampdu_ack_len: number of acked aggregated frames.
- * relevant only if IEEE80211_TX_STAT_AMPDU was set.
- * @ampdu_len: number of aggregated frames.
- * relevant only if IEEE80211_TX_STAT_AMPDU was set.
- * @ack_signal: signal strength of the ACK frame
*/
struct ieee80211_tx_info {
/* common information */
@@ -1362,6 +1355,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* the frame.
* @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
* the frame.
+ * @RX_FLAG_MACTIME: The timestamp passed in the RX status (@mactime
+ * field) is valid if this field is non-zero, and the position
+ * where the timestamp was sampled depends on the value.
* @RX_FLAG_MACTIME_START: The timestamp passed in the RX status (@mactime
* field) is valid and contains the time the first symbol of the MPDU
* was received. This is useful in monitor mode and for proper IBSS
@@ -1371,6 +1367,11 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* (including FCS) was received.
* @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
* field) is valid and contains the time the SYNC preamble was received.
+ * @RX_FLAG_MACTIME_IS_RTAP_TS64: The timestamp passed in the RX status @mactime
+ * is only for use in the radiotap timestamp header, not otherwise a valid
+ * @mactime value. Note this is a separate flag so that we continue to see
+ * %RX_FLAG_MACTIME as unset. Also note that in this case the timestamp is
+ * reported to be 64 bits wide, not just 32.
* @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
* Valid only for data frames (mainly A-MPDU)
* @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
@@ -1441,12 +1442,12 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
RX_FLAG_DECRYPTED = BIT(1),
- RX_FLAG_MACTIME_PLCP_START = BIT(2),
+ RX_FLAG_ONLY_MONITOR = BIT(2),
RX_FLAG_MMIC_STRIPPED = BIT(3),
RX_FLAG_IV_STRIPPED = BIT(4),
RX_FLAG_FAILED_FCS_CRC = BIT(5),
RX_FLAG_FAILED_PLCP_CRC = BIT(6),
- RX_FLAG_MACTIME_START = BIT(7),
+ RX_FLAG_MACTIME_IS_RTAP_TS64 = BIT(7),
RX_FLAG_NO_SIGNAL_VAL = BIT(8),
RX_FLAG_AMPDU_DETAILS = BIT(9),
RX_FLAG_PN_VALIDATED = BIT(10),
@@ -1455,8 +1456,10 @@ enum mac80211_rx_flags {
RX_FLAG_AMPDU_IS_LAST = BIT(13),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14),
RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15),
- RX_FLAG_MACTIME_END = BIT(16),
- RX_FLAG_ONLY_MONITOR = BIT(17),
+ RX_FLAG_MACTIME = BIT(16) | BIT(17),
+ RX_FLAG_MACTIME_PLCP_START = 1 << 16,
+ RX_FLAG_MACTIME_START = 2 << 16,
+ RX_FLAG_MACTIME_END = 3 << 16,
RX_FLAG_SKIP_MONITOR = BIT(18),
RX_FLAG_AMSDU_MORE = BIT(19),
RX_FLAG_RADIOTAP_TLV_AT_END = BIT(20),
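Since the three mactime variants now share a two-bit field, the sampling position is tested by masking rather than by individual bits; a minimal sketch:

/* Hypothetical example: extract the mactime sampling position with the
 * RX_FLAG_MACTIME mask instead of testing single bits.
 */
static bool example_mactime_at_start(const struct ieee80211_rx_status *status)
{
	return (status->flag & RX_FLAG_MACTIME) == RX_FLAG_MACTIME_START;
}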
@@ -2686,6 +2689,9 @@ struct ieee80211_txq {
* @IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX: Hardware/driver handles transmitting
* multicast frames on all links, mac80211 should not do that.
*
+ * @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT
+ * and connecting with a lower bandwidth instead
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2743,6 +2749,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP,
IEEE80211_HW_DETECTS_COLOR_COLLISION,
IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
+ IEEE80211_HW_DISALLOW_PUNCTURING,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2831,8 +2838,6 @@ enum ieee80211_hw_flags {
* the default is _GI | _BANDWIDTH.
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
*
- * @radiotap_he: HE radiotap validity flags
- *
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
* @units_pos member is set to a non-negative value then the timestamp
* field will be added and populated from the &struct ieee80211_rx_status
@@ -4267,6 +4272,8 @@ struct ieee80211_prep_tx_info {
* disable background CAC/radar detection.
* @net_fill_forward_path: Called from .ndo_fill_forward_path in order to
* resolve a path for hardware flow offloading
+ * @can_activate_links: Checks if a specific active_links bitmap is
+ * supported by the driver.
* @change_vif_links: Change the valid links on an interface, note that while
* removing the old link information is still valid (link_conf pointer),
* but may immediately disappear after the function returns. The old or
@@ -4647,6 +4654,9 @@ struct ieee80211_ops {
struct ieee80211_sta *sta,
struct net_device_path_ctx *ctx,
struct net_device_path *path);
+ bool (*can_activate_links)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links);
int (*change_vif_links)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 old_links, u16 new_links,
@@ -5809,12 +5819,11 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
* ieee80211_remove_key - remove the given key
* @keyconf: the parameter passed with the set key
*
+ * Context: Must be called with the wiphy mutex held.
+ *
* Remove the given key. If the key was uploaded to the hardware at the
* time this function is called, it is not deleted in the hardware but
* instead assumed to have been removed already.
- *
- * Note that due to locking considerations this function can (currently)
- * only be called during key iteration (ieee80211_iter_keys().)
*/
void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
@@ -6368,12 +6377,12 @@ ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
* @iter: iterator function that will be called for each key
* @iter_data: custom data to pass to the iterator function
*
+ * Context: Must be called with wiphy mutex held; can sleep.
+ *
* This function can be used to iterate all the keys known to
* mac80211, even those that weren't previously programmed into
* the device. This is intended for use in WoWLAN if the device
- * needs reprogramming of the keys during suspend. Note that due
- * to locking reasons, it is also only safe to call this at few
- * spots since it must hold the RTNL and be able to sleep.
+ * needs reprogramming of the keys during suspend.
*
* The order in which the keys are iterated matches the order
* in which they were originally installed and handed to the
@@ -7435,6 +7444,9 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
* @vif: interface to set active links on
* @active_links: the new active links bitmap
*
+ * Context: Must be called with wiphy mutex held; may sleep; calls
+ * back into the driver.
+ *
* This changes the active links on an interface. The interface
* must be in client mode (in AP mode, all links are always active),
* and @active_links must be a subset of the vif's valid_links.
@@ -7442,6 +7454,7 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
* If a link is switched off and another is switched on at the same
* time (e.g. active_links going from 0x1 to 0x10) then you will get
* a sequence of calls like
+ *
* - change_vif_links(0x11)
* - unassign_vif_chanctx(link_id=0)
* - change_sta_links(0x11) for each affected STA (the AP)
@@ -7451,10 +7464,6 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
* - change_sta_links(0x10) for each affected STA (the AP)
* - assign_vif_chanctx(link_id=4)
* - change_vif_links(0x10)
- *
- * Note: This function acquires some mac80211 locks and must not
- * be called with any driver locks held that could cause a
- * lock dependency inversion. Best call it without locks.
*/
int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
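A minimal sketch of a driver-initiated link switch under the documented context; the callsite is hypothetical and the wiphy mutex is assumed held:

/* Hypothetical example: make link 4 the only active link, triggering
 * the callback sequence documented above.
 */
static int example_switch_to_link4(struct ieee80211_vif *vif)
{
	return ieee80211_set_active_links(vif, BIT(4));
}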
diff --git a/include/net/macsec.h b/include/net/macsec.h
index ebf9bc5403..de216cbc6b 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -247,6 +247,23 @@ struct macsec_secy {
/**
* struct macsec_context - MACsec context for hardware offloading
+ * @netdev: a valid pointer to a struct net_device if @offload ==
+ * MACSEC_OFFLOAD_MAC
+ * @phydev: a valid pointer to a struct phy_device if @offload ==
+ * MACSEC_OFFLOAD_PHY
+ * @offload: MACsec offload status
+ * @secy: pointer to a MACsec SecY
+ * @rx_sc: pointer to a RX SC
+ * @update_pn: when updating the SA, update the next PN
+ * @assoc_num: association number of the target SA
+ * @key: key of the target SA
+ * @rx_sa: pointer to an RX SA if an RX SA is added/updated/removed
+ * @tx_sa: pointer to a TX SA if a TX SA is added/updated/removed
+ * @tx_sc_stats: pointer to TX SC stats structure
+ * @tx_sa_stats: pointer to TX SA stats structure
+ * @rx_sc_stats: pointer to RX SC stats structure
+ * @rx_sa_stats: pointer to RX SA stats structure
+ * @dev_stats: pointer to dev stats structure
*/
struct macsec_context {
union {
@@ -277,6 +294,34 @@ struct macsec_context {
/**
* struct macsec_ops - MACsec offloading operations
+ * @mdo_dev_open: called when the MACsec interface transitions to the up state
+ * @mdo_dev_stop: called when the MACsec interface transitions to the down
+ * state
+ * @mdo_add_secy: called when a new SecY is added
+ * @mdo_upd_secy: called when the SecY flags are changed or the MAC address of
+ * the MACsec interface is changed
+ * @mdo_del_secy: called when the hw offload is disabled or the MACsec
+ * interface is removed
+ * @mdo_add_rxsc: called when a new RX SC is added
+ * @mdo_upd_rxsc: called when a certain RX SC is updated
+ * @mdo_del_rxsc: called when a certain RX SC is removed
+ * @mdo_add_rxsa: called when a new RX SA is added
+ * @mdo_upd_rxsa: called when a certain RX SA is updated
+ * @mdo_del_rxsa: called when a certain RX SA is removed
+ * @mdo_add_txsa: called when a new TX SA is added
+ * @mdo_upd_txsa: called when a certain TX SA is updated
+ * @mdo_del_txsa: called when a certain TX SA is removed
+ * @mdo_get_dev_stats: called when dev stats are read
+ * @mdo_get_tx_sc_stats: called when TX SC stats are read
+ * @mdo_get_tx_sa_stats: called when TX SA stats are read
+ * @mdo_get_rx_sc_stats: called when RX SC stats are read
+ * @mdo_get_rx_sa_stats: called when RX SA stats are read
+ * @mdo_insert_tx_tag: called to insert the TX tag
+ * @needed_headroom: number of bytes reserved at the beginning of the sk_buff
+ * for the TX tag
+ * @needed_tailroom: number of bytes reserved at the end of the sk_buff for the
+ * TX tag
+ * @rx_uses_md_dst: whether MACsec device offload supports sk_buff md_dst
*/
struct macsec_ops {
/* Device wide */
@@ -303,6 +348,12 @@ struct macsec_ops {
int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
+ /* Offload tag */
+ int (*mdo_insert_tx_tag)(struct phy_device *phydev,
+ struct sk_buff *skb);
+ unsigned int needed_headroom;
+ unsigned int needed_tailroom;
+ bool rx_uses_md_dst;
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
@@ -325,4 +376,9 @@ static inline void *macsec_netdev_priv(const struct net_device *dev)
return netdev_priv(dev);
}
+static inline u64 sci_to_cpu(sci_t sci)
+{
+ return be64_to_cpu((__force __be64)sci);
+}
+
#endif /* _NET_MACSEC_H_ */
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 88b6ef7ce1..27684135bb 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -66,6 +66,7 @@ enum {
GDMA_DEVICE_NONE = 0,
GDMA_DEVICE_HWC = 1,
GDMA_DEVICE_MANA = 2,
+ GDMA_DEVICE_MANA_IB = 3,
};
struct gdma_resource {
@@ -149,6 +150,7 @@ struct gdma_general_req {
#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
+#define GDMA_MESSAGE_V3 3
struct gdma_general_resp {
struct gdma_resp_hdr hdr;
@@ -293,6 +295,7 @@ struct gdma_queue {
u32 head;
u32 tail;
+ struct list_head entry;
/* Extra fields specific to EQ/CQ. */
union {
@@ -328,6 +331,7 @@ struct gdma_queue_spec {
void *context;
unsigned long log2_throttle_limit;
+ unsigned int msix_index;
} eq;
struct {
@@ -344,7 +348,9 @@ struct gdma_queue_spec {
struct gdma_irq_context {
void (*handler)(void *arg);
- void *arg;
+ /* Protect the eq_list */
+ spinlock_t lock;
+ struct list_head eq_list;
char name[MANA_IRQ_NAME_SZ];
};
@@ -355,7 +361,6 @@ struct gdma_context {
unsigned int max_num_queues;
unsigned int max_num_msix;
unsigned int num_msix_usable;
- struct gdma_resource msix_resource;
struct gdma_irq_context *irq_contexts;
/* L2 MTU */
@@ -387,6 +392,9 @@ struct gdma_context {
/* Azure network adapter */
struct gdma_dev mana;
+
+ /* Azure RDMA adapter */
+ struct gdma_dev mana_ib;
};
#define MAX_NUM_GDMA_DEVICES 4
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 6e3e9c1363..4eeedf1471 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -39,7 +39,6 @@ enum TRI_STATE {
#define COMP_ENTRY_SIZE 64
#define RX_BUFFERS_PER_QUEUE 512
-#define MANA_RX_DATA_ALIGN 64
#define MAX_SEND_BUFFERS_PER_QUEUE 256
@@ -353,6 +352,25 @@ struct mana_tx_qp {
struct mana_ethtool_stats {
u64 stop_queue;
u64 wake_queue;
+ u64 hc_rx_discards_no_wqe;
+ u64 hc_rx_err_vport_disabled;
+ u64 hc_rx_bytes;
+ u64 hc_rx_ucast_pkts;
+ u64 hc_rx_ucast_bytes;
+ u64 hc_rx_bcast_pkts;
+ u64 hc_rx_bcast_bytes;
+ u64 hc_rx_mcast_pkts;
+ u64 hc_rx_mcast_bytes;
+ u64 hc_tx_err_gf_disabled;
+ u64 hc_tx_err_vport_disabled;
+ u64 hc_tx_err_inval_vportoffset_pkt;
+ u64 hc_tx_err_vlan_enforcement;
+ u64 hc_tx_err_eth_type_enforcement;
+ u64 hc_tx_err_sa_enforcement;
+ u64 hc_tx_err_sqpdid_enforcement;
+ u64 hc_tx_err_cqpdid_enforcement;
+ u64 hc_tx_err_mtu_violation;
+ u64 hc_tx_err_inval_oob;
u64 hc_tx_bytes;
u64 hc_tx_ucast_pkts;
u64 hc_tx_ucast_bytes;
@@ -360,6 +378,7 @@ struct mana_ethtool_stats {
u64 hc_tx_bcast_bytes;
u64 hc_tx_mcast_pkts;
u64 hc_tx_mcast_bytes;
+ u64 hc_tx_err_gdma;
u64 tx_cqe_err;
u64 tx_cqe_unknown_type;
u64 rx_coalesced_err;
@@ -602,8 +621,8 @@ struct mana_query_gf_stat_resp {
struct gdma_resp_hdr hdr;
u64 reported_stats;
/* rx errors/discards */
- u64 discard_rx_nowqe;
- u64 err_rx_vport_disabled;
+ u64 rx_discards_nowqe;
+ u64 rx_err_vport_disabled;
/* rx bytes/packets */
u64 hc_rx_bytes;
u64 hc_rx_ucast_pkts;
@@ -613,16 +632,16 @@ struct mana_query_gf_stat_resp {
u64 hc_rx_mcast_pkts;
u64 hc_rx_mcast_bytes;
/* tx errors */
- u64 err_tx_gf_disabled;
- u64 err_tx_vport_disabled;
- u64 err_tx_inval_vport_offset_pkt;
- u64 err_tx_vlan_enforcement;
- u64 err_tx_ethtype_enforcement;
- u64 err_tx_SA_enforecement;
- u64 err_tx_SQPDID_enforcement;
- u64 err_tx_CQPDID_enforcement;
- u64 err_tx_mtu_violation;
- u64 err_tx_inval_oob;
+ u64 tx_err_gf_disabled;
+ u64 tx_err_vport_disabled;
+ u64 tx_err_inval_vport_offset_pkt;
+ u64 tx_err_vlan_enforcement;
+ u64 tx_err_ethtype_enforcement;
+ u64 tx_err_SA_enforcement;
+ u64 tx_err_SQPDID_enforcement;
+ u64 tx_err_CQPDID_enforcement;
+ u64 tx_err_mtu_violation;
+ u64 tx_err_inval_oob;
/* tx bytes/packets */
u64 hc_tx_bytes;
u64 hc_tx_ucast_pkts;
@@ -632,7 +651,7 @@ struct mana_query_gf_stat_resp {
u64 hc_tx_mcast_pkts;
u64 hc_tx_mcast_bytes;
/* tx error */
- u64 err_tx_gdma;
+ u64 tx_err_gdma;
}; /* HW DATA */
/* Configure vPort Rx Steering */
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index cdcafb30d4..aa1716fb0e 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -21,6 +21,10 @@ struct netdev_rx_queue {
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
+ /* NAPI instance for the queue
+ * Readers and writers must hold RTNL
+ */
+ struct napi_struct *napi;
} ____cacheline_aligned_in_smp;
/*
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 4a767b3d20..9abb7ee40d 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -74,12 +74,13 @@ enum nf_flowtable_flags {
};
struct nf_flowtable {
- struct list_head list;
- struct rhashtable rhashtable;
- int priority;
+ unsigned int flags; /* readonly in datapath */
+ int priority; /* control path (padding hole) */
+ struct rhashtable rhashtable; /* datapath, read-mostly members come first */
+
+ struct list_head list; /* slowpath parts */
const struct nf_flowtable_type *type;
struct delayed_work gc_work;
- unsigned int flags;
struct flow_block flow_block;
struct rw_semaphore flow_block_lock; /* Guards flow_block */
possible_net_t net;
@@ -335,7 +336,7 @@ int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);
-static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
{
__be16 proto;
@@ -351,6 +352,16 @@ static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
return 0;
}
+static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
+{
+ if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
+ return false;
+
+ *inner_proto = __nf_flow_pppoe_proto(skb);
+
+ return true;
+}
+
#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 62013d0184..1cf9cb0f0a 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -205,6 +205,7 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
* @nla: netlink attributes
* @portid: netlink portID of the original message
* @seq: netlink sequence number
+ * @flags: modifiers to new request
* @family: protocol family
* @level: depth of the chains
* @report: notify via unicast netlink message
@@ -282,6 +283,7 @@ struct nft_elem_priv { };
*
* @key: element key
* @key_end: closing element key
+ * @data: element data
* @priv: element private data and extensions
*/
struct nft_set_elem {
@@ -305,9 +307,23 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
return (void *)priv;
}
+
+/**
+ * enum nft_iter_type - nftables set iterator type
+ *
+ * @NFT_ITER_UNSPEC: unspecified iterator type, invalid in actual use
+ * @NFT_ITER_READ: read-only iteration over set elements
+ * @NFT_ITER_UPDATE: iteration under mutex to update set element state
+ */
+enum nft_iter_type {
+ NFT_ITER_UNSPEC,
+ NFT_ITER_READ,
+ NFT_ITER_UPDATE,
+};
+
struct nft_set;
struct nft_set_iter {
u8 genmask;
+ enum nft_iter_type type:8;
unsigned int count;
unsigned int skip;
int err;
@@ -325,10 +341,10 @@ struct nft_set_iter {
* @dtype: data type
* @dlen: data length
* @objtype: object type
- * @flags: flags
* @size: number of set elements
* @policy: set policy
* @gc_int: garbage collector interval
+ * @timeout: element timeout
* @field_len: length of each field in concatenation, bytes
* @field_count: number of concatenated fields in element
* @expr: set must support for expressions
@@ -351,9 +367,9 @@ struct nft_set_desc {
/**
* enum nft_set_class - performance class
*
- * @NFT_LOOKUP_O_1: constant, O(1)
- * @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
- * @NFT_LOOKUP_O_N: linear, O(N)
+ * @NFT_SET_CLASS_O_1: constant, O(1)
+ * @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N)
+ * @NFT_SET_CLASS_O_N: linear, O(N)
*/
enum nft_set_class {
NFT_SET_CLASS_O_1,
@@ -422,9 +438,13 @@ struct nft_set_ext;
* @remove: remove element from set
* @walk: iterate over all set elements
* @get: get set elements
+ * @commit: commit set elements
+ * @abort: abort set elements
* @privsize: function to return size of set private data
+ * @estimate: estimate the required memory size and the lookup complexity class
* @init: initialize private data of new set instance
* @destroy: destroy private data of set instance
+ * @gc_init: initialize garbage collection
* @elemsize: element private size
*
* Operations lookup, update and delete have simpler interfaces, are faster
@@ -540,13 +560,16 @@ struct nft_set_elem_expr {
* @policy: set parameterization (see enum nft_set_policies)
* @udlen: user data length
* @udata: user data
- * @expr: stateful expression
+ * @pending_update: list of set elements with pending updates
* @ops: set ops
* @flags: set flags
* @dead: set will be freed, never cleared
* @genmask: generation mask
* @klen: key length
* @dlen: data length
+ * @num_exprs: number of expressions in @exprs
+ * @exprs: stateful expressions
+ * @catchall_list: list of catch-all set elements
* @data: private set data
*/
struct nft_set {
@@ -692,6 +715,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[];
*
* @len: length of extension area
* @offset: offsets of individual extension types
+ * @ext_len: length of the expected extension (used for sanity checks)
*/
struct nft_set_ext_tmpl {
u16 len;
@@ -846,6 +870,7 @@ struct nft_expr_ops;
* @select_ops: function to select nft_expr_ops
* @release_ops: release nft_expr_ops
* @ops: default ops, used when no select_ops functions is present
+ * @inner_ops: inner ops, used for inner packet operation
* @list: used internally
* @name: Identifier
* @owner: module reference
@@ -887,14 +912,22 @@ struct nft_offload_ctx;
* struct nft_expr_ops - nf_tables expression operations
*
* @eval: Expression evaluation function
+ * @clone: Expression clone function
* @size: full expression size, including private data size
* @init: initialization function
* @activate: activate expression in the next generation
* @deactivate: deactivate expression in next generation
* @destroy: destruction function, called after synchronize_rcu
+ * @destroy_clone: destruction function for cloned expressions
* @dump: function to dump parameters
- * @type: expression type
* @validate: validate expression, called during loop detection
+ * @reduce: reduce expression
+ * @gc: garbage collection expression
+ * @offload: hardware offload expression
+ * @offload_action: function reporting whether the expression needs a slot in
+ * the flow offload array
+ * @offload_stats: function to synchronize hardware stats by updating the
+ * counter expression
+ * @type: expression type
* @data: extra data to attach to this expression operation
*/
struct nft_expr_ops {
@@ -1047,14 +1080,21 @@ struct nft_rule_blob {
/**
* struct nft_chain - nf_tables chain
*
+ * @blob_gen_0: rule blob pointer to the current generation
+ * @blob_gen_1: rule blob pointer to the future generation
* @rules: list of rules in the chain
* @list: used internally
* @rhlhead: used internally
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
- * @flags: bitmask of enum nft_chain_flags
+ * @flags: bitmask of enum NFTA_CHAIN_FLAGS
+ * @bound: true if the chain has been bound to a rule
+ * @genmask: generation mask
* @name: name of the chain
+ * @udlen: user data length
+ * @udata: user data in the chain
+ * @blob_next: rule blob pointer to the next in the chain
*/
struct nft_chain {
struct nft_rule_blob __rcu *blob_gen_0;
@@ -1152,6 +1192,7 @@ struct nft_hook {
* @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
* @type: chain type
* @policy: default policy
+ * @flags: indicates whether the base chain is disabled
* @stats: per-cpu chain stats
* @chain: the chain
* @flow_block: flow block (for hardware offload)
@@ -1280,11 +1321,13 @@ struct nft_object_hash_key {
* struct nft_object - nf_tables stateful object
*
* @list: table stateful object list node
- * @key: keys that identify this object
* @rhlhead: nft_objname_ht node
+ * @key: keys that identify this object
* @genmask: generation mask
* @use: number of references to this stateful object
* @handle: unique object handle
+ * @udlen: length of user data
+ * @udata: user data
* @ops: object operations
* @data: object data, layout depends on type
*/
@@ -1352,6 +1395,7 @@ struct nft_object_type {
* @destroy: release existing stateful object
* @dump: netlink dump stateful object
* @update: update stateful object
+ * @type: pointer to object type
*/
struct nft_object_ops {
void (*eval)(struct nft_object *obj,
@@ -1387,9 +1431,8 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
* @genmask: generation mask
* @use: number of references to this flow table
* @handle: unique object handle
- * @dev_name: array of device names
+ * @hook_list: hook list for hooks per net_device in flowtables
* @data: rhashtable and garbage collector
- * @ops: array of hooks
*/
struct nft_flowtable {
struct list_head list;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 83bdf787ae..c19ff921b6 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1011,6 +1011,20 @@ static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
}
/**
+ * nlmsg_new_large - Allocate a new netlink message with non-contiguous
+ * physical memory
+ * @payload: size of the message payload
+ *
+ * The allocated skb cannot carry frag pages in shinfo->frags[],
+ * as netlink_skb_destructor() sets skb->head to NULL and thereby
+ * bypasses most of the handling in skb_release_data()
+ */
+static inline struct sk_buff *nlmsg_new_large(size_t payload)
+{
+ return netlink_alloc_large_skb(nlmsg_total_size(payload), 0);
+}
+
+/**
* nlmsg_end - Finalize a netlink message
* @skb: socket buffer the message is stored in
* @nlh: netlink message header
@@ -1073,21 +1087,29 @@ static inline void nlmsg_free(struct sk_buff *skb)
}
/**
- * nlmsg_multicast - multicast a netlink message
+ * nlmsg_multicast_filtered - multicast a netlink message with filter function
* @sk: netlink socket to spread messages to
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: multicast group id
* @flags: allocation flags
+ * @filter: filter function
+ * @filter_data: filter function private data
+ *
+ * Return: 0 on success, negative error code for failure.
*/
-static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
- u32 portid, unsigned int group, gfp_t flags)
+static inline int nlmsg_multicast_filtered(struct sock *sk, struct sk_buff *skb,
+ u32 portid, unsigned int group,
+ gfp_t flags,
+ netlink_filter_fn filter,
+ void *filter_data)
{
int err;
NETLINK_CB(skb).dst_group = group;
- err = netlink_broadcast(sk, skb, portid, group, flags);
+ err = netlink_broadcast_filtered(sk, skb, portid, group, flags,
+ filter, filter_data);
if (err > 0)
err = 0;
@@ -1095,6 +1117,21 @@ static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
}
/**
+ * nlmsg_multicast - multicast a netlink message
+ * @sk: netlink socket to spread messages to
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: multicast group id
+ * @flags: allocation flags
+ */
+static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
+ u32 portid, unsigned int group, gfp_t flags)
+{
+ return nlmsg_multicast_filtered(sk, skb, portid, group, flags,
+ NULL, NULL);
+}
+
+/**
* nlmsg_unicast - unicast a netlink message
* @sk: netlink socket to spread message to
* @skb: netlink message as socket buffer
@@ -1200,7 +1237,7 @@ static inline void *nla_data(const struct nlattr *nla)
* nla_len - length of payload
* @nla: netlink attribute
*/
-static inline int nla_len(const struct nlattr *nla)
+static inline u16 nla_len(const struct nlattr *nla)
{
return nla->nla_len - NLA_HDRLEN;
}
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index a91ef9f8de..78214f1b43 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -13,6 +13,7 @@ struct netns_core {
struct ctl_table_header *sysctl_hdr;
int sysctl_somaxconn;
+ int sysctl_optmem_max;
u8 sysctl_txrehash;
#ifdef CONFIG_PROC_FS
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 73f43f6991..c356c458b3 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -19,8 +19,7 @@ struct hlist_head;
struct fib_table;
struct sock;
struct local_ports {
- seqlock_t lock;
- int range[2];
+ u32 range; /* high << 16 | low */
bool warned;
};
@@ -42,6 +41,38 @@ struct inet_timewait_death_row {
struct tcp_fastopen_context;
struct netns_ipv4 {
+ /* Cacheline organization can be found documented in
+ * Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst.
+ * Please update the document when adding new fields.
+ */
+
+ /* TX readonly hotpath cache lines */
+ __cacheline_group_begin(netns_ipv4_read_tx);
+ u8 sysctl_tcp_early_retrans;
+ u8 sysctl_tcp_tso_win_divisor;
+ u8 sysctl_tcp_tso_rtt_log;
+ u8 sysctl_tcp_autocorking;
+ int sysctl_tcp_min_snd_mss;
+ unsigned int sysctl_tcp_notsent_lowat;
+ int sysctl_tcp_limit_output_bytes;
+ int sysctl_tcp_min_rtt_wlen;
+ int sysctl_tcp_wmem[3];
+ u8 sysctl_ip_fwd_use_pmtu;
+ __cacheline_group_end(netns_ipv4_read_tx);
+
+ /* TXRX readonly hotpath cache lines */
+ __cacheline_group_begin(netns_ipv4_read_txrx);
+ u8 sysctl_tcp_moderate_rcvbuf;
+ __cacheline_group_end(netns_ipv4_read_txrx);
+
+ /* RX readonly hotpath cache line */
+ __cacheline_group_begin(netns_ipv4_read_rx);
+ u8 sysctl_ip_early_demux;
+ u8 sysctl_tcp_early_demux;
+ int sysctl_tcp_reordering;
+ int sysctl_tcp_rmem[3];
+ __cacheline_group_end(netns_ipv4_read_rx);
+
struct inet_timewait_death_row tcp_death_row;
struct udp_table *udp_table;
@@ -96,17 +127,14 @@ struct netns_ipv4 {
u8 sysctl_ip_default_ttl;
u8 sysctl_ip_no_pmtu_disc;
- u8 sysctl_ip_fwd_use_pmtu;
u8 sysctl_ip_fwd_update_priority;
u8 sysctl_ip_nonlocal_bind;
u8 sysctl_ip_autobind_reuse;
/* Shall we try to damage output packets if routing dev changes? */
u8 sysctl_ip_dynaddr;
- u8 sysctl_ip_early_demux;
#ifdef CONFIG_NET_L3_MASTER_DEV
u8 sysctl_raw_l3mdev_accept;
#endif
- u8 sysctl_tcp_early_demux;
u8 sysctl_udp_early_demux;
u8 sysctl_nexthop_compat_mode;
@@ -119,7 +147,6 @@ struct netns_ipv4 {
u8 sysctl_tcp_mtu_probing;
int sysctl_tcp_mtu_probe_floor;
int sysctl_tcp_base_mss;
- int sysctl_tcp_min_snd_mss;
int sysctl_tcp_probe_threshold;
u32 sysctl_tcp_probe_interval;
@@ -135,17 +162,14 @@ struct netns_ipv4 {
u8 sysctl_tcp_backlog_ack_defer;
u8 sysctl_tcp_pingpong_thresh;
- int sysctl_tcp_reordering;
u8 sysctl_tcp_retries1;
u8 sysctl_tcp_retries2;
u8 sysctl_tcp_orphan_retries;
u8 sysctl_tcp_tw_reuse;
int sysctl_tcp_fin_timeout;
- unsigned int sysctl_tcp_notsent_lowat;
u8 sysctl_tcp_sack;
u8 sysctl_tcp_window_scaling;
u8 sysctl_tcp_timestamps;
- u8 sysctl_tcp_early_retrans;
u8 sysctl_tcp_recovery;
u8 sysctl_tcp_thin_linear_timeouts;
u8 sysctl_tcp_slow_start_after_idle;
@@ -161,21 +185,13 @@ struct netns_ipv4 {
u8 sysctl_tcp_frto;
u8 sysctl_tcp_nometrics_save;
u8 sysctl_tcp_no_ssthresh_metrics_save;
- u8 sysctl_tcp_moderate_rcvbuf;
- u8 sysctl_tcp_tso_win_divisor;
u8 sysctl_tcp_workaround_signed_windows;
- int sysctl_tcp_limit_output_bytes;
int sysctl_tcp_challenge_ack_limit;
- int sysctl_tcp_min_rtt_wlen;
u8 sysctl_tcp_min_tso_segs;
- u8 sysctl_tcp_tso_rtt_log;
- u8 sysctl_tcp_autocorking;
u8 sysctl_tcp_reflect_tos;
int sysctl_tcp_invalid_ratelimit;
int sysctl_tcp_pacing_ss_ratio;
int sysctl_tcp_pacing_ca_ratio;
- int sysctl_tcp_wmem[3];
- int sysctl_tcp_rmem[3];
unsigned int sysctl_tcp_child_ehash_entries;
unsigned long sysctl_tcp_comp_sack_delay_ns;
unsigned long sysctl_tcp_comp_sack_slack_ns;
diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h
index 582212ada3..fc752a50f9 100644
--- a/include/net/netns/smc.h
+++ b/include/net/netns/smc.h
@@ -22,5 +22,7 @@ struct netns_smc {
int sysctl_smcr_testlink_time;
int sysctl_wmem;
int sysctl_rmem;
+ int sysctl_max_links_per_lgr;
+ int sysctl_max_conns_per_lgr;
};
#endif
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 8cd9d141f5..4c752f7999 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -78,6 +78,10 @@ enum nl802154_commands {
NL802154_CMD_SCAN_DONE,
NL802154_CMD_SEND_BEACONS,
NL802154_CMD_STOP_BEACONS,
+ NL802154_CMD_ASSOCIATE,
+ NL802154_CMD_DISASSOCIATE,
+ NL802154_CMD_SET_MAX_ASSOCIATIONS,
+ NL802154_CMD_LIST_ASSOCIATIONS,
/* add new commands above here */
@@ -147,6 +151,8 @@ enum nl802154_attrs {
NL802154_ATTR_SCAN_DURATION,
NL802154_ATTR_SCAN_DONE_REASON,
NL802154_ATTR_BEACON_INTERVAL,
+ NL802154_ATTR_MAX_ASSOCIATIONS,
+ NL802154_ATTR_PEER,
/* add attributes here, update the policy in nl802154.c */
@@ -385,8 +391,6 @@ enum nl802154_supported_bool_states {
NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
};
-#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
-
enum nl802154_dev_addr_modes {
NL802154_DEV_ADDR_NONE,
__NL802154_DEV_ADDR_INVALID,
@@ -406,12 +410,26 @@ enum nl802154_dev_addr_attrs {
NL802154_DEV_ADDR_ATTR_SHORT,
NL802154_DEV_ADDR_ATTR_EXTENDED,
NL802154_DEV_ADDR_ATTR_PAD,
+ NL802154_DEV_ADDR_ATTR_PEER_TYPE,
/* keep last */
__NL802154_DEV_ADDR_ATTR_AFTER_LAST,
NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1
};
+enum nl802154_peer_type {
+ NL802154_PEER_TYPE_UNSPEC,
+
+ NL802154_PEER_TYPE_PARENT,
+ NL802154_PEER_TYPE_CHILD,
+
+ /* keep last */
+ __NL802154_PEER_TYPE_AFTER_LAST,
+ NL802154_PEER_TYPE_MAX = __NL802154_PEER_TYPE_AFTER_LAST - 1
+};
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+
enum nl802154_key_id_modes {
NL802154_KEY_ID_MODE_IMPLICIT,
NL802154_KEY_ID_MODE_INDEX,
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 4ebd544ae9..1d397c1a00 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -11,7 +11,7 @@
* The page_pool allocator is optimized for recycling page or page fragment used
* by skb packet and xdp frame.
*
- * Basic use involves replacing and alloc_pages() calls with page_pool_alloc(),
+ * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(),
* which allocate memory with or without page splitting depending on the
* requested memory size.
*
@@ -29,7 +29,7 @@
* page allocated from page pool. Page splitting enables memory saving and thus
* avoids TLB/cache miss for data access, but there also is some cost to
* implement page splitting, mainly some cache line dirtying/bouncing for
- * 'struct page' and atomic operation for page->pp_frag_count.
+ * 'struct page' and atomic operation for page->pp_ref_count.
*
* The API keeps track of in-flight pages. In order to let API users know when
* it is safe to free a page_pool object, the API users must call
* page_pool_put_page() or page_pool_free_va() to free the page_pool object, or
* attach the page_pool object to a page_pool-aware object like skbs marked with
* attach the page_pool object to a page_pool-aware object like skbs marked with
* skb_mark_for_recycle().
*
- * page_pool_put_page() may be called multi times on the same page if a page is
- * split into multi fragments. For the last fragment, it will either recycle the
- * page, or in case of page->_refcount > 1, it will release the DMA mapping and
- * in-flight state accounting.
+ * page_pool_put_page() may be called multiple times on the same page if a page
+ * is split into multiple fragments. For the last fragment, it will either
+ * recycle the page, or in case of page->_refcount > 1, it will release the DMA
+ * mapping and in-flight state accounting.
*
* dma_sync_single_range_for_device() is only called for the last fragment when
* page_pool is created with PP_FLAG_DMA_SYNC_DEV flag, so it depends on the
* last freed fragment to do the sync_for_device operation for all fragments in
- * the same page when a page is split, the API user must setup pool->p.max_len
+ * the same page when a page is split. The API user must setup pool->p.max_len
* and pool->p.offset correctly and ensure that page_pool_put_page() is called
* with dma_sync_size being -1 for fragment API.
*/
@@ -55,16 +55,12 @@
#include <net/page_pool/types.h>
#ifdef CONFIG_PAGE_POOL_STATS
+/* Deprecated driver-facing API, use netlink instead */
int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
-/*
- * Drivers that wish to harvest page pool stats and report them to users
- * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
- * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool.
- */
-bool page_pool_get_stats(struct page_pool *pool,
+bool page_pool_get_stats(const struct page_pool *pool,
struct page_pool_stats *stats);
#else
static inline int page_pool_ethtool_stats_get_count(void)
@@ -214,69 +210,82 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
return pool->p.dma_dir;
}
-/* pp_frag_count represents the number of writers who can update the page
- * either by updating skb->data or via DMA mappings for the device.
- * We can't rely on the page refcnt for that as we don't know who might be
- * holding page references and we can't reliably destroy or sync DMA mappings
- * of the fragments.
+/**
+ * page_pool_fragment_page() - split a fresh page into fragments
+ * @page: page to split
+ * @nr: references to set
+ *
+ * pp_ref_count represents the number of outstanding references to the page,
+ * which will be freed using page_pool APIs (rather than page allocator APIs
+ * like put_page()). Such references are usually held by page_pool-aware
+ * objects like skbs marked for page pool recycling.
*
- * When pp_frag_count reaches 0 we can either recycle the page if the page
- * refcnt is 1 or return it back to the memory allocator and destroy any
- * mappings we have.
+ * This helper allows the caller to take (set) multiple references to a
+ * freshly allocated page. The page must be freshly allocated (have a
+ * pp_ref_count of 1). This is commonly done by drivers and
+ * "fragment allocators" to save atomic operations - either when they know
+ * upfront how many references they will need; or to take MAX references and
+ * return the unused ones with a single atomic dec(), instead of performing
+ * multiple atomic inc() operations.
*/
static inline void page_pool_fragment_page(struct page *page, long nr)
{
- atomic_long_set(&page->pp_frag_count, nr);
+ atomic_long_set(&page->pp_ref_count, nr);
}
-static inline long page_pool_defrag_page(struct page *page, long nr)
+static inline long page_pool_unref_page(struct page *page, long nr)
{
long ret;
- /* If nr == pp_frag_count then we have cleared all remaining
+ /* If nr == pp_ref_count then we have cleared all remaining
* references to the page:
* 1. 'n == 1': no need to actually overwrite it.
* 2. 'n != 1': overwrite it with one, which is the rare case
- * for pp_frag_count draining.
+ * for pp_ref_count draining.
*
* The main advantage to doing this is that not only we avoid a atomic
* update, as an atomic_read is generally a much cheaper operation than
* an atomic update, especially when dealing with a page that may be
- * partitioned into only 2 or 3 pieces; but also unify the pp_frag_count
+ * referenced by only 2 or 3 users; but also unify the pp_ref_count
	 * handling by ensuring all pages have been partitioned into only 1 piece
* initially, and only overwrite it when the page is partitioned into
* more than one piece.
*/
- if (atomic_long_read(&page->pp_frag_count) == nr) {
+ if (atomic_long_read(&page->pp_ref_count) == nr) {
/* As we have ensured nr is always one for constant case using
* the BUILD_BUG_ON(), only need to handle the non-constant case
- * here for pp_frag_count draining, which is a rare case.
+ * here for pp_ref_count draining, which is a rare case.
*/
BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
if (!__builtin_constant_p(nr))
- atomic_long_set(&page->pp_frag_count, 1);
+ atomic_long_set(&page->pp_ref_count, 1);
return 0;
}
- ret = atomic_long_sub_return(nr, &page->pp_frag_count);
+ ret = atomic_long_sub_return(nr, &page->pp_ref_count);
WARN_ON(ret < 0);
- /* We are the last user here too, reset pp_frag_count back to 1 to
+ /* We are the last user here too, reset pp_ref_count back to 1 to
* ensure all pages have been partitioned into 1 piece initially,
* this should be the rare case when the last two fragment users call
- * page_pool_defrag_page() currently.
+ * page_pool_unref_page() currently.
*/
if (unlikely(!ret))
- atomic_long_set(&page->pp_frag_count, 1);
+ atomic_long_set(&page->pp_ref_count, 1);
return ret;
}
-static inline bool page_pool_is_last_frag(struct page *page)
+static inline void page_pool_ref_page(struct page *page)
+{
+ atomic_long_inc(&page->pp_ref_count);
+}
+
+static inline bool page_pool_is_last_ref(struct page *page)
{
- /* If page_pool_defrag_page() returns 0, we were the last user */
- return page_pool_defrag_page(page, 1) == 0;
+ /* If page_pool_unref_page() returns 0, we were the last user */
+ return page_pool_unref_page(page, 1) == 0;
}
/**
@@ -301,10 +310,10 @@ static inline void page_pool_put_page(struct page_pool *pool,
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
*/
#ifdef CONFIG_PAGE_POOL
- if (!page_pool_is_last_frag(page))
+ if (!page_pool_is_last_ref(page))
return;
- page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
+ page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
#endif
}
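
To make the renamed reference-counting API concrete, here is a hedged sketch of the pattern the page_pool_fragment_page() kernel-doc describes: take the maximum number of references up front, then return the unused ones with a single atomic operation. MY_MAX_FRAGS and my_post_fragment() are hypothetical driver details.

#define MY_MAX_FRAGS	4	/* hypothetical per-page fragment budget */

static void my_refill(struct page_pool *pool)
{
	struct page *page;
	int posted = 0, i;

	page = page_pool_alloc_pages(pool, GFP_ATOMIC);
	if (!page)
		return;

	/* Fresh page: pp_ref_count is 1, so all references can be set at once. */
	page_pool_fragment_page(page, MY_MAX_FRAGS);

	for (i = 0; i < MY_MAX_FRAGS; i++) {
		if (!my_post_fragment(page, i))		/* hypothetical */
			break;
		posted++;
	}

	/* Return unused references with one atomic sub. If this drops the
	 * last reference, the page must still go back to the pool via
	 * page_pool_put_unrefed_page(); omitted here for brevity.
	 */
	if (posted < MY_MAX_FRAGS)
		page_pool_unref_page(page, MY_MAX_FRAGS - posted);
}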
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 6fc5134095..76481c4653 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -5,6 +5,7 @@
#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
+#include <linux/types.h>
#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
* map/unmap
@@ -48,24 +49,30 @@ struct pp_alloc_cache {
* @pool_size: size of the ptr_ring
 * @nid: NUMA node id to allocate pages from
* @dev: device, for DMA pre-mapping purposes
+ * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
* @napi: NAPI which is the sole consumer of pages, otherwise NULL
* @dma_dir: DMA mapping direction
* @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
* @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
*/
struct page_pool_params {
- unsigned int flags;
- unsigned int order;
- unsigned int pool_size;
- int nid;
- struct device *dev;
- struct napi_struct *napi;
- enum dma_data_direction dma_dir;
- unsigned int max_len;
- unsigned int offset;
+ struct_group_tagged(page_pool_params_fast, fast,
+ unsigned int flags;
+ unsigned int order;
+ unsigned int pool_size;
+ int nid;
+ struct device *dev;
+ struct napi_struct *napi;
+ enum dma_data_direction dma_dir;
+ unsigned int max_len;
+ unsigned int offset;
+ );
+ struct_group_tagged(page_pool_params_slow, slow,
+ struct net_device *netdev;
/* private: used by test code only */
- void (*init_callback)(struct page *page, void *arg);
- void *init_arg;
+ void (*init_callback)(struct page *page, void *arg);
+ void *init_arg;
+ );
};
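
Because struct_group_tagged() also exposes the grouped members directly, existing designated initializers keep compiling unchanged; a hedged sketch (pdev, priv and all values are illustrative only):

	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 1024,
		.nid		= NUMA_NO_NODE,
		.dev		= &pdev->dev,
		.napi		= &priv->napi,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
		.offset		= 0,
		.netdev		= priv->netdev,	/* new slow-path member */
	};
	struct page_pool *pool = page_pool_create(&pp_params);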
#ifdef CONFIG_PAGE_POOL_STATS
@@ -119,7 +126,9 @@ struct page_pool_stats {
#endif
struct page_pool {
- struct page_pool_params p;
+ struct page_pool_params_fast p;
+
+ bool has_init_callback;
long frag_users;
struct page *frag_page;
@@ -178,6 +187,16 @@ struct page_pool {
refcount_t user_cnt;
u64 destroy_cnt;
+
+ /* Slow/Control-path information follows */
+ struct page_pool_params_slow slow;
+ /* User-facing fields, protected by page_pools_lock */
+ struct {
+ struct hlist_node list;
+ u64 detach_time;
+ u32 napi_id;
+ u32 id;
+ } user;
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
@@ -215,9 +234,9 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
}
#endif
-void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
- unsigned int dma_sync_size,
- bool allow_direct);
+void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size,
+ bool allow_direct);
static inline bool is_page_pool_compiled_in(void)
{
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a76c9171db..f308e82686 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -154,12 +154,6 @@ __cls_set_class(unsigned long *clp, unsigned long cl)
return xchg(clp, cl);
}
-static inline void tcf_set_drop_reason(struct tcf_result *res,
- enum skb_drop_reason reason)
-{
- res->drop_reason = reason;
-}
-
static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 9fa1d0794d..1e200d9a06 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -275,24 +275,6 @@ static inline void skb_txtime_consumed(struct sk_buff *skb)
skb->tstamp = ktime_set(0, 0);
}
-struct tc_skb_cb {
- struct qdisc_skb_cb qdisc_cb;
-
- u16 mru;
- u8 post_ct:1;
- u8 post_ct_snat:1;
- u8 post_ct_dnat:1;
- u16 zone; /* Only valid if post_ct = true */
-};
-
-static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
-{
- struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
-
- BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
- return cb;
-}
-
static inline bool tc_qdisc_stats_dump(struct Qdisc *sch,
unsigned long cl,
struct qdisc_walker *arg)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 959a7725c2..41ca14e81d 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -19,6 +19,7 @@
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>
+#include <linux/xarray.h>
struct Qdisc_ops;
struct qdisc_walker;
@@ -116,6 +117,7 @@ struct Qdisc {
struct qdisc_skb_head q;
struct gnet_stats_basic_sync bstats;
struct gnet_stats_queue qstats;
+ int owner;
unsigned long state;
unsigned long state2; /* must be written under qdisc spinlock */
struct Qdisc *next_sched;
@@ -237,12 +239,7 @@ static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
-#ifdef CONFIG_BQL
- /* Non-BQL migrated drivers will return 0, too. */
- return dql_avail(&txq->dql);
-#else
- return 0;
-#endif
+ return netdev_queue_dql_avail(txq);
}
struct Qdisc_class_ops {
@@ -332,7 +329,6 @@ struct tcf_result {
};
const struct tcf_proto *goto_tp;
};
- enum skb_drop_reason drop_reason;
};
struct tcf_chain;
@@ -461,6 +457,7 @@ struct tcf_chain {
};
struct tcf_block {
+ struct xarray ports; /* datapath accessible */
/* Lock protects tcf_block and lifetime-management data of chains
* attached to the block (refcnt, action_refcnt, explicitly_created).
*/
@@ -487,6 +484,8 @@ struct tcf_block {
struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};
+struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index);
+
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
return lockdep_is_held(&chain->filter_chain_lock);
@@ -1041,6 +1040,37 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
return skb;
}
+struct tc_skb_cb {
+ struct qdisc_skb_cb qdisc_cb;
+ u32 drop_reason;
+
+ u16 zone; /* Only valid if post_ct = true */
+ u16 mru;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+ struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+ return cb;
+}
+
+static inline enum skb_drop_reason
+tcf_get_drop_reason(const struct sk_buff *skb)
+{
+ return tc_skb_cb(skb)->drop_reason;
+}
+
+static inline void tcf_set_drop_reason(const struct sk_buff *skb,
+ enum skb_drop_reason reason)
+{
+ tc_skb_cb(skb)->drop_reason = reason;
+}
+
/* Instead of calling kfree_skb() while root qdisc lock is held,
* queue the skb for future freeing at end of __dev_xmit_skb()
*/
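
With drop_reason now carried in tc_skb_cb instead of struct tcf_result, classifiers and actions record the reason against the skb itself. A hedged sketch (my_cls_match() is hypothetical; SKB_DROP_REASON_NOT_SPECIFIED stands in for a more precise reason):

static int my_cls_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	if (!my_cls_match(skb, tp))	/* hypothetical matcher */
		return -1;	/* no match, continue with the next filter */

	/* Drop-path consumers retrieve this via tcf_get_drop_reason(). */
	tcf_set_drop_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
	return TC_ACT_SHOT;
}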
diff --git a/include/net/scm.h b/include/net/scm.h
index e8c76b4be2..cf68acec4d 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -5,6 +5,7 @@
#include <linux/limits.h>
#include <linux/net.h>
#include <linux/cred.h>
+#include <linux/file.h>
#include <linux/security.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
@@ -208,5 +209,13 @@ static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg,
scm_destroy_cred(scm);
}
+static inline int scm_recv_one_fd(struct file *f, int __user *ufd,
+ unsigned int flags)
+{
+ if (!ufd)
+ return -EFAULT;
+ return receive_fd(f, ufd, flags);
+}
+
#endif /* __LINUX_NET_SCM_H */
diff --git a/include/net/smc.h b/include/net/smc.h
index a002552be2..c9dcb30e3f 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -52,9 +52,14 @@ struct smcd_dmb {
struct smcd_dev;
struct ism_client;
+struct smcd_gid {
+ u64 gid;
+ u64 gid_ext;
+};
+
struct smcd_ops {
- int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
- u32 vid);
+ int (*query_remote_gid)(struct smcd_dev *dev, struct smcd_gid *rgid,
+ u32 vid_valid, u32 vid);
int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb,
struct ism_client *client);
int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
@@ -62,14 +67,13 @@ struct smcd_ops {
int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
int (*set_vlan_required)(struct smcd_dev *dev);
int (*reset_vlan_required)(struct smcd_dev *dev);
- int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq,
- u32 event_code, u64 info);
+ int (*signal_event)(struct smcd_dev *dev, struct smcd_gid *rgid,
+ u32 trigger_irq, u32 event_code, u64 info);
int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
bool sf, unsigned int offset, void *data,
unsigned int size);
int (*supports_v2)(void);
- u8* (*get_system_eid)(void);
- u64 (*get_local_gid)(struct smcd_dev *dev);
+ void (*get_local_gid)(struct smcd_dev *dev, struct smcd_gid *gid);
u16 (*get_chid)(struct smcd_dev *dev);
struct device* (*get_dev)(struct smcd_dev *dev);
};
diff --git a/include/net/sock.h b/include/net/sock.h
index f9a9f61fa1..54a7967613 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -76,19 +76,6 @@
* the other protocols.
*/
-/* Define this to get the SOCK_DBG debugging facility. */
-#define SOCK_DEBUGGING
-#ifdef SOCK_DEBUGGING
-#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
- printk(KERN_DEBUG msg); } while (0)
-#else
-/* Validate arguments and do nothing */
-static inline __printf(2, 3)
-void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
-{
-}
-#endif
-
/* This is the per-socket lock. The spinlock provides a synchronization
* between user contexts and software interrupt processing, whereas the
* mini-semaphore synchronizes multiple users amongst themselves.
@@ -277,8 +264,6 @@ struct sk_filter;
* @sk_pacing_status: Pacing status (requested, handled by sch_fq)
* @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
- * @__sk_flags_offset: empty field used to determine location of bitfield
- * @sk_padding: unused element for alignment
* @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
@@ -352,7 +337,6 @@ struct sk_filter;
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
* @ns_tracker: tracker for netns reference
- * @sk_bind2_node: bind node in the bhash2 table
*/
struct sock {
/*
@@ -544,7 +528,6 @@ struct sock {
#endif
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
- struct hlist_node sk_bind2_node;
};
enum sk_pacing {
@@ -873,16 +856,6 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_add_head(&sk->sk_bind_node, list);
}
-static inline void __sk_del_bind2_node(struct sock *sk)
-{
- __hlist_del(&sk->sk_bind2_node);
-}
-
-static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
-{
- hlist_add_head(&sk->sk_bind2_node, list);
-}
-
#define sk_for_each(__sk, list) \
hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
@@ -900,8 +873,6 @@ static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
hlist_for_each_entry(__sk, list, sk_bind_node)
-#define sk_for_each_bound_bhash2(__sk, list) \
- hlist_for_each_entry(__sk, list, sk_bind2_node)
/**
* sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
@@ -1458,33 +1429,36 @@ sk_memory_allocated(const struct sock *sk)
/* 1 MB per cpu, in page units */
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+extern int sysctl_mem_pcpu_rsv;
+
+static inline void proto_memory_pcpu_drain(struct proto *proto)
+{
+ int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
+
+ if (val)
+ atomic_long_add(val, proto->memory_allocated);
+}
static inline void
-sk_memory_allocated_add(struct sock *sk, int amt)
+sk_memory_allocated_add(const struct sock *sk, int val)
{
- int local_reserve;
+ struct proto *proto = sk->sk_prot;
- preempt_disable();
- local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
- __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
- atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
- }
- preempt_enable();
+ val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
+
+ if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
+ proto_memory_pcpu_drain(proto);
}
static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt)
+sk_memory_allocated_sub(const struct sock *sk, int val)
{
- int local_reserve;
+ struct proto *proto = sk->sk_prot;
- preempt_disable();
- local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
- __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
- atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
- }
- preempt_enable();
+ val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
+
+ if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
+ proto_memory_pcpu_drain(proto);
}
#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
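
A worked trace of the new drain behaviour, assuming 4 KiB pages (so SK_MEMORY_PCPU_RESERVE is 256 pages) and sysctl_mem_pcpu_rsv left at that default:

static void my_accounting_trace(struct sock *sk)
{
	sk_memory_allocated_add(sk, 100);	/* per-CPU forward alloc: 100 */
	sk_memory_allocated_add(sk, 200);	/* 300 >= 256: drained into
						 * proto->memory_allocated,
						 * per-CPU counter xchg'd to 0 */
	sk_memory_allocated_sub(sk, 300);	/* -300 <= -256: drained too */
}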
@@ -1808,6 +1782,13 @@ static inline void sock_owned_by_me(const struct sock *sk)
#endif
}
+static inline void sock_not_owned_by_me(const struct sock *sk)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(lockdep_sock_is_held(sk) && debug_locks);
+#endif
+}
+
static inline bool sock_owned_by_user(const struct sock *sk)
{
sock_owned_by_me(sk);
@@ -2941,7 +2922,6 @@ extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
extern int sysctl_tstamp_allow_data;
-extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;
diff --git a/include/net/tc_act/tc_ipt.h b/include/net/tc_act/tc_ipt.h
deleted file mode 100644
index 4225fcb1c6..0000000000
--- a/include/net/tc_act/tc_ipt.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NET_TC_IPT_H
-#define __NET_TC_IPT_H
-
-#include <net/act_api.h>
-
-struct xt_entry_target;
-
-struct tcf_ipt {
- struct tc_action common;
- u32 tcfi_hook;
- char *tcfi_tname;
- struct xt_entry_target *tcfi_t;
-};
-#define to_ipt(a) ((struct tcf_ipt *)a)
-
-#endif /* __NET_TC_IPT_H */
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 32ce8ea369..75722d967b 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,6 +8,7 @@
struct tcf_mirred {
struct tc_action common;
int tcfm_eaction;
+ u32 tcfm_blockid;
bool tcfm_mac_header_xmit;
struct net_device __rcu *tcfm_dev;
netdevice_tracker tcfm_dev_tracker;
diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
index a6d481b5bc..a608546bce 100644
--- a/include/net/tc_wrapper.h
+++ b/include/net/tc_wrapper.h
@@ -117,10 +117,6 @@ static inline int tc_act(struct sk_buff *skb, const struct tc_action *a,
if (a->ops->act == tcf_ife_act)
return tcf_ife_act(skb, a, res);
#endif
-#if IS_BUILTIN(CONFIG_NET_ACT_IPT)
- if (a->ops->act == tcf_ipt_act)
- return tcf_ipt_act(skb, a, res);
-#endif
#if IS_BUILTIN(CONFIG_NET_ACT_SIMP)
if (a->ops->act == tcf_simp_act)
return tcf_simp_act(skb, a, res);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 2ac9475be1..f6eba9652d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -490,13 +490,14 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst, u32 tsoff);
-int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
- u32 cookie);
+ struct dst_entry *dst);
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
- const struct tcp_request_sock_ops *af_ops,
- struct sock *sk, struct sk_buff *skb);
+ struct sock *sk, struct sk_buff *skb,
+ struct tcp_options_received *tcp_opt,
+ int mss, u32 tsoff);
+
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 60 seconds.
@@ -582,12 +583,15 @@ __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
struct tcp_options_received *opt);
-bool cookie_ecn_ok(const struct tcp_options_received *opt,
- const struct net *net, const struct dst_entry *dst);
+
+static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
+{
+ return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
+ dst_feature(dst, RTAX_FEATURE_ECN);
+}
/* From net/ipv6/syncookies.c */
-int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
- u32 cookie);
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index b04afced4c..471e177362 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -291,8 +291,7 @@ void tcp_ao_established(struct sock *sk);
void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb);
void tcp_ao_connect_init(struct sock *sk);
void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
- struct tcp_request_sock *treq,
- unsigned short int family, int l3index);
+ struct request_sock *req, unsigned short int family);
#else /* CONFIG_TCP_AO */
static inline int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
@@ -303,8 +302,7 @@ static inline int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
}
static inline void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
- struct tcp_request_sock *treq,
- unsigned short int family, int l3index)
+ struct request_sock *req, unsigned short int family)
{
}
diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h
index cc00118acc..d60e8148ff 100644
--- a/include/net/tcp_states.h
+++ b/include/net/tcp_states.h
@@ -22,6 +22,7 @@ enum {
TCP_LISTEN,
TCP_CLOSING, /* Now a valid state */
TCP_NEW_SYN_RECV,
+ TCP_BOUND_INACTIVE, /* Pseudo-state for inet_diag */
TCP_MAX_STATES /* Leave at the end! */
};
@@ -43,6 +44,7 @@ enum {
TCPF_LISTEN = (1 << TCP_LISTEN),
TCPF_CLOSING = (1 << TCP_CLOSING),
TCPF_NEW_SYN_RECV = (1 << TCP_NEW_SYN_RECV),
+ TCPF_BOUND_INACTIVE = (1 << TCP_BOUND_INACTIVE),
};
#endif /* _LINUX_TCP_STATES_H */
diff --git a/include/net/tls.h b/include/net/tls.h
index 340ad43971..33f657d3c0 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -111,7 +111,8 @@ struct tls_strparser {
u32 stopped : 1;
u32 copy_mode : 1;
u32 mixed_decrypted : 1;
- u32 msg_ready : 1;
+
+ bool msg_ready;
struct strp_msg stm;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 6a9f8a5f38..33ba6fc151 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -210,22 +210,23 @@ struct vxlan_rdst {
};
struct vxlan_config {
- union vxlan_addr remote_ip;
- union vxlan_addr saddr;
- __be32 vni;
- int remote_ifindex;
- int mtu;
- __be16 dst_port;
- u16 port_min;
- u16 port_max;
- u8 tos;
- u8 ttl;
- __be32 label;
- u32 flags;
- unsigned long age_interval;
- unsigned int addrmax;
- bool no_share;
- enum ifla_vxlan_df df;
+ union vxlan_addr remote_ip;
+ union vxlan_addr saddr;
+ __be32 vni;
+ int remote_ifindex;
+ int mtu;
+ __be16 dst_port;
+ u16 port_min;
+ u16 port_max;
+ u8 tos;
+ u8 ttl;
+ __be32 label;
+ enum ifla_vxlan_label_policy label_policy;
+ u32 flags;
+ unsigned long age_interval;
+ unsigned int addrmax;
+ bool no_share;
+ enum ifla_vxlan_df df;
};
enum {
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 349c36fb5f..e6770dd40c 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -16,7 +16,7 @@
*
* The XDP RX-queue info (xdp_rxq_info) is associated with the driver
* level RX-ring queues. It is information that is specific to how
- * the driver have configured a given RX-ring queue.
+ * the driver has configured a given RX-ring queue.
*
* Each xdp_buff frame received in the driver carries a (pointer)
* reference to this xdp_rxq_info structure. This provides the XDP
@@ -32,7 +32,7 @@
* The struct is not directly tied to the XDP prog. A new XDP prog
* can be attached as long as it doesn't change the underlying
* RX-ring. If the RX-ring does change significantly, the NIC driver
- * naturally need to stop the RX-ring before purging and reallocating
+ * naturally needs to stop the RX-ring before purging and reallocating
* memory. In that process the driver MUST call unregister (which
* also applies for driver shutdown and unload). The register API is
* also mandatory during RX-ring setup.
@@ -369,7 +369,12 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
static inline bool xdp_metalen_invalid(unsigned long metalen)
{
- return (metalen & (sizeof(__u32) - 1)) || (metalen > 32);
+ unsigned long meta_max;
+
+ meta_max = type_max(typeof_member(struct skb_shared_info, meta_len));
+ BUILD_BUG_ON(!__builtin_constant_p(meta_max));
+
+ return !IS_ALIGNED(metalen, sizeof(u32)) || metalen > meta_max;
}
struct xdp_attachment_info {
@@ -399,6 +404,10 @@ void xdp_attachment_setup(struct xdp_attachment_info *info,
NETDEV_XDP_RX_METADATA_HASH, \
bpf_xdp_metadata_rx_hash, \
xmo_rx_hash) \
+ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_VLAN_TAG, \
+ NETDEV_XDP_RX_METADATA_VLAN_TAG, \
+ bpf_xdp_metadata_rx_vlan_tag, \
+ xmo_rx_vlan_tag) \
enum xdp_rx_metadata {
#define XDP_METADATA_KFUNC(name, _, __, ___) name,
@@ -427,6 +436,7 @@ enum xdp_rss_hash_type {
XDP_RSS_L4_UDP = BIT(5),
XDP_RSS_L4_SCTP = BIT(6),
XDP_RSS_L4_IPSEC = BIT(7), /* L4 based hash include IPSEC SPI */
+ XDP_RSS_L4_ICMP = BIT(8),
/* Second part: RSS hash type combinations used for driver HW mapping */
XDP_RSS_TYPE_NONE = 0,
@@ -442,11 +452,13 @@ enum xdp_rss_hash_type {
XDP_RSS_TYPE_L4_IPV4_UDP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
XDP_RSS_TYPE_L4_IPV4_SCTP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
XDP_RSS_TYPE_L4_IPV4_IPSEC = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
+ XDP_RSS_TYPE_L4_IPV4_ICMP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,
XDP_RSS_TYPE_L4_IPV6_TCP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
XDP_RSS_TYPE_L4_IPV6_UDP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
XDP_RSS_TYPE_L4_IPV6_SCTP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
XDP_RSS_TYPE_L4_IPV6_IPSEC = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
+ XDP_RSS_TYPE_L4_IPV6_ICMP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,
XDP_RSS_TYPE_L4_IPV6_TCP_EX = XDP_RSS_TYPE_L4_IPV6_TCP | XDP_RSS_L3_DYNHDR,
XDP_RSS_TYPE_L4_IPV6_UDP_EX = XDP_RSS_TYPE_L4_IPV6_UDP | XDP_RSS_L3_DYNHDR,
@@ -457,6 +469,8 @@ struct xdp_metadata_ops {
int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
enum xdp_rss_hash_type *rss_type);
+ int (*xmo_rx_vlan_tag)(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci);
};
#ifdef CONFIG_NET
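
A hedged sketch of the driver-side hook backing the new VLAN-tag kfunc (struct my_xdp_buff and its descriptor layout are hypothetical; the cast mirrors how drivers wrap struct xdp_md):

static int my_xmo_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
			      u16 *vlan_tci)
{
	const struct my_xdp_buff *mb = (const void *)ctx;	/* hypothetical */

	if (!mb->rx_desc->vlan_valid)
		return -ENODATA;

	*vlan_proto = htons(ETH_P_8021Q);
	*vlan_tci = le16_to_cpu(mb->rx_desc->vlan_tci);
	return 0;
}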
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index f83128007f..3d54de168a 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -30,6 +30,7 @@ struct xdp_umem {
struct user_struct *user;
refcount_t users;
u8 flags;
+ u8 tx_metadata_len;
bool zc;
struct page **pgs;
int id;
@@ -92,12 +93,107 @@ struct xdp_sock {
struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
+/*
+ * AF_XDP TX metadata hooks for network devices.
+ * The following hooks can be defined; unless noted otherwise, they are
+ * optional and can be filled with a null pointer.
+ *
+ * void (*tmo_request_timestamp)(void *priv)
+ *	Called when an AF_XDP frame requests an egress timestamp.
+ *
+ * u64 (*tmo_fill_timestamp)(void *priv)
+ *	Called when an AF_XDP frame that had requested an egress timestamp
+ *	receives a completion. The hook needs to return the actual HW timestamp.
+ *
+ * void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv)
+ *	Called when an AF_XDP frame requests HW checksum offload. csum_start
+ *	indicates the position where checksumming should start.
+ *	csum_offset indicates the position where the checksum should be stored.
+ *
+ */
+struct xsk_tx_metadata_ops {
+ void (*tmo_request_timestamp)(void *priv);
+ u64 (*tmo_fill_timestamp)(void *priv);
+ void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv);
+};
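
A hedged sketch of a driver filling these hooks (all my_dev_* names are hypothetical; the table would then typically be advertised through the net_device so the XDP core can find it):

static void my_dev_request_timestamp(void *priv)
{
	struct my_tx_desc *d = priv;		/* hypothetical */

	d->flags |= MY_DESC_REPORT_TSTAMP;	/* hypothetical HW bit */
}

static u64 my_dev_fill_timestamp(void *priv)
{
	struct my_tx_desc *d = priv;

	return my_dev_read_hw_timestamp(d);	/* hypothetical */
}

static const struct xsk_tx_metadata_ops my_xsk_tmo = {
	.tmo_request_timestamp	= my_dev_request_timestamp,
	.tmo_fill_timestamp	= my_dev_fill_timestamp,
	/* .tmo_request_checksum left NULL: hook is optional */
};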
+
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);
+/**
+ * xsk_tx_metadata_to_compl - Save enough relevant metadata information
+ * to perform tx completion in the future.
+ * @meta: pointer to AF_XDP metadata area
+ * @compl: pointer to output struct xsk_tx_metadata_to_compl
+ *
+ * This function should be called by the networking device when
+ * it prepares an AF_XDP egress packet. The value of @compl should be stored
+ * and passed to xsk_tx_metadata_complete upon TX completion.
+ */
+static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
+ struct xsk_tx_metadata_compl *compl)
+{
+ if (!meta)
+ return;
+
+ if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
+ compl->tx_timestamp = &meta->completion.tx_timestamp;
+ else
+ compl->tx_timestamp = NULL;
+}
+
+/**
+ * xsk_tx_metadata_request - Evaluate AF_XDP TX metadata at submission
+ * and call the appropriate xsk_tx_metadata_ops operation.
+ * @meta: pointer to AF_XDP metadata area
+ * @ops: pointer to struct xsk_tx_metadata_ops
+ * @priv: pointer to driver-private area
+ *
+ * This function should be called by the networking device when
+ * it prepares an AF_XDP egress packet.
+ */
+static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+ if (!meta)
+ return;
+
+ if (ops->tmo_request_timestamp)
+ if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
+ ops->tmo_request_timestamp(priv);
+
+ if (ops->tmo_request_checksum)
+ if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM)
+ ops->tmo_request_checksum(meta->request.csum_start,
+ meta->request.csum_offset, priv);
+}
+
+/**
+ * xsk_tx_metadata_complete - Evaluate AF_XDP TX metadata at completion
+ * and call the appropriate xsk_tx_metadata_ops operation.
+ * @compl: pointer to completion metadata produced by xsk_tx_metadata_to_compl
+ * @ops: pointer to struct xsk_tx_metadata_ops
+ * @priv: pointer to driver-private area
+ *
+ * This function should be called by the networking device upon
+ * AF_XDP egress completion.
+ */
+static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+ if (!compl)
+ return;
+ if (!compl->tx_timestamp)
+ return;
+
+ *compl->tx_timestamp = ops->tmo_fill_timestamp(priv);
+}
+
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
@@ -114,6 +210,23 @@ static inline void __xsk_map_flush(void)
{
}
+static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
+ struct xsk_tx_metadata_compl *compl)
+{
+}
+
+static inline void xsk_tx_metadata_request(struct xsk_tx_metadata *meta,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+}
+
+static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+}
+
#endif /* CONFIG_XDP_SOCKETS */
#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 5425f7ad5e..c9aec9ab61 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -12,6 +12,12 @@
#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)
+struct xsk_cb_desc {
+ void *src;
+ u8 off;
+ u8 bytes;
+};
+
#ifdef CONFIG_XDP_SOCKETS
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
@@ -47,6 +53,12 @@ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
xp_set_rxq_info(pool, rxq);
}
+static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
+ struct xsk_cb_desc *desc)
+{
+ xp_fill_cb(pool, desc);
+}
+
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -183,6 +195,30 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
return xp_raw_get_data(pool, addr);
}
+#define XDP_TXMD_FLAGS_VALID ( \
+ XDP_TXMD_FLAGS_TIMESTAMP | \
+ XDP_TXMD_FLAGS_CHECKSUM | \
+ 0)
+
+static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
+{
+ return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
+}
+
+static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
+{
+ struct xsk_tx_metadata *meta;
+
+ if (!pool->tx_metadata_len)
+ return NULL;
+
+ meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
+ if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
+ return NULL; /* no way to signal the error to the user */
+
+ return meta;
+}
+
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
@@ -268,6 +304,11 @@ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
{
}
+static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
+ struct xsk_cb_desc *desc)
+{
+}
+
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
return 0;
@@ -351,6 +392,16 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
return NULL;
}
+static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
+{
+ return false;
+}
+
+static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
+{
+ return NULL;
+}
+
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}
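
Putting the submission and completion halves together, a hedged TX-path sketch (ring layout and my_* names are hypothetical; my_xsk_tmo is the ops table from the earlier sketch):

static void my_dev_xmit_zc(struct my_ring *ring, const struct xdp_desc *desc)
{
	struct xsk_tx_metadata *meta;

	meta = xsk_buff_get_metadata(ring->pool, desc->addr);
	xsk_tx_metadata_request(meta, &my_xsk_tmo, ring->compl_priv);

	/* ...fill and post the hardware descriptor... */

	xsk_tx_metadata_to_compl(meta, &ring->compl);
}

static void my_dev_clean_tx(struct my_ring *ring)	/* completion path */
{
	xsk_tx_metadata_complete(&ring->compl, &my_xsk_tmo, ring->compl_priv);
	xsk_tx_completed(ring->pool, 1);
}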
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index c9bb0f892f..1d107241b9 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -2190,4 +2190,13 @@ static inline int register_xfrm_interface_bpf(void)
#endif
+#if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
+int register_xfrm_state_bpf(void);
+#else
+static inline int register_xfrm_state_bpf(void)
+{
+ return 0;
+}
+#endif
+
#endif /* _NET_XFRM_H */
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index b0bdff26fc..99dd7376df 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -12,6 +12,7 @@
struct xsk_buff_pool;
struct xdp_rxq_info;
+struct xsk_cb_desc;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
@@ -33,6 +34,7 @@ struct xdp_buff_xsk {
};
#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
+#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
struct xsk_dma_map {
dma_addr_t *dma_pages;
@@ -77,10 +79,12 @@ struct xsk_buff_pool {
u32 chunk_size;
u32 chunk_shift;
u32 frame_len;
+ u8 tx_metadata_len; /* inherited from umem */
u8 cached_need_wakeup;
bool uses_need_wakeup;
bool dma_need_sync;
bool unaligned;
+ bool tx_sw_csum;
void *addrs;
/* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
* NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
@@ -132,6 +136,7 @@ static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_p
/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
+void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
@@ -233,4 +238,9 @@ static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
+static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
+{
+ return pool->tx_metadata_len > 0;
+}
+
#endif /* XSK_BUFF_POOL_H_ */
diff --git a/include/soc/fsl/qe/qmc.h b/include/soc/fsl/qe/qmc.h
index 3c61a50d2a..2a333fc1ea 100644
--- a/include/soc/fsl/qe/qmc.h
+++ b/include/soc/fsl/qe/qmc.h
@@ -9,6 +9,7 @@
#ifndef __SOC_FSL_QMC_H__
#define __SOC_FSL_QMC_H__
+#include <linux/bits.h>
#include <linux/types.h>
struct device_node;
@@ -16,9 +17,11 @@ struct device;
struct qmc_chan;
struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name);
+struct qmc_chan *qmc_chan_get_bychild(struct device_node *np);
void qmc_chan_put(struct qmc_chan *chan);
struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev, struct device_node *np,
const char *phandle_name);
+struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev, struct device_node *np);
enum qmc_mode {
QMC_TRANSPARENT,
@@ -37,6 +40,16 @@ struct qmc_chan_info {
int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info);
+struct qmc_chan_ts_info {
+ u64 rx_ts_mask_avail;
+ u64 tx_ts_mask_avail;
+ u64 rx_ts_mask;
+ u64 tx_ts_mask;
+};
+
+int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info);
+int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info);
+
struct qmc_chan_param {
enum qmc_mode mode;
union {
@@ -56,8 +69,20 @@ int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param
int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
void (*complete)(void *context), void *context);
+/* Flags available (ORed) for the read complete() flags parameter in HDLC
+ * mode. In transparent mode, no flags are available and the read complete()
+ * flags parameter has no meaning.
+ */
+#define QMC_RX_FLAG_HDLC_LAST BIT(11) /* Last in frame */
+#define QMC_RX_FLAG_HDLC_FIRST BIT(10) /* First in frame */
+#define QMC_RX_FLAG_HDLC_OVF BIT(5) /* Data overflow */
+#define QMC_RX_FLAG_HDLC_UNA	BIT(4)  /* Unaligned (i.e. bits received not a multiple of 8) */
+#define QMC_RX_FLAG_HDLC_ABORT BIT(3) /* Received an abort sequence (seven consecutive ones) */
+#define QMC_RX_FLAG_HDLC_CRC BIT(2) /* CRC error */
+
int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
- void (*complete)(void *context, size_t length),
+ void (*complete)(void *context, size_t length,
+ unsigned int flags),
void *context);
#define QMC_CHAN_READ (1<<0)
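
A hedged sketch of an HDLC-mode read completion callback consuming the new flags parameter (names other than the QMC_RX_FLAG_* constants are hypothetical):

static void my_rx_complete(void *context, size_t length, unsigned int flags)
{
	struct my_chan *chan = context;		/* hypothetical */

	if (flags & (QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA |
		     QMC_RX_FLAG_HDLC_ABORT | QMC_RX_FLAG_HDLC_CRC)) {
		my_drop_frame(chan);		/* hypothetical */
		return;
	}

	if (flags & QMC_RX_FLAG_HDLC_LAST)
		my_deliver_frame(chan, length);	/* hypothetical */
}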
diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h
index f916dcde45..09722f83b0 100644
--- a/include/soc/microchip/mpfs.h
+++ b/include/soc/microchip/mpfs.h
@@ -38,6 +38,8 @@ int mpfs_blocking_transaction(struct mpfs_sys_controller *mpfs_client, struct mp
struct mpfs_sys_controller *mpfs_sys_controller_get(struct device *dev);
+struct mtd_info *mpfs_sys_controller_get_flash(struct mpfs_sys_controller *mpfs_client);
+
#endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */
#if IS_ENABLED(CONFIG_MCHP_CLK_MPFS)
diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h
index fdd462b295..a62d500a6f 100644
--- a/include/soc/qcom/qcom-spmi-pmic.h
+++ b/include/soc/qcom/qcom-spmi-pmic.h
@@ -31,6 +31,7 @@
#define PM8998_SUBTYPE 0x14
#define PMI8998_SUBTYPE 0x15
#define PM8005_SUBTYPE 0x18
+#define PM8937_SUBTYPE 0x19
#define PM660L_SUBTYPE 0x1a
#define PM660_SUBTYPE 0x1b
#define PM8150_SUBTYPE 0x1e
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 71ae37d3be..af1d73a7f0 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -162,6 +162,7 @@ struct tegra_mc_ops {
*/
int (*probe)(struct tegra_mc *mc);
void (*remove)(struct tegra_mc *mc);
+ int (*resume)(struct tegra_mc *mc);
irqreturn_t (*handle_irq)(int irq, void *data);
int (*probe_device)(struct tegra_mc *mc, struct device *dev);
};
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index c495c6d5fb..4bd3be3a31 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -410,7 +410,7 @@ int snd_ac97_pcm_close(struct ac97_pcm *pcm);
int snd_ac97_pcm_double_rate_rules(struct snd_pcm_runtime *runtime);
/* ad hoc AC97 device driver access */
-extern struct bus_type ac97_bus_type;
+extern const struct bus_type ac97_bus_type;
/* AC97 platform_data adding function */
static inline void snd_ac97_dev_add_pdata(struct snd_ac97 *ac97, void *data)
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 8c18e8b6d2..b24716ab27 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -75,6 +75,7 @@
#define CS35L56_DSP1_AHBM_WINDOW_DEBUG_0 0x25E2040
#define CS35L56_DSP1_AHBM_WINDOW_DEBUG_1 0x25E2044
#define CS35L56_DSP1_XMEM_UNPACKED24_0 0x2800000
+#define CS35L56_DSP1_FW_VER 0x2800010
#define CS35L56_DSP1_HALO_STATE_A1 0x2801E58
#define CS35L56_DSP1_HALO_STATE 0x28021E0
#define CS35L56_DSP1_PM_CUR_STATE_A1 0x2804000
@@ -241,7 +242,7 @@
#define CS35L56_CONTROL_PORT_READY_US 2200
#define CS35L56_HALO_STATE_POLL_US 1000
-#define CS35L56_HALO_STATE_TIMEOUT_US 50000
+#define CS35L56_HALO_STATE_TIMEOUT_US 250000
#define CS35L56_RESET_PULSE_MIN_US 1100
#define CS35L56_WAKE_HOLD_TIME_US 1000
@@ -272,6 +273,7 @@ extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
+int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base);
int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base);
@@ -284,7 +286,10 @@ int cs35l56_is_fw_reload_needed(struct cs35l56_base *cs35l56_base);
int cs35l56_runtime_suspend_common(struct cs35l56_base *cs35l56_base);
int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_soundwire);
void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
+int cs35l56_read_prot_status(struct cs35l56_base *cs35l56_base,
+ bool *fw_missing, unsigned int *fw_version);
int cs35l56_hw_init(struct cs35l56_base *cs35l56_base);
+int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base);
int cs35l56_get_bclk_freq_id(unsigned int freq);
void cs35l56_fill_supply_names(struct regulator_bulk_data *data);
diff --git a/include/sound/cs4271.h b/include/sound/cs4271.h
index 6ff23ab488..5a55d135bd 100644
--- a/include/sound/cs4271.h
+++ b/include/sound/cs4271.h
@@ -9,7 +9,6 @@
#define __CS4271_H
struct cs4271_platform_data {
- int gpio_nreset; /* GPIO driving Reset pin, if any */
bool amutec_eq_bmutec; /* flag to enable AMUTEC=BMUTEC */
/*
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 5497dc9c39..9c94ba7c18 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -141,6 +141,7 @@ struct hda_pcm_stream {
hda_nid_t nid; /* default NID to query rates/formats/bps, or set up */
u32 rates; /* supported rates */
u64 formats; /* supported formats (SNDRV_PCM_FMTBIT_) */
+ u32 subformats; /* for S32_LE format, SNDRV_PCM_SUBFMTBIT_* */
unsigned int maxbps; /* supported max. bit per sample */
const struct snd_pcm_chmap_elem *chmap; /* chmap to override */
struct hda_pcm_ops ops;
@@ -448,8 +449,8 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
#define snd_hda_codec_cleanup_stream(codec, nid) \
__snd_hda_codec_cleanup_stream(codec, nid, 0)
-#define snd_hda_query_supported_pcm(codec, nid, ratesp, fmtsp, bpsp) \
- snd_hdac_query_supported_pcm(&(codec)->core, nid, ratesp, fmtsp, bpsp)
+#define snd_hda_query_supported_pcm(codec, nid, ratesp, fmtsp, subfmtp, bpsp) \
+ snd_hdac_query_supported_pcm(&(codec)->core, nid, ratesp, fmtsp, subfmtp, bpsp)
#define snd_hda_is_supported_format(codec, nid, fmt) \
snd_hdac_is_supported_format(&(codec)->core, nid, fmt)
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index dd7c87bbc6..a73d7f34f4 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -33,7 +33,7 @@ struct hda_device_id;
/*
* exported bus type
*/
-extern struct bus_type snd_hda_bus_type;
+extern const struct bus_type snd_hda_bus_type;
/*
* generic arrays
@@ -140,13 +140,14 @@ int snd_hdac_get_connections(struct hdac_device *codec, hda_nid_t nid,
hda_nid_t *conn_list, int max_conns);
int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
hda_nid_t *start_id);
-unsigned int snd_hdac_calc_stream_format(unsigned int rate,
- unsigned int channels,
- snd_pcm_format_t format,
- unsigned int maxbps,
- unsigned short spdif_ctls);
+unsigned int snd_hdac_stream_format_bits(snd_pcm_format_t format, snd_pcm_subformat_t subformat,
+ unsigned int maxbits);
+unsigned int snd_hdac_stream_format(unsigned int channels, unsigned int bits, unsigned int rate);
+unsigned int snd_hdac_spdif_stream_format(unsigned int channels, unsigned int bits,
+ unsigned int rate, unsigned short spdif_ctls);
int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid,
- u32 *ratesp, u64 *formatsp, unsigned int *bpsp);
+ u32 *ratesp, u64 *formatsp, u32 *subformatsp,
+ unsigned int *bpsp);
bool snd_hdac_is_supported_format(struct hdac_device *codec, hda_nid_t nid,
unsigned int format);
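
A hedged sketch of composing a stream format with the helpers that replace snd_hdac_calc_stream_format() (values illustrative: 24 valid msbits in a 32-bit container, stereo, 48 kHz):

	unsigned int bits, fmt;

	bits = snd_hdac_stream_format_bits(SNDRV_PCM_FORMAT_S32_LE,
					   SNDRV_PCM_SUBFORMAT_MSBITS_24, 32);
	fmt = snd_hdac_stream_format(2, bits, 48000);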
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index a8bebac1e4..957295364a 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -56,6 +56,9 @@ struct hdac_ext_stream {
u32 pphcldpl;
u32 pphcldpu;
+ u32 pplcllpl;
+ u32 pplcllpu;
+
bool decoupled:1;
bool link_locked:1;
bool link_prepared;
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 2a815373da..cc175c623d 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -32,6 +32,7 @@
struct snd_pcm_hardware {
unsigned int info; /* SNDRV_PCM_INFO_* */
u64 formats; /* SNDRV_PCM_FMTBIT_* */
+ u32 subformats; /* for S32_LE, SNDRV_PCM_SUBFMTBIT_* */
unsigned int rates; /* SNDRV_PCM_RATE_* */
unsigned int rate_min; /* min rate */
unsigned int rate_max; /* max rate */
@@ -217,6 +218,12 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_BE
#endif
+#define _SNDRV_PCM_SUBFMTBIT(fmt) BIT((__force int)SNDRV_PCM_SUBFORMAT_##fmt)
+#define SNDRV_PCM_SUBFMTBIT_STD _SNDRV_PCM_SUBFMTBIT(STD)
+#define SNDRV_PCM_SUBFMTBIT_MSBITS_MAX _SNDRV_PCM_SUBFMTBIT(MSBITS_MAX)
+#define SNDRV_PCM_SUBFMTBIT_MSBITS_20 _SNDRV_PCM_SUBFMTBIT(MSBITS_20)
+#define SNDRV_PCM_SUBFMTBIT_MSBITS_24 _SNDRV_PCM_SUBFMTBIT(MSBITS_24)
+
struct snd_pcm_file {
struct snd_pcm_substream *substream;
int no_compat_mmap;
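
A hedged sketch of a driver advertising the new subformats field (the remaining fields are trimmed to illustrative values):

static const struct snd_pcm_hardware my_pcm_hw = {
	.info		= SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED,
	.formats	= SNDRV_PCM_FMTBIT_S32_LE,
	.subformats	= SNDRV_PCM_SUBFMTBIT_MSBITS_20 |
			  SNDRV_PCM_SUBFMTBIT_MSBITS_24,
	.rates		= SNDRV_PCM_RATE_48000,
	.rate_min	= 48000,
	.rate_max	= 48000,
};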
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index ba184f49f7..fbf35df6e5 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -362,6 +362,8 @@ static inline int params_physical_width(const struct snd_pcm_hw_params *p)
return snd_pcm_format_physical_width(params_format(p));
}
+int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p);
+
static inline void
params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
{
diff --git a/include/sound/rt5682s.h b/include/sound/rt5682s.h
index 66ca0c75b9..006e6003d1 100644
--- a/include/sound/rt5682s.h
+++ b/include/sound/rt5682s.h
@@ -31,6 +31,13 @@ enum rt5682s_dai_clks {
RT5682S_DAI_NUM_CLKS,
};
+enum {
+ RT5682S_LDO_1_607V,
+ RT5682S_LDO_1_5V,
+ RT5682S_LDO_1_406V,
+ RT5682S_LDO_1_731V,
+};
+
struct rt5682s_platform_data {
enum rt5682s_dmic1_data_pin dmic1_data_pin;
enum rt5682s_dmic1_clk_pin dmic1_clk_pin;
@@ -38,6 +45,7 @@ struct rt5682s_platform_data {
unsigned int dmic_clk_rate;
unsigned int dmic_delay;
unsigned int amic_delay;
+ unsigned int ldo_dacref;
bool dmic_clk_driving_high;
const char *dai_clk_names[RT5682S_DAI_NUM_CLKS];
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index e5da10b4c4..ad67957b7b 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -195,6 +195,9 @@ int graph_util_is_ports0(struct device_node *port);
int graph_util_parse_dai(struct device *dev, struct device_node *ep,
struct snd_soc_dai_link_component *dlc, int *is_single_link);
+int graph_util_parse_link_direction(struct device_node *np,
+ bool *is_playback_only, bool *is_capture_only);
+
#ifdef DEBUG
static inline void simple_util_debug_dai(struct simple_util_priv *priv,
char *name,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 7792c393e2..6defc5547f 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -620,6 +620,7 @@ enum snd_soc_trigger_order {
struct snd_soc_pcm_stream {
const char *stream_name;
u64 formats; /* SNDRV_PCM_FMTBIT_* */
+ u32 subformats; /* for S32_LE format, SNDRV_PCM_SUBFMTBIT_* */
unsigned int rates; /* SNDRV_PCM_RATE_* */
unsigned int rate_min; /* min rate */
unsigned int rate_max; /* max rate */
@@ -655,8 +656,45 @@ struct snd_soc_dai_link_component {
struct of_phandle_args *dai_args;
};
-struct snd_soc_dai_link_codec_ch_map {
- unsigned int connected_cpu_id;
+/*
+ * [dai_link->ch_maps Image sample]
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ * CPU1 <---> Codec1
+ * CPU2 <---> Codec2
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ * ch-map[1].cpu = 1 ch-map[1].codec = 1
+ * ch-map[2].cpu = 2 ch-map[2].codec = 2
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ * CPU1 <-+-> Codec1
+ * CPU2 <-/
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ * ch-map[1].cpu = 1 ch-map[1].codec = 1
+ * ch-map[2].cpu = 2 ch-map[2].codec = 1
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ * CPU1 <-+-> Codec1
+ * \-> Codec2
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ * ch-map[1].cpu = 1 ch-map[1].codec = 1
+ * ch-map[2].cpu = 1 ch-map[2].codec = 2
+ *
+ */
+struct snd_soc_dai_link_ch_map {
+ unsigned int cpu;
+ unsigned int codec;
unsigned int ch_mask;
};
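
For instance, the last diagram above (CPU1 feeding both Codec1 and Codec2) maps to the following hedged sketch, with the array wired into the dai_link by the machine driver:

static struct snd_soc_dai_link_ch_map my_ch_maps[] = {
	{ .cpu = 0, .codec = 0 },
	{ .cpu = 1, .codec = 1 },
	{ .cpu = 1, .codec = 2 },	/* CPU1 also feeds Codec2 */
};

/* my_dai_link.ch_maps = my_ch_maps; the element count is implied by
 * max(num_cpus, num_codecs), see snd_soc_link_num_ch_map() below.
 */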
@@ -688,7 +726,9 @@ struct snd_soc_dai_link {
struct snd_soc_dai_link_component *codecs;
unsigned int num_codecs;
- struct snd_soc_dai_link_codec_ch_map *codec_ch_maps;
+ /* num_ch_maps = max(num_cpu, num_codecs) */
+ struct snd_soc_dai_link_ch_map *ch_maps;
+
/*
* You MAY specify the link's platform/PCM/DMA driver, either by
* device name, or by DT/OF node, but not both. Some forms of link
@@ -775,6 +815,10 @@ struct snd_soc_dai_link {
#endif
};
+static inline int snd_soc_link_num_ch_map(struct snd_soc_dai_link *link) {
+ return max(link->num_cpus, link->num_codecs);
+}
+
static inline struct snd_soc_dai_link_component*
snd_soc_link_to_cpu(struct snd_soc_dai_link *link, int n) {
return &(link)->cpus[n];
@@ -808,6 +852,12 @@ snd_soc_link_to_platform(struct snd_soc_dai_link *link, int n) {
((cpu) = snd_soc_link_to_cpu(link, i)); \
(i)++)
+#define for_each_link_ch_maps(link, i, ch_map) \
+ for ((i) = 0; \
+ ((i) < snd_soc_link_num_ch_map(link) && \
+ ((ch_map) = link->ch_maps + i)); \
+ (i)++)
+
/*
* Sample 1 : Single CPU/Codec/Platform
*
@@ -889,7 +939,7 @@ snd_soc_link_to_platform(struct snd_soc_dai_link *link, int n) {
#define COMP_PLATFORM(_name) { .name = _name }
#define COMP_AUX(_name) { .name = _name }
#define COMP_CODEC_CONF(_name) { .name = _name }
-#define COMP_DUMMY() { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai", }
+#define COMP_DUMMY() /* see snd_soc_fill_dummy_dai() */
extern struct snd_soc_dai_link_component null_dailink_component[0];
extern struct snd_soc_dai_link_component snd_soc_dummy_dlc;
@@ -1163,6 +1213,7 @@ struct snd_soc_pcm_runtime {
((i) < (rtd)->dai_link->num_cpus + (rtd)->dai_link->num_codecs) && \
((dai) = (rtd)->dais[i]); \
(i)++)
+#define for_each_rtd_ch_maps(rtd, i, ch_maps) for_each_link_ch_maps(rtd->dai_link, i, ch_maps)
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
diff --git a/include/sound/sof.h b/include/sound/sof.h
index 268d0ca0f6..05213bb515 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -57,6 +57,18 @@ enum sof_ipc_type {
SOF_IPC_TYPE_COUNT
};
+struct sof_loadable_file_profile {
+ enum sof_ipc_type ipc_type;
+
+ const char *fw_path;
+ const char *fw_path_postfix;
+ const char *fw_name;
+ const char *fw_lib_path;
+ const char *fw_lib_path_postfix;
+ const char *tplg_path;
+ const char *tplg_name;
+};
+
/*
* SOF Platform data.
*/
@@ -86,6 +98,9 @@ struct snd_sof_pdata {
/* descriptor */
const struct sof_dev_desc *desc;
+ /* platform's preferred IPC type and path overrides */
+ struct sof_loadable_file_profile ipc_file_profile_base;
+
/* firmware and topology filenames */
const char *fw_filename_prefix;
const char *fw_filename;
diff --git a/include/sound/sof/dai-imx.h b/include/sound/sof/dai-imx.h
index ca8325353d..6bc987bd47 100644
--- a/include/sound/sof/dai-imx.h
+++ b/include/sound/sof/dai-imx.h
@@ -51,4 +51,11 @@ struct sof_ipc_dai_sai_params {
uint16_t tdm_slot_width;
uint16_t reserved2; /* alignment */
} __packed;
+
+/* MICFIL Configuration Request - SOF_IPC_DAI_MICFIL_CONFIG */
+struct sof_ipc_dai_micfil_params {
+ uint32_t pdm_rate;
+ uint32_t pdm_ch;
+} __packed;
+
#endif
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 3041f5805b..4773a5f616 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -88,6 +88,7 @@ enum sof_ipc_dai_type {
SOF_DAI_AMD_HS, /**< Amd HS */
SOF_DAI_AMD_SP_VIRTUAL, /**< AMD ACP SP VIRTUAL */
SOF_DAI_AMD_HS_VIRTUAL, /**< AMD ACP HS VIRTUAL */
+	SOF_DAI_IMX_MICFIL,	/**< i.MX MICFIL PDM */
};
/* general purpose DAI configuration */
@@ -117,6 +118,7 @@ struct sof_ipc_dai_config {
struct sof_ipc_dai_acpdmic_params acpdmic;
struct sof_ipc_dai_acp_params acphs;
struct sof_ipc_dai_mtk_afe_params afe;
+ struct sof_ipc_dai_micfil_params micfil;
};
} __packed;
diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h
index 574a9d581f..1eb538e182 100644
--- a/include/sound/sof/ipc4/header.h
+++ b/include/sound/sof/ipc4/header.h
@@ -423,6 +423,12 @@ enum sof_ipc4_fw_config_params {
SOF_IPC4_FW_CFG_RESERVED,
SOF_IPC4_FW_CFG_POWER_GATING_POLICY,
SOF_IPC4_FW_CFG_ASSERT_MODE,
+ SOF_IPC4_FW_RESERVED1,
+ SOF_IPC4_FW_RESERVED2,
+ SOF_IPC4_FW_RESERVED3,
+ SOF_IPC4_FW_RESERVED4,
+ SOF_IPC4_FW_RESERVED5,
+ SOF_IPC4_FW_CONTEXT_SAVE
};
struct sof_ipc4_fw_version {
@@ -532,6 +538,35 @@ struct sof_ipc4_notify_resource_data {
#define SOF_IPC4_DEBUG_SLOT_TELEMETRY 0x4c455400
#define SOF_IPC4_DEBUG_SLOT_BROKEN 0x44414544
+/**
+ * struct sof_ipc4_notify_module_data - payload for module notification
+ * @instance_id: instance ID of the originator module of the notification
+ * @module_id: module ID of the originator of the notification
+ * @event_id: module specific event id
+ * @event_data_size: Size of the @event_data (if any) in bytes
+ * @event_data: Optional notification data, module and notification dependent
+ */
+struct sof_ipc4_notify_module_data {
+ uint16_t instance_id;
+ uint16_t module_id;
+ uint32_t event_id;
+ uint32_t event_data_size;
+ uint8_t event_data[];
+} __packed __aligned(4);
+
+/*
+ * ALSA kcontrol change notification
+ *
+ * The event_id of struct sof_ipc4_notify_module_data is divided into two u16:
+ * upper u16: magic number for ALSA kcontrol types: 0xA15A
+ * lower u16: param_id of the control, which is the type of the control
+ * The event_data contains the struct sof_ipc4_control_msg_payload of the control
+ * which sent the notification.
+ */
+#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_MASK GENMASK(31, 16)
+#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_VAL 0xA15A0000
+#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_PARAMID_MASK GENMASK(15, 0)
+
/** @}*/
#endif
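
A hedged sketch of decoding such a notification on the host side using the masks above:

static bool my_notify_is_alsa_kctl(const struct sof_ipc4_notify_module_data *data,
				   u16 *param_id)
{
	if ((data->event_id & SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_MASK) !=
	    SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_VAL)
		return false;

	*param_id = data->event_id &
		    SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_PARAMID_MASK;
	return true;
}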
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index 906e2f327a..b3ca886fa2 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -39,6 +39,7 @@ enum sof_comp_type {
SOF_COMP_ASRC, /**< Asynchronous sample rate converter */
SOF_COMP_DCBLOCK,
SOF_COMP_SMART_AMP, /**< smart amplifier component */
+ SOF_COMP_MODULE_ADAPTER, /**< module adapter */
/* keep FILEREAD/FILEWRITE as the last ones */
SOF_COMP_FILEREAD = 10000, /**< host test based file IO */
SOF_COMP_FILEWRITE = 10001, /**< host test based file IO */
@@ -59,7 +60,7 @@ struct sof_ipc_comp {
/* extended data length, 0 if no extended data */
uint32_t ext_data_length;
-} __packed;
+} __packed __aligned(4);
/*
* Component Buffers
@@ -68,14 +69,15 @@ struct sof_ipc_comp {
/*
* SOF memory capabilities, add new ones at the end
*/
-#define SOF_MEM_CAPS_RAM (1 << 0)
-#define SOF_MEM_CAPS_ROM (1 << 1)
-#define SOF_MEM_CAPS_EXT (1 << 2) /**< external */
-#define SOF_MEM_CAPS_LP (1 << 3) /**< low power */
-#define SOF_MEM_CAPS_HP (1 << 4) /**< high performance */
-#define SOF_MEM_CAPS_DMA (1 << 5) /**< DMA'able */
-#define SOF_MEM_CAPS_CACHE (1 << 6) /**< cacheable */
-#define SOF_MEM_CAPS_EXEC (1 << 7) /**< executable */
+#define SOF_MEM_CAPS_RAM BIT(0)
+#define SOF_MEM_CAPS_ROM BIT(1)
+#define SOF_MEM_CAPS_EXT BIT(2) /**< external */
+#define SOF_MEM_CAPS_LP BIT(3) /**< low power */
+#define SOF_MEM_CAPS_HP BIT(4) /**< high performance */
+#define SOF_MEM_CAPS_DMA BIT(5) /**< DMA'able */
+#define SOF_MEM_CAPS_CACHE BIT(6) /**< cacheable */
+#define SOF_MEM_CAPS_EXEC BIT(7) /**< executable */
+#define SOF_MEM_CAPS_L3 BIT(8) /**< L3 memory */
/*
* overrun will cause ring buffer overwrite, instead of XRUN.
@@ -87,6 +89,9 @@ struct sof_ipc_comp {
*/
#define SOF_BUF_UNDERRUN_PERMITTED BIT(1)
+/* the UUID size in bytes, shared between FW and host */
+#define SOF_UUID_SIZE 16
+
/* create new component buffer - SOF_IPC_TPLG_BUFFER_NEW */
struct sof_ipc_buffer {
struct sof_ipc_comp comp;
@@ -94,7 +99,7 @@ struct sof_ipc_buffer {
uint32_t caps; /**< SOF_MEM_CAPS_ */
uint32_t flags; /**< SOF_BUF_ flags defined above */
uint32_t reserved; /**< reserved for future use */
-} __packed;
+} __packed __aligned(4);
/* generic component config data - must always be after struct sof_ipc_comp */
struct sof_ipc_comp_config {
@@ -107,7 +112,7 @@ struct sof_ipc_comp_config {
/* reserved for future use */
uint32_t reserved[2];
-} __packed;
+} __packed __aligned(4);
/* generic host component */
struct sof_ipc_comp_host {
@@ -116,7 +121,7 @@ struct sof_ipc_comp_host {
uint32_t direction; /**< SOF_IPC_STREAM_ */
uint32_t no_irq; /**< don't send periodic IRQ to host/DSP */
uint32_t dmac_config; /**< DMA engine specific */
-} __packed;
+} __packed __aligned(4);
/* generic DAI component */
struct sof_ipc_comp_dai {
@@ -126,13 +131,13 @@ struct sof_ipc_comp_dai {
uint32_t dai_index; /**< index of this type dai */
uint32_t type; /**< DAI type - SOF_DAI_ */
uint32_t reserved; /**< reserved */
-} __packed;
+} __packed __aligned(4);
/* generic mixer component */
struct sof_ipc_comp_mixer {
struct sof_ipc_comp comp;
struct sof_ipc_comp_config config;
-} __packed;
+} __packed __aligned(4);
/* volume ramping types */
enum sof_volume_ramp {
@@ -140,6 +145,8 @@ enum sof_volume_ramp {
SOF_VOLUME_LOG,
SOF_VOLUME_LINEAR_ZC,
SOF_VOLUME_LOG_ZC,
+ SOF_VOLUME_WINDOWS_FADE,
+ SOF_VOLUME_WINDOWS_NO_FADE,
};
/* generic volume component */
@@ -151,7 +158,7 @@ struct sof_ipc_comp_volume {
uint32_t max_value;
uint32_t ramp; /**< SOF_VOLUME_ */
uint32_t initial_ramp; /**< ramp space in ms */
-} __packed;
+} __packed __aligned(4);
/* generic SRC component */
struct sof_ipc_comp_src {
@@ -161,7 +168,7 @@ struct sof_ipc_comp_src {
uint32_t source_rate; /**< source rate or 0 for variable */
uint32_t sink_rate; /**< sink rate or 0 for variable */
uint32_t rate_mask; /**< SOF_RATE_ supported rates */
-} __packed;
+} __packed __aligned(4);
/* generic ASRC component */
struct sof_ipc_comp_asrc {
@@ -187,13 +194,13 @@ struct sof_ipc_comp_asrc {
/* reserved for future use */
uint32_t reserved[4];
-} __attribute__((packed));
+} __packed __aligned(4);
/* generic MUX component */
struct sof_ipc_comp_mux {
struct sof_ipc_comp comp;
struct sof_ipc_comp_config config;
-} __packed;
+} __packed __aligned(4);
/* generic tone generator component */
struct sof_ipc_comp_tone {
@@ -208,7 +215,7 @@ struct sof_ipc_comp_tone {
int32_t period;
int32_t repeats;
int32_t ramp_step;
-} __packed;
+} __packed __aligned(4);
/** \brief Types of processing components */
enum sof_ipc_process_type {
@@ -234,8 +241,8 @@ struct sof_ipc_comp_process {
/* reserved for future use */
uint32_t reserved[7];
- uint8_t data[];
-} __packed;
+ unsigned char data[];
+} __packed __aligned(4);
/* frees components, buffers and pipelines
* SOF_IPC_TPLG_COMP_FREE, SOF_IPC_TPLG_PIPE_FREE, SOF_IPC_TPLG_BUFFER_FREE
@@ -243,13 +250,13 @@ struct sof_ipc_comp_process {
struct sof_ipc_free {
struct sof_ipc_cmd_hdr hdr;
uint32_t id;
-} __packed;
+} __packed __aligned(4);
struct sof_ipc_comp_reply {
struct sof_ipc_reply rhdr;
uint32_t id;
uint32_t offset;
-} __packed;
+} __packed __aligned(4);
/*
* Pipeline
@@ -274,25 +281,25 @@ struct sof_ipc_pipe_new {
uint32_t frames_per_sched;/**< output frames of pipeline, 0 is variable */
uint32_t xrun_limit_usecs; /**< report xruns greater than limit */
uint32_t time_domain; /**< scheduling time domain */
-} __packed;
+} __packed __aligned(4);
/* pipeline construction complete - SOF_IPC_TPLG_PIPE_COMPLETE */
struct sof_ipc_pipe_ready {
struct sof_ipc_cmd_hdr hdr;
uint32_t comp_id;
-} __packed;
+} __packed __aligned(4);
struct sof_ipc_pipe_free {
struct sof_ipc_cmd_hdr hdr;
uint32_t comp_id;
-} __packed;
+} __packed __aligned(4);
/* connect two components in pipeline - SOF_IPC_TPLG_COMP_CONNECT */
struct sof_ipc_pipe_comp_connect {
struct sof_ipc_cmd_hdr hdr;
uint32_t source_id;
uint32_t sink_id;
-} __packed;
+} __packed __aligned(4);
/* external events */
enum sof_event_types {
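
A note on the change repeated throughout this header, where plain __packed
becomes __packed __aligned(4): every member of these IPC structs is already
a 32-bit field, so the size and byte layout do not change; the extra
attribute tells the compiler the structs always start on a 4-byte boundary,
letting it emit aligned 32-bit accesses instead of byte-wise ones. A small
userspace sketch of the effect (attribute spellings expanded by hand,
struct names invented for the demo):

#include <stdio.h>

struct hdr_packed {
	unsigned int size;
	unsigned int cmd;
} __attribute__((packed));

struct hdr_packed_aligned {
	unsigned int size;
	unsigned int cmd;
} __attribute__((packed, aligned(4)));

int main(void)
{
	/* same size either way, but a stronger alignment guarantee */
	printf("packed:         size=%zu align=%zu\n",
	       sizeof(struct hdr_packed), _Alignof(struct hdr_packed));
	printf("packed+aligned: size=%zu align=%zu\n",
	       sizeof(struct hdr_packed_aligned),
	       _Alignof(struct hdr_packed_aligned));
	return 0;
}
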
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
index be58d87050..9aff384941 100644
--- a/include/sound/tas2781.h
+++ b/include/sound/tas2781.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
//
-// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
//
// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
// https://www.ti.com
//
-// The TAS2781 driver implements a flexible and configurable
+// The TAS2563/TAS2781 driver implements a flexible and configurable
// algo coefficient setting for one, two, or even multiple
-// TAS2781 chips.
+// TAS2563/TAS2781 chips.
//
// Author: Shenghao Ding <shenghao-ding@ti.com>
// Author: Kevin Lu <kevin-lu@ti.com>
@@ -22,6 +22,7 @@
#define TAS2781_DRV_VER 1
#define SMARTAMP_MODULE_NAME "tas2781"
#define TAS2781_GLOBAL_ADDR 0x40
+#define TAS2563_GLOBAL_ADDR 0x48
#define TASDEVICE_RATES (SNDRV_PCM_RATE_44100 |\
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |\
SNDRV_PCM_RATE_88200)
@@ -59,7 +60,8 @@
#define TASDEVICE_CMD_FIELD_W 0x4
enum audio_device {
- TAS2781 = 0,
+ TAS2563,
+ TAS2781,
};
enum device_catlog_id {
@@ -121,6 +123,8 @@ struct tasdevice_priv {
bool force_fwload_status;
bool playback_started;
bool isacpi;
+ unsigned int global_addr;
+
int (*fw_parse_variable_header)(struct tasdevice_priv *tas_priv,
const struct firmware *fmw, int offset);
int (*fw_parse_program_data)(struct tasdevice_priv *tas_priv,
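
The new global_addr member is what lets one driver core serve both parts:
probe can record 0x48 for a TAS2563 or 0x40 for a TAS2781 and use the
stored value wherever broadcast register writes are issued. A hedged sketch
of that selection (the helper name is invented; this is not the actual
probe code):

#define TAS2781_GLOBAL_ADDR	0x40
#define TAS2563_GLOBAL_ADDR	0x48

enum audio_device { TAS2563, TAS2781 };

/* illustrative only */
static unsigned int tas_pick_global_addr(enum audio_device dev_type)
{
	return dev_type == TAS2563 ? TAS2563_GLOBAL_ADDR
				   : TAS2781_GLOBAL_ADDR;
}
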
diff --git a/include/sound/wm0010.h b/include/sound/wm0010.h
index 13b473935c..14ff9056c5 100644
--- a/include/sound/wm0010.h
+++ b/include/sound/wm0010.h
@@ -11,12 +11,6 @@
#define WM0010_PDATA_H
struct wm0010_pdata {
- int gpio_reset;
-
- /* Set if there is an inverter between the GPIO controlling
- * the reset signal and the device.
- */
- int reset_active_high;
int irq_flags;
};
diff --git a/include/sound/wm1250-ev1.h b/include/sound/wm1250-ev1.h
deleted file mode 100644
index d16614ebec..0000000000
--- a/include/sound/wm1250-ev1.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/wm1250-ev1.h - Platform data for WM1250-EV1
- *
- * Copyright 2011 Wolfson Microelectronics. PLC.
- */
-
-#ifndef __LINUX_SND_WM1250_EV1_H
-#define __LINUX_SND_WM1250_EV1_H
-
-#define WM1250_EV1_NUM_GPIOS 5
-
-#define WM1250_EV1_GPIO_CLK_ENA 0
-#define WM1250_EV1_GPIO_CLK_SEL0 1
-#define WM1250_EV1_GPIO_CLK_SEL1 2
-#define WM1250_EV1_GPIO_OSR 3
-#define WM1250_EV1_GPIO_MASTER 4
-
-
-struct wm1250_ev1_pdata {
- int gpios[WM1250_EV1_NUM_GPIOS];
-};
-
-#endif
diff --git a/include/sound/wm2200.h b/include/sound/wm2200.h
index 9987e6c09b..2e4913ee25 100644
--- a/include/sound/wm2200.h
+++ b/include/sound/wm2200.h
@@ -42,8 +42,6 @@ struct wm2200_micbias {
};
struct wm2200_pdata {
- int reset; /** GPIO controlling /RESET, if any */
- int ldo_ena; /** GPIO controlling LODENA, if any */
int irq_flags;
int gpio_defaults[4];
diff --git a/include/sound/wm5100.h b/include/sound/wm5100.h
index b94badf729..1c48090fdb 100644
--- a/include/sound/wm5100.h
+++ b/include/sound/wm5100.h
@@ -36,11 +36,7 @@ struct wm5100_jack_mode {
#define WM5100_GPIO_SET 0x10000
struct wm5100_pdata {
- int reset; /** GPIO controlling /RESET, if any */
- int ldo_ena; /** GPIO controlling LODENA, if any */
- int hp_pol; /** GPIO controlling headset polarity, if any */
int irq_flags;
- int gpio_base;
struct wm5100_jack_mode jack_modes[2];
diff --git a/include/sound/wm8996.h b/include/sound/wm8996.h
index 247f9917e3..342abeef28 100644
--- a/include/sound/wm8996.h
+++ b/include/sound/wm8996.h
@@ -33,8 +33,6 @@ struct wm8996_retune_mobile_config {
struct wm8996_pdata {
int irq_flags; /** Set IRQ trigger flags; default active low */
- int ldo_ena; /** GPIO for LDO1; -1 for none */
-
int micdet_def; /** Default MICDET_SRC/HP1FB_SRC/MICD_BIAS */
enum wm8996_inmode inl_mode;
@@ -42,7 +40,6 @@ struct wm8996_pdata {
u32 spkmute_seq; /** Value for register 0x802 */
- int gpio_base;
u32 gpio_default[5];
int num_retune_mobile_cfgs;
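
The wm0010, wm2200, wm5100 and wm8996 hunks above all delete the same kind
of thing: legacy integer-GPIO platform data (reset, ldo_ena, hp_pol,
gpio_base and the active-high flag). With the descriptor-based gpiod API
the lines are looked up by function name and the polarity is described by
firmware, so none of those fields are needed. A hedged sketch of the
replacement pattern (not taken from the codec drivers themselves):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_codec_probe(struct device *dev)
{
	struct gpio_desc *reset;

	/* polarity comes from DT/ACPI, so no reset_active_high flag */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 0);	/* release reset */
	return 0;
}
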
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index caec276515..08f2c93d6b 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -18,97 +18,6 @@
#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-enum afs_call_trace {
- afs_call_trace_alloc,
- afs_call_trace_free,
- afs_call_trace_get,
- afs_call_trace_put,
- afs_call_trace_wake,
- afs_call_trace_work,
-};
-
-enum afs_server_trace {
- afs_server_trace_alloc,
- afs_server_trace_callback,
- afs_server_trace_destroy,
- afs_server_trace_free,
- afs_server_trace_gc,
- afs_server_trace_get_by_addr,
- afs_server_trace_get_by_uuid,
- afs_server_trace_get_caps,
- afs_server_trace_get_install,
- afs_server_trace_get_new_cbi,
- afs_server_trace_get_probe,
- afs_server_trace_give_up_cb,
- afs_server_trace_purging,
- afs_server_trace_put_call,
- afs_server_trace_put_cbi,
- afs_server_trace_put_find_rsq,
- afs_server_trace_put_probe,
- afs_server_trace_put_slist,
- afs_server_trace_put_slist_isort,
- afs_server_trace_put_uuid_rsq,
- afs_server_trace_update,
-};
-
-
-enum afs_volume_trace {
- afs_volume_trace_alloc,
- afs_volume_trace_free,
- afs_volume_trace_get_alloc_sbi,
- afs_volume_trace_get_cell_insert,
- afs_volume_trace_get_new_op,
- afs_volume_trace_get_query_alias,
- afs_volume_trace_put_cell_dup,
- afs_volume_trace_put_cell_root,
- afs_volume_trace_put_destroy_sbi,
- afs_volume_trace_put_free_fc,
- afs_volume_trace_put_put_op,
- afs_volume_trace_put_query_alias,
- afs_volume_trace_put_validate_fc,
- afs_volume_trace_remove,
-};
-
-enum afs_cell_trace {
- afs_cell_trace_alloc,
- afs_cell_trace_free,
- afs_cell_trace_get_queue_dns,
- afs_cell_trace_get_queue_manage,
- afs_cell_trace_get_queue_new,
- afs_cell_trace_get_vol,
- afs_cell_trace_insert,
- afs_cell_trace_manage,
- afs_cell_trace_put_candidate,
- afs_cell_trace_put_destroy,
- afs_cell_trace_put_queue_fail,
- afs_cell_trace_put_queue_work,
- afs_cell_trace_put_vol,
- afs_cell_trace_see_source,
- afs_cell_trace_see_ws,
- afs_cell_trace_unuse_alias,
- afs_cell_trace_unuse_check_alias,
- afs_cell_trace_unuse_delete,
- afs_cell_trace_unuse_fc,
- afs_cell_trace_unuse_lookup,
- afs_cell_trace_unuse_mntpt,
- afs_cell_trace_unuse_no_pin,
- afs_cell_trace_unuse_parse,
- afs_cell_trace_unuse_pin,
- afs_cell_trace_unuse_probe,
- afs_cell_trace_unuse_sbi,
- afs_cell_trace_unuse_ws,
- afs_cell_trace_use_alias,
- afs_cell_trace_use_check_alias,
- afs_cell_trace_use_fc,
- afs_cell_trace_use_fc_alias,
- afs_cell_trace_use_lookup,
- afs_cell_trace_use_mntpt,
- afs_cell_trace_use_pin,
- afs_cell_trace_use_probe,
- afs_cell_trace_use_sbi,
- afs_cell_trace_wait,
-};
-
enum afs_fs_operation {
afs_FS_FetchData = 130, /* AFS Fetch file data */
afs_FS_FetchACL = 131, /* AFS Fetch file ACL */
@@ -202,121 +111,6 @@ enum yfs_cm_operation {
yfs_CB_CallBack = 64204,
};
-enum afs_edit_dir_op {
- afs_edit_dir_create,
- afs_edit_dir_create_error,
- afs_edit_dir_create_inval,
- afs_edit_dir_create_nospc,
- afs_edit_dir_delete,
- afs_edit_dir_delete_error,
- afs_edit_dir_delete_inval,
- afs_edit_dir_delete_noent,
-};
-
-enum afs_edit_dir_reason {
- afs_edit_dir_for_create,
- afs_edit_dir_for_link,
- afs_edit_dir_for_mkdir,
- afs_edit_dir_for_rename_0,
- afs_edit_dir_for_rename_1,
- afs_edit_dir_for_rename_2,
- afs_edit_dir_for_rmdir,
- afs_edit_dir_for_silly_0,
- afs_edit_dir_for_silly_1,
- afs_edit_dir_for_symlink,
- afs_edit_dir_for_unlink,
-};
-
-enum afs_eproto_cause {
- afs_eproto_bad_status,
- afs_eproto_cb_count,
- afs_eproto_cb_fid_count,
- afs_eproto_cellname_len,
- afs_eproto_file_type,
- afs_eproto_ibulkst_cb_count,
- afs_eproto_ibulkst_count,
- afs_eproto_motd_len,
- afs_eproto_offline_msg_len,
- afs_eproto_volname_len,
- afs_eproto_yvl_fsendpt4_len,
- afs_eproto_yvl_fsendpt6_len,
- afs_eproto_yvl_fsendpt_num,
- afs_eproto_yvl_fsendpt_type,
- afs_eproto_yvl_vlendpt4_len,
- afs_eproto_yvl_vlendpt6_len,
- afs_eproto_yvl_vlendpt_type,
-};
-
-enum afs_io_error {
- afs_io_error_cm_reply,
- afs_io_error_extract,
- afs_io_error_fs_probe_fail,
- afs_io_error_vl_lookup_fail,
- afs_io_error_vl_probe_fail,
-};
-
-enum afs_file_error {
- afs_file_error_dir_bad_magic,
- afs_file_error_dir_big,
- afs_file_error_dir_missing_page,
- afs_file_error_dir_name_too_long,
- afs_file_error_dir_over_end,
- afs_file_error_dir_small,
- afs_file_error_dir_unmarked_ext,
- afs_file_error_mntpt,
- afs_file_error_writeback_fail,
-};
-
-enum afs_flock_event {
- afs_flock_acquired,
- afs_flock_callback_break,
- afs_flock_defer_unlock,
- afs_flock_extend_fail,
- afs_flock_fail_other,
- afs_flock_fail_perm,
- afs_flock_no_lockers,
- afs_flock_release_fail,
- afs_flock_silly_delete,
- afs_flock_timestamp,
- afs_flock_try_to_lock,
- afs_flock_vfs_lock,
- afs_flock_vfs_locking,
- afs_flock_waited,
- afs_flock_waiting,
- afs_flock_work_extending,
- afs_flock_work_retry,
- afs_flock_work_unlocking,
- afs_flock_would_block,
-};
-
-enum afs_flock_operation {
- afs_flock_op_copy_lock,
- afs_flock_op_flock,
- afs_flock_op_grant,
- afs_flock_op_lock,
- afs_flock_op_release_lock,
- afs_flock_op_return_ok,
- afs_flock_op_return_eagain,
- afs_flock_op_return_edeadlk,
- afs_flock_op_return_error,
- afs_flock_op_set_lock,
- afs_flock_op_unlock,
- afs_flock_op_wake,
-};
-
-enum afs_cb_break_reason {
- afs_cb_break_no_break,
- afs_cb_break_no_promise,
- afs_cb_break_for_callback,
- afs_cb_break_for_deleted,
- afs_cb_break_for_lapsed,
- afs_cb_break_for_s_reinit,
- afs_cb_break_for_unlink,
- afs_cb_break_for_v_break,
- afs_cb_break_for_volume_callback,
- afs_cb_break_for_zap,
-};
-
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -357,9 +151,11 @@ enum afs_cb_break_reason {
EM(afs_volume_trace_alloc, "ALLOC ") \
EM(afs_volume_trace_free, "FREE ") \
EM(afs_volume_trace_get_alloc_sbi, "GET sbi-alloc ") \
+ EM(afs_volume_trace_get_callback, "GET callback ") \
EM(afs_volume_trace_get_cell_insert, "GET cell-insrt") \
EM(afs_volume_trace_get_new_op, "GET op-new ") \
EM(afs_volume_trace_get_query_alias, "GET cell-alias") \
+ EM(afs_volume_trace_put_callback, "PUT callback ") \
EM(afs_volume_trace_put_cell_dup, "PUT cell-dup ") \
EM(afs_volume_trace_put_cell_root, "PUT cell-root ") \
EM(afs_volume_trace_put_destroy_sbi, "PUT sbi-destry") \
@@ -391,6 +187,7 @@ enum afs_cb_break_reason {
EM(afs_cell_trace_unuse_fc, "UNU fc ") \
EM(afs_cell_trace_unuse_lookup, "UNU lookup") \
EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \
+ EM(afs_cell_trace_unuse_no_pin, "UNU no-pin") \
EM(afs_cell_trace_unuse_parse, "UNU parse ") \
EM(afs_cell_trace_unuse_pin, "UNU pin ") \
EM(afs_cell_trace_unuse_probe, "UNU probe ") \
@@ -407,6 +204,40 @@ enum afs_cb_break_reason {
EM(afs_cell_trace_use_sbi, "USE sbi ") \
E_(afs_cell_trace_wait, "WAIT ")
+#define afs_alist_traces \
+ EM(afs_alist_trace_alloc, "ALLOC ") \
+ EM(afs_alist_trace_get_estate, "GET estate") \
+ EM(afs_alist_trace_get_vlgetcaps, "GET vgtcap") \
+ EM(afs_alist_trace_get_vlprobe, "GET vprobe") \
+ EM(afs_alist_trace_get_vlrotate_set, "GET vl-rot") \
+ EM(afs_alist_trace_put_estate, "PUT estate") \
+ EM(afs_alist_trace_put_getaddru, "PUT GtAdrU") \
+ EM(afs_alist_trace_put_parse_empty, "PUT p-empt") \
+ EM(afs_alist_trace_put_parse_error, "PUT p-err ") \
+ EM(afs_alist_trace_put_server_dup, "PUT sv-dup") \
+ EM(afs_alist_trace_put_server_oom, "PUT sv-oom") \
+ EM(afs_alist_trace_put_server_update, "PUT sv-upd") \
+ EM(afs_alist_trace_put_vlgetcaps, "PUT vgtcap") \
+ EM(afs_alist_trace_put_vlprobe, "PUT vprobe") \
+ EM(afs_alist_trace_put_vlrotate_end, "PUT vr-end") \
+ EM(afs_alist_trace_put_vlrotate_fail, "PUT vr-fai") \
+ EM(afs_alist_trace_put_vlrotate_next, "PUT vr-nxt") \
+ EM(afs_alist_trace_put_vlrotate_restart,"PUT vr-rst") \
+ EM(afs_alist_trace_put_vlserver, "PUT vlsrvr") \
+ EM(afs_alist_trace_put_vlserver_old, "PUT vs-old") \
+ E_(afs_alist_trace_free, "FREE ")
+
+#define afs_estate_traces \
+ EM(afs_estate_trace_alloc_probe, "ALLOC prob") \
+ EM(afs_estate_trace_alloc_server, "ALLOC srvr") \
+ EM(afs_estate_trace_get_server_state, "GET srv-st") \
+ EM(afs_estate_trace_get_getcaps, "GET getcap") \
+ EM(afs_estate_trace_put_getcaps, "PUT getcap") \
+ EM(afs_estate_trace_put_probe, "PUT probe ") \
+ EM(afs_estate_trace_put_server, "PUT server") \
+ EM(afs_estate_trace_put_server_state, "PUT srv-st") \
+ E_(afs_estate_trace_free, "FREE ")
+
#define afs_fs_operations \
EM(afs_FS_FetchData, "FS.FetchData") \
EM(afs_FS_FetchStatus, "FS.FetchStatus") \
@@ -604,15 +435,67 @@ enum afs_cb_break_reason {
#define afs_cb_break_reasons \
EM(afs_cb_break_no_break, "no-break") \
- EM(afs_cb_break_no_promise, "no-promise") \
EM(afs_cb_break_for_callback, "break-cb") \
+ EM(afs_cb_break_for_creation_regress, "creation-regress") \
EM(afs_cb_break_for_deleted, "break-del") \
- EM(afs_cb_break_for_lapsed, "break-lapsed") \
EM(afs_cb_break_for_s_reinit, "s-reinit") \
EM(afs_cb_break_for_unlink, "break-unlink") \
- EM(afs_cb_break_for_v_break, "break-v") \
+ EM(afs_cb_break_for_update_regress, "update-regress") \
EM(afs_cb_break_for_volume_callback, "break-v-cb") \
- E_(afs_cb_break_for_zap, "break-zap")
+ EM(afs_cb_break_for_vos_release, "break-vos-release") \
+ E_(afs_cb_break_volume_excluded, "vol-excluded")
+
+#define afs_rotate_traces \
+ EM(afs_rotate_trace_aborted, "Abortd") \
+ EM(afs_rotate_trace_busy_sleep, "BsySlp") \
+ EM(afs_rotate_trace_check_vol_status, "VolStt") \
+ EM(afs_rotate_trace_failed, "Failed") \
+ EM(afs_rotate_trace_iter, "Iter ") \
+ EM(afs_rotate_trace_iterate_addr, "ItAddr") \
+ EM(afs_rotate_trace_next_server, "NextSv") \
+ EM(afs_rotate_trace_no_more_servers, "NoMore") \
+ EM(afs_rotate_trace_nomem, "Nomem ") \
+ EM(afs_rotate_trace_probe_error, "PrbErr") \
+ EM(afs_rotate_trace_probe_fileserver, "PrbFsv") \
+ EM(afs_rotate_trace_probe_none, "PrbNon") \
+ EM(afs_rotate_trace_probe_response, "PrbRsp") \
+ EM(afs_rotate_trace_probe_superseded, "PrbSup") \
+ EM(afs_rotate_trace_restart, "Rstart") \
+ EM(afs_rotate_trace_retry_server, "RtrySv") \
+ EM(afs_rotate_trace_selected_server, "SlctSv") \
+ EM(afs_rotate_trace_stale_lock, "StlLck") \
+ EM(afs_rotate_trace_start, "Start ") \
+ EM(afs_rotate_trace_stop, "Stop ") \
+ E_(afs_rotate_trace_stopped, "Stoppd")
+
+/*
+ * Generate enums for tracing information.
+ */
+#ifndef __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+#define __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum afs_alist_trace { afs_alist_traces } __mode(byte);
+enum afs_call_trace { afs_call_traces } __mode(byte);
+enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte);
+enum afs_cell_trace { afs_cell_traces } __mode(byte);
+enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte);
+enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte);
+enum afs_eproto_cause { afs_eproto_causes } __mode(byte);
+enum afs_estate_trace { afs_estate_traces } __mode(byte);
+enum afs_file_error { afs_file_errors } __mode(byte);
+enum afs_flock_event { afs_flock_events } __mode(byte);
+enum afs_flock_operation { afs_flock_operations } __mode(byte);
+enum afs_io_error { afs_io_errors } __mode(byte);
+enum afs_rotate_trace { afs_rotate_traces } __mode(byte);
+enum afs_server_trace { afs_server_traces } __mode(byte);
+enum afs_volume_trace { afs_volume_traces } __mode(byte);
+
+#endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */
/*
* Export enum symbols via userspace.
@@ -622,21 +505,24 @@ enum afs_cb_break_reason {
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
+afs_alist_traces;
afs_call_traces;
-afs_server_traces;
+afs_cb_break_reasons;
afs_cell_traces;
-afs_fs_operations;
-afs_vl_operations;
afs_cm_operations;
-yfs_cm_operations;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
-afs_io_errors;
+afs_estate_traces;
afs_file_errors;
-afs_flock_types;
afs_flock_operations;
-afs_cb_break_reasons;
+afs_flock_types;
+afs_fs_operations;
+afs_io_errors;
+afs_rotate_traces;
+afs_server_traces;
+afs_vl_operations;
+yfs_cm_operations;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -654,12 +540,12 @@ TRACE_EVENT(afs_receive_data,
TP_ARGS(call, iter, want_more, ret),
TP_STRUCT__entry(
- __field(loff_t, remain )
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
- __field(bool, want_more )
- __field(int, ret )
+ __field(loff_t, remain)
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
+ __field(bool, want_more)
+ __field(int, ret)
),
TP_fast_assign(
@@ -686,9 +572,9 @@ TRACE_EVENT(afs_notify_call,
TP_ARGS(rxcall, call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
),
TP_fast_assign(
@@ -708,9 +594,9 @@ TRACE_EVENT(afs_cb_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, op )
- __field(u16, service_id )
+ __field(unsigned int, call)
+ __field(u32, op)
+ __field(u16, service_id)
),
TP_fast_assign(
@@ -733,11 +619,11 @@ TRACE_EVENT(afs_call,
TP_ARGS(call_debug_id, op, ref, outstanding, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, op )
- __field(int, ref )
- __field(int, outstanding )
- __field(const void *, where )
+ __field(unsigned int, call)
+ __field(int, op)
+ __field(int, ref)
+ __field(int, outstanding)
+ __field(const void *, where)
),
TP_fast_assign(
@@ -762,9 +648,9 @@ TRACE_EVENT(afs_make_fs_call,
TP_ARGS(call, fid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -794,10 +680,10 @@ TRACE_EVENT(afs_make_fs_calli,
TP_ARGS(call, fid, i),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, i )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(unsigned int, i)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -829,10 +715,10 @@ TRACE_EVENT(afs_make_fs_call1,
TP_ARGS(call, fid, name),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -866,11 +752,11 @@ TRACE_EVENT(afs_make_fs_call2,
TP_ARGS(call, fid, name, name2),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
- __array(char, name2, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
+ __array(char, name2, 24)
),
TP_fast_assign(
@@ -907,8 +793,8 @@ TRACE_EVENT(afs_make_vl_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_vl_operation, op )
+ __field(unsigned int, call)
+ __field(enum afs_vl_operation, op)
),
TP_fast_assign(
@@ -927,10 +813,10 @@ TRACE_EVENT(afs_call_done,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(struct rxrpc_call *, rx_call )
- __field(int, ret )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(struct rxrpc_call *, rx_call)
+ __field(int, ret)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -953,10 +839,10 @@ TRACE_EVENT(afs_send_data,
TP_ARGS(call, msg),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, flags )
- __field(loff_t, offset )
- __field(loff_t, count )
+ __field(unsigned int, call)
+ __field(unsigned int, flags)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
@@ -977,10 +863,10 @@ TRACE_EVENT(afs_sent_data,
TP_ARGS(call, msg, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, ret )
- __field(loff_t, offset )
- __field(loff_t, count )
+ __field(unsigned int, call)
+ __field(int, ret)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
@@ -1001,9 +887,9 @@ TRACE_EVENT(afs_dir_check_failed,
TP_ARGS(vnode, off, i_size),
TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(loff_t, off )
- __field(loff_t, i_size )
+ __field(struct afs_vnode *, vnode)
+ __field(loff_t, off)
+ __field(loff_t, i_size)
),
TP_fast_assign(
@@ -1016,37 +902,6 @@ TRACE_EVENT(afs_dir_check_failed,
__entry->vnode, __entry->off, __entry->i_size)
);
-TRACE_EVENT(afs_folio_dirty,
- TP_PROTO(struct afs_vnode *vnode, const char *where, struct folio *folio),
-
- TP_ARGS(vnode, where, folio),
-
- TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(const char *, where )
- __field(pgoff_t, index )
- __field(unsigned long, from )
- __field(unsigned long, to )
- ),
-
- TP_fast_assign(
- unsigned long priv = (unsigned long)folio_get_private(folio);
- __entry->vnode = vnode;
- __entry->where = where;
- __entry->index = folio_index(folio);
- __entry->from = afs_folio_dirty_from(folio, priv);
- __entry->to = afs_folio_dirty_to(folio, priv);
- __entry->to |= (afs_is_folio_dirty_mmapped(priv) ?
- (1UL << (BITS_PER_LONG - 1)) : 0);
- ),
-
- TP_printk("vn=%p %lx %s %lx-%lx%s",
- __entry->vnode, __entry->index, __entry->where,
- __entry->from,
- __entry->to & ~(1UL << (BITS_PER_LONG - 1)),
- __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "")
- );
-
TRACE_EVENT(afs_call_state,
TP_PROTO(struct afs_call *call,
enum afs_call_state from,
@@ -1056,11 +911,11 @@ TRACE_EVENT(afs_call_state,
TP_ARGS(call, from, to, ret, remote_abort),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, from )
- __field(enum afs_call_state, to )
- __field(int, ret )
- __field(u32, abort )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, from)
+ __field(enum afs_call_state, to)
+ __field(int, ret)
+ __field(u32, abort)
),
TP_fast_assign(
@@ -1084,9 +939,9 @@ TRACE_EVENT(afs_lookup,
TP_ARGS(dvnode, name, fid),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, dfid )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field_struct(struct afs_fid, dfid)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1116,15 +971,15 @@ TRACE_EVENT(afs_edit_dir,
TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name),
TP_STRUCT__entry(
- __field(unsigned int, vnode )
- __field(unsigned int, unique )
- __field(enum afs_edit_dir_reason, why )
- __field(enum afs_edit_dir_op, op )
- __field(unsigned int, block )
- __field(unsigned short, slot )
- __field(unsigned int, f_vnode )
- __field(unsigned int, f_unique )
- __array(char, name, 24 )
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_edit_dir_reason, why)
+ __field(enum afs_edit_dir_op, op)
+ __field(unsigned int, block)
+ __field(unsigned short, slot)
+ __field(unsigned int, f_vnode)
+ __field(unsigned int, f_unique)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1157,8 +1012,8 @@ TRACE_EVENT(afs_protocol_error,
TP_ARGS(call, cause),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_eproto_cause, cause )
+ __field(unsigned int, call)
+ __field(enum afs_eproto_cause, cause)
),
TP_fast_assign(
@@ -1177,9 +1032,9 @@ TRACE_EVENT(afs_io_error,
TP_ARGS(call, error, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, error )
- __field(enum afs_io_error, where )
+ __field(unsigned int, call)
+ __field(int, error)
+ __field(enum afs_io_error, where)
),
TP_fast_assign(
@@ -1199,9 +1054,9 @@ TRACE_EVENT(afs_file_error,
TP_ARGS(vnode, error, where),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(int, error )
- __field(enum afs_file_error, where )
+ __field_struct(struct afs_fid, fid)
+ __field(int, error)
+ __field(enum afs_file_error, where)
),
TP_fast_assign(
@@ -1247,9 +1102,9 @@ TRACE_EVENT(afs_cm_no_server,
TP_ARGS(call, srx),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1268,9 +1123,9 @@ TRACE_EVENT(afs_cm_no_server_u,
TP_ARGS(call, uuid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(uuid_t, uuid )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(uuid_t, uuid)
),
TP_fast_assign(
@@ -1290,11 +1145,11 @@ TRACE_EVENT(afs_flock_ev,
TP_ARGS(vnode, fl, event, error),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_flock_event, event )
- __field(enum afs_lock_state, state )
- __field(int, error )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_flock_event, event)
+ __field(enum afs_lock_state, state)
+ __field(int, error)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1320,13 +1175,13 @@ TRACE_EVENT(afs_flock_op,
TP_ARGS(vnode, fl, op),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(loff_t, from )
- __field(loff_t, len )
- __field(enum afs_flock_operation, op )
- __field(unsigned char, type )
- __field(unsigned int, flags )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(loff_t, from)
+ __field(loff_t, len)
+ __field(enum afs_flock_operation, op)
+ __field(unsigned char, type)
+ __field(unsigned int, flags)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1353,7 +1208,7 @@ TRACE_EVENT(afs_reload_dir,
TP_ARGS(vnode),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -1370,8 +1225,8 @@ TRACE_EVENT(afs_silly_rename,
TP_ARGS(vnode, done),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(bool, done )
+ __field_struct(struct afs_fid, fid)
+ __field(bool, done)
),
TP_fast_assign(
@@ -1390,9 +1245,9 @@ TRACE_EVENT(afs_get_tree,
TP_ARGS(cell, volume),
TP_STRUCT__entry(
- __field(u64, vid )
- __array(char, cell, 24 )
- __array(char, volume, 24 )
+ __field(u64, vid)
+ __array(char, cell, 24)
+ __array(char, volume, 24)
),
TP_fast_assign(
@@ -1410,6 +1265,30 @@ TRACE_EVENT(afs_get_tree,
__entry->cell, __entry->volume, __entry->vid)
);
+TRACE_EVENT(afs_cb_v_break,
+ TP_PROTO(afs_volid_t vid, unsigned int cb_v_break,
+ enum afs_cb_break_reason reason),
+
+ TP_ARGS(vid, cb_v_break, reason),
+
+ TP_STRUCT__entry(
+ __field(afs_volid_t, vid)
+ __field(unsigned int, cb_v_break)
+ __field(enum afs_cb_break_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->vid = vid;
+ __entry->cb_v_break = cb_v_break;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("%llx vb=%x %s",
+ __entry->vid,
+ __entry->cb_v_break,
+ __print_symbolic(__entry->reason, afs_cb_break_reasons))
+ );
+
TRACE_EVENT(afs_cb_break,
TP_PROTO(struct afs_fid *fid, unsigned int cb_break,
enum afs_cb_break_reason reason, bool skipped),
@@ -1417,10 +1296,10 @@ TRACE_EVENT(afs_cb_break,
TP_ARGS(fid, cb_break, reason, skipped),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(unsigned int, cb_break )
- __field(enum afs_cb_break_reason, reason )
- __field(bool, skipped )
+ __field_struct(struct afs_fid, fid)
+ __field(unsigned int, cb_break)
+ __field(enum afs_cb_break_reason, reason)
+ __field(bool, skipped)
),
TP_fast_assign(
@@ -1443,8 +1322,8 @@ TRACE_EVENT(afs_cb_miss,
TP_ARGS(fid, reason),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_cb_break_reason, reason )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_cb_break_reason, reason)
),
TP_fast_assign(
@@ -1464,10 +1343,10 @@ TRACE_EVENT(afs_server,
TP_ARGS(server_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, server )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, server)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
@@ -1490,9 +1369,9 @@ TRACE_EVENT(afs_volume,
TP_ARGS(vid, ref, reason),
TP_STRUCT__entry(
- __field(afs_volid_t, vid )
- __field(int, ref )
- __field(enum afs_volume_trace, reason )
+ __field(afs_volid_t, vid)
+ __field(int, ref)
+ __field(enum afs_volume_trace, reason)
),
TP_fast_assign(
@@ -1514,10 +1393,10 @@ TRACE_EVENT(afs_cell,
TP_ARGS(cell_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, cell )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, cell)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
@@ -1534,6 +1413,199 @@ TRACE_EVENT(afs_cell,
__entry->active)
);
+TRACE_EVENT(afs_alist,
+ TP_PROTO(unsigned int alist_debug_id, int ref, enum afs_alist_trace reason),
+
+ TP_ARGS(alist_debug_id, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, alist)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->alist = alist_debug_id;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("AL=%08x %s r=%d",
+ __entry->alist,
+ __print_symbolic(__entry->reason, afs_alist_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_estate,
+ TP_PROTO(unsigned int server_debug_id, unsigned int estate_debug_id,
+ int ref, enum afs_estate_trace reason),
+
+ TP_ARGS(server_debug_id, estate_debug_id, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(unsigned int, estate)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->server = server_debug_id;
+ __entry->estate = estate_debug_id;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("ES=%08x[%x] %s r=%d",
+ __entry->server,
+ __entry->estate,
+ __print_symbolic(__entry->reason, afs_estate_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_fs_probe,
+ TP_PROTO(struct afs_server *server, bool tx, struct afs_endpoint_state *estate,
+ unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
+
+ TP_ARGS(server, tx, estate, addr_index, error, abort_code, rtt_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(unsigned int, estate)
+ __field(bool, tx)
+ __field(u16, addr_index)
+ __field(short, error)
+ __field(s32, abort_code)
+ __field(unsigned int, rtt_us)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ struct afs_addr_list *alist = estate->addresses;
+ __entry->server = server->debug_id;
+ __entry->estate = estate->probe_seq;
+ __entry->tx = tx;
+ __entry->addr_index = addr_index;
+ __entry->error = error;
+ __entry->abort_code = abort_code;
+ __entry->rtt_us = rtt_us;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
+ sizeof(__entry->srx));
+ ),
+
+ TP_printk("s=%08x %s pq=%x ax=%u e=%d ac=%d rtt=%d %pISpc",
+ __entry->server, __entry->tx ? "tx" : "rx", __entry->estate,
+ __entry->addr_index, __entry->error, __entry->abort_code, __entry->rtt_us,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_vl_probe,
+ TP_PROTO(struct afs_vlserver *server, bool tx, struct afs_addr_list *alist,
+ unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
+
+ TP_ARGS(server, tx, alist, addr_index, error, abort_code, rtt_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(bool, tx)
+ __field(unsigned short, flags)
+ __field(u16, addr_index)
+ __field(short, error)
+ __field(s32, abort_code)
+ __field(unsigned int, rtt_us)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ __entry->server = server->debug_id;
+ __entry->tx = tx;
+ __entry->addr_index = addr_index;
+ __entry->error = error;
+ __entry->abort_code = abort_code;
+ __entry->rtt_us = rtt_us;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
+ sizeof(__entry->srx));
+ ),
+
+ TP_printk("vl=%08x %s ax=%u e=%d ac=%d rtt=%d %pISpc",
+ __entry->server, __entry->tx ? "tx" : "rx", __entry->addr_index,
+ __entry->error, __entry->abort_code, __entry->rtt_us,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_rotate,
+ TP_PROTO(struct afs_operation *op, enum afs_rotate_trace reason, unsigned int extra),
+
+ TP_ARGS(op, reason, extra),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, op)
+ __field(unsigned int, flags)
+ __field(unsigned int, extra)
+ __field(unsigned short, iteration)
+ __field(short, server_index)
+ __field(short, addr_index)
+ __field(enum afs_rotate_trace, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->flags = op->flags;
+ __entry->iteration = op->nr_iterations;
+ __entry->server_index = op->server_index;
+ __entry->addr_index = op->addr_index;
+ __entry->reason = reason;
+ __entry->extra = extra;
+ ),
+
+ TP_printk("OP=%08x it=%02x %s fl=%x sx=%d ax=%d ext=%d",
+ __entry->op,
+ __entry->iteration,
+ __print_symbolic(__entry->reason, afs_rotate_traces),
+ __entry->flags,
+ __entry->server_index,
+ __entry->addr_index,
+ __entry->extra)
+ );
+
+TRACE_EVENT(afs_make_call,
+ TP_PROTO(struct afs_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(bool, is_vl)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->op = call->operation_ID;
+ __entry->fid = call->fid;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(call->peer),
+ sizeof(__entry->srx));
+ __entry->srx.srx_service = call->service_id;
+ __entry->is_vl = (__entry->srx.srx_service == VL_SERVICE ||
+ __entry->srx.srx_service == YFS_VL_SERVICE);
+ ),
+
+ TP_printk("c=%08x %pISpc+%u %s %llx:%llx:%x",
+ __entry->call,
+ &__entry->srx.transport,
+ __entry->srx.srx_service,
+ __entry->is_vl ?
+ __print_symbolic(__entry->op, afs_vl_operations) :
+ __print_symbolic(__entry->op, afs_fs_operations),
+ __entry->fid.vid,
+ __entry->fid.vnode,
+ __entry->fid.unique)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
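
The afs.h restructuring above is a textbook X-macro refactor: each
EM()/E_() table is now written once and expanded twice, first with
EM(a, b) -> a to generate the enum members, then with EM(a, b) -> {a, b}
to generate the symbol tables that __print_symbolic() renders. A standalone
userspace reduction of the same trick (all names invented for the demo):

#include <stdio.h>

#define demo_traces \
	EM(demo_trace_alloc, "ALLOC") \
	EM(demo_trace_get,   "GET  ") \
	E_(demo_trace_free,  "FREE ")

/* first expansion: enum constants */
#define EM(a, b) a,
#define E_(a, b) a
enum demo_trace { demo_traces };
#undef EM
#undef E_

/* second expansion: value/string pairs */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct { int val; const char *name; } demo_names[] = {
	demo_traces
};
#undef EM
#undef E_

int main(void)
{
	enum demo_trace t = demo_trace_get;

	printf("%s\n", demo_names[t].name);	/* prints "GET  " */
	return 0;
}
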
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 279a7a0c90..90b0222390 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -21,7 +21,7 @@ struct btrfs_delayed_data_ref;
struct btrfs_delayed_ref_head;
struct btrfs_block_group;
struct btrfs_free_cluster;
-struct map_lookup;
+struct btrfs_chunk_map;
struct extent_buffer;
struct btrfs_work;
struct btrfs_workqueue;
@@ -265,20 +265,20 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
__print_symbolic_u64(type, \
{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
{ EXTENT_MAP_HOLE, "HOLE" }, \
- { EXTENT_MAP_INLINE, "INLINE" }, \
- { EXTENT_MAP_DELALLOC, "DELALLOC" })
+ { EXTENT_MAP_INLINE, "INLINE" })
#define show_map_type(type) \
type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type)
#define show_map_flags(flag) \
__print_flags(flag, "|", \
- { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
- { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
- { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
- { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
- { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
- { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" })
+ { EXTENT_FLAG_PINNED, "PINNED" },\
+ { EXTENT_FLAG_COMPRESS_ZLIB, "COMPRESS_ZLIB" },\
+ { EXTENT_FLAG_COMPRESS_LZO, "COMPRESS_LZO" },\
+ { EXTENT_FLAG_COMPRESS_ZSTD, "COMPRESS_ZSTD" },\
+ { EXTENT_FLAG_PREALLOC, "PREALLOC" },\
+ { EXTENT_FLAG_LOGGING, "LOGGING" },\
+ { EXTENT_FLAG_FILLING, "FILLING" })
TRACE_EVENT_CONDITION(btrfs_get_extent,
@@ -297,9 +297,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__field( u64, orig_start )
__field( u64, block_start )
__field( u64, block_len )
- __field( unsigned long, flags )
+ __field( u32, flags )
__field( int, refs )
- __field( unsigned int, compress_type )
),
TP_fast_assign_btrfs(root->fs_info,
@@ -312,13 +311,11 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->block_len = map->block_len;
__entry->flags = map->flags;
__entry->refs = refcount_read(&map->refs);
- __entry->compress_type = map->compress_type;
),
TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
"orig_start=%llu block_start=%llu(%s) "
- "block_len=%llu flags=%s refs=%u "
- "compress_type=%u",
+ "block_len=%llu flags=%s refs=%u",
show_root_type(__entry->root_objectid),
__entry->ino,
__entry->start,
@@ -327,7 +324,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
show_map_type(__entry->block_start),
__entry->block_len,
show_map_flags(__entry->flags),
- __entry->refs, __entry->compress_type)
+ __entry->refs)
);
TRACE_EVENT(btrfs_handle_em_exist,
@@ -1061,7 +1058,7 @@ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
DECLARE_EVENT_CLASS(btrfs__chunk,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size),
@@ -1095,7 +1092,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size)
);
@@ -1103,7 +1100,7 @@ DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size)
);
@@ -2099,17 +2096,12 @@ TRACE_EVENT(btrfs_set_extent_bit,
__field( unsigned, set_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2137,17 +2129,12 @@ TRACE_EVENT(btrfs_clear_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->clear_bits = clear_bits;
@@ -2176,17 +2163,12 @@ TRACE_EVENT(btrfs_convert_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
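
The show_map_flags() rewrite above tracks a 6.8 btrfs change: the
extent-map flags are now defined as ready-made bit masks rather than bit
numbers, so the trace table lists the constants directly instead of
wrapping each one in (1 << flag). __print_flags() walks a mask/name table
and joins the matching names with the separator. A rough userspace
equivalent of that formatting (mask values assumed for the demo; this is
a simplification, not the trace infrastructure):

#include <stdio.h>

#define EXTENT_FLAG_PINNED	(1U << 0)	/* assumed values */
#define EXTENT_FLAG_PREALLOC	(1U << 4)

static void print_flags(unsigned int flags)
{
	static const struct { unsigned int mask; const char *name; } tbl[] = {
		{ EXTENT_FLAG_PINNED,   "PINNED" },
		{ EXTENT_FLAG_PREALLOC, "PREALLOC" },
	};
	const char *sep = "";

	for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		if (flags & tbl[i].mask) {
			printf("%s%s", sep, tbl[i].name);
			sep = "|";
		}
	}
	printf("\n");
}

int main(void)
{
	print_flags(EXTENT_FLAG_PINNED | EXTENT_FLAG_PREALLOC);
	return 0;
}
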
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 65029dfb92..a697f4b771 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -772,15 +772,14 @@ TRACE_EVENT(ext4_mb_release_group_pa,
);
TRACE_EVENT(ext4_discard_preallocations,
- TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
+ TP_PROTO(struct inode *inode, unsigned int len),
- TP_ARGS(inode, len, needed),
+ TP_ARGS(inode, len),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( unsigned int, len )
- __field( unsigned int, needed )
),
@@ -788,13 +787,11 @@ TRACE_EVENT(ext4_discard_preallocations,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->len = len;
- __entry->needed = needed;
),
- TP_printk("dev %d,%d ino %lu len: %u needed %u",
+ TP_printk("dev %d,%d ino %lu len: %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->len,
- __entry->needed)
+ (unsigned long) __entry->ino, __entry->len)
);
TRACE_EVENT(ext4_mb_discard_preallocations,
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 793f82cc15..7ed0fc430d 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -161,6 +161,19 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
{ EX_READ, "Read" }, \
{ EX_BLOCK_AGE, "Block Age" })
+#define show_inode_type(x) \
+ __print_symbolic(x, \
+ { S_IFLNK, "symbolic" }, \
+ { S_IFREG, "regular" }, \
+ { S_IFDIR, "directory" }, \
+ { S_IFCHR, "character" }, \
+ { S_IFBLK, "block" }, \
+ { S_IFIFO, "fifo" }, \
+ { S_IFSOCK, "sock" })
+
+#define S_ALL_PERM (S_ISUID | S_ISGID | S_ISVTX | \
+ S_IRWXU | S_IRWXG | S_IRWXO)
+
struct f2fs_sb_info;
struct f2fs_io_info;
struct extent_info;
@@ -215,17 +228,21 @@ DECLARE_EVENT_CLASS(f2fs__inode_exit,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
+ __field(umode_t, mode)
__field(int, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, ret = %d",
+ TP_printk("dev = (%d,%d), ino = %lu, type: %s, mode = 0%o, ret = %d",
show_dev_ino(__entry),
+ show_inode_type(__entry->mode & S_IFMT),
+ __entry->mode & S_ALL_PERM,
__entry->ret)
);
@@ -866,6 +883,75 @@ TRACE_EVENT(f2fs_lookup_end,
__entry->err)
);
+TRACE_EVENT(f2fs_rename_start,
+
+ TP_PROTO(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags),
+
+ TP_ARGS(old_dir, old_dentry, new_dir, new_dentry, flags),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __string(old_name, old_dentry->d_name.name)
+ __field(ino_t, new_pino)
+ __string(new_name, new_dentry->d_name.name)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = old_dir->i_sb->s_dev;
+ __entry->ino = old_dir->i_ino;
+ __assign_str(old_name, old_dentry->d_name.name);
+ __entry->new_pino = new_dir->i_ino;
+ __assign_str(new_name, new_dentry->d_name.name);
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev = (%d,%d), old_dir = %lu, old_name: %s, "
+ "new_dir = %lu, new_name: %s, flags = %u",
+ show_dev_ino(__entry),
+ __get_str(old_name),
+ __entry->new_pino,
+ __get_str(new_name),
+ __entry->flags)
+);
+
+TRACE_EVENT(f2fs_rename_end,
+
+ TP_PROTO(struct dentry *old_dentry, struct dentry *new_dentry,
+ unsigned int flags, int ret),
+
+ TP_ARGS(old_dentry, new_dentry, flags, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __string(old_name, old_dentry->d_name.name)
+ __string(new_name, new_dentry->d_name.name)
+ __field(unsigned int, flags)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = old_dentry->d_sb->s_dev;
+ __entry->ino = old_dentry->d_inode->i_ino;
+ __assign_str(old_name, old_dentry->d_name.name);
+ __assign_str(new_name, new_dentry->d_name.name);
+ __entry->flags = flags;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, old_name: %s, "
+ "new_name: %s, flags = %u, ret = %d",
+ show_dev_ino(__entry),
+ __get_str(old_name),
+ __get_str(new_name),
+ __entry->flags,
+ __entry->ret)
+);
+
TRACE_EVENT(f2fs_readdir,
TP_PROTO(struct inode *dir, loff_t start_pos, loff_t end_pos, int err),
@@ -1283,13 +1369,6 @@ DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty,
TP_ARGS(page, type)
);
-DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
-
- TP_PROTO(struct page *page, int type),
-
- TP_ARGS(page, type)
-);
-
TRACE_EVENT(f2fs_replace_atomic_write_block,
TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index,
@@ -1327,30 +1406,50 @@ TRACE_EVENT(f2fs_replace_atomic_write_block,
__entry->recovery)
);
-TRACE_EVENT(f2fs_filemap_fault,
+DECLARE_EVENT_CLASS(f2fs_mmap,
- TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
- TP_ARGS(inode, index, ret),
+ TP_ARGS(inode, index, flags, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(pgoff_t, index)
- __field(unsigned long, ret)
+ __field(vm_flags_t, flags)
+ __field(vm_fault_t, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->index = index;
+ __entry->flags = flags;
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx",
+ TP_printk("dev = (%d,%d), ino = %lu, index = %lu, flags: %s, ret: %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
- __entry->ret)
+ __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
+ __print_flags(__entry->ret, "|", VM_FAULT_RESULT_TRACE))
+);
+
+DEFINE_EVENT(f2fs_mmap, f2fs_filemap_fault,
+
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
+
+ TP_ARGS(inode, index, flags, ret)
+);
+
+DEFINE_EVENT(f2fs_mmap, f2fs_vm_page_mkwrite,
+
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
+
+ TP_ARGS(inode, index, flags, ret)
);
TRACE_EVENT(f2fs_writepages,
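
The f2fs__inode_exit class above now logs the inode type and its permission
bits separately: show_inode_type() maps the S_IFMT bits to a name via
__print_symbolic(), and S_ALL_PERM masks off everything except the
permission and set-id/sticky bits. The same decomposition in plain
userspace C (a switch standing in for __print_symbolic):

#include <stdio.h>
#include <sys/stat.h>

#define S_ALL_PERM (S_ISUID | S_ISGID | S_ISVTX | \
		    S_IRWXU | S_IRWXG | S_IRWXO)

static const char *inode_type(mode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:  return "regular";
	case S_IFDIR:  return "directory";
	case S_IFLNK:  return "symbolic";
	case S_IFCHR:  return "character";
	case S_IFBLK:  return "block";
	case S_IFIFO:  return "fifo";
	case S_IFSOCK: return "sock";
	default:       return "unknown";
	}
}

int main(void)
{
	mode_t mode = S_IFREG | 0644;	/* example mode */

	/* mirrors the "type: %s, mode = 0%o" fields in the new TP_printk */
	printf("type: %s, mode = 0%o\n",
	       inode_type(mode), (unsigned int)(mode & S_ALL_PERM));
	return 0;
}
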
diff --git a/include/trace/events/ksm.h b/include/trace/events/ksm.h
index b5ac35c1d0..e728647b5d 100644
--- a/include/trace/events/ksm.h
+++ b/include/trace/events/ksm.h
@@ -245,6 +245,39 @@ TRACE_EVENT(ksm_remove_rmap_item,
__entry->pfn, __entry->rmap_item, __entry->mm)
);
+/**
+ * ksm_advisor - called after the advisor has run
+ *
+ * @scan_time: scan time in seconds
+ * @pages_to_scan: new pages_to_scan value
+ * @cpu_percent: cpu usage in percent
+ *
+ * Allows tracing of the KSM advisor.
+ */
+TRACE_EVENT(ksm_advisor,
+
+ TP_PROTO(s64 scan_time, unsigned long pages_to_scan,
+ unsigned int cpu_percent),
+
+ TP_ARGS(scan_time, pages_to_scan, cpu_percent),
+
+ TP_STRUCT__entry(
+ __field(s64, scan_time)
+ __field(unsigned long, pages_to_scan)
+ __field(unsigned int, cpu_percent)
+ ),
+
+ TP_fast_assign(
+ __entry->scan_time = scan_time;
+ __entry->pages_to_scan = pages_to_scan;
+ __entry->cpu_percent = cpu_percent;
+ ),
+
+ TP_printk("ksm scan time %lld pages_to_scan %lu cpu percent %u",
+ __entry->scan_time, __entry->pages_to_scan,
+ __entry->cpu_percent)
+);
+
#endif /* _TRACE_KSM_H */
/* This part must be outside protection */
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 3bd31ea23f..011fba6b55 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -62,7 +62,7 @@ TRACE_EVENT(kvm_vcpu_wakeup,
__entry->valid ? "valid" : "invalid")
);
-#if defined(CONFIG_HAVE_KVM_IRQFD)
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
TP_ARGS(gsi, level, irq_source_id),
@@ -82,7 +82,7 @@ TRACE_EVENT(kvm_set_irq,
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode \
@@ -170,7 +170,7 @@ TRACE_EVENT(kvm_msi_set_irq,
#endif /* defined(__KVM_HAVE_IOAPIC) */
-#if defined(CONFIG_HAVE_KVM_IRQFD)
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
@@ -197,7 +197,7 @@ TRACE_EVENT(kvm_ack_irq,
TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index d801409b33..d55e53ac91 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -135,6 +135,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
#define __def_pagetype_names \
+ DEF_PAGETYPE_NAME(hugetlb), \
DEF_PAGETYPE_NAME(offline), \
DEF_PAGETYPE_NAME(guard), \
DEF_PAGETYPE_NAME(table), \
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index beec534cba..447a8c21cf 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -16,34 +16,57 @@
* Define enums for tracing information.
*/
#define netfs_read_traces \
+ EM(netfs_read_trace_dio_read, "DIO-READ ") \
EM(netfs_read_trace_expanded, "EXPANDED ") \
EM(netfs_read_trace_readahead, "READAHEAD") \
EM(netfs_read_trace_readpage, "READPAGE ") \
+ EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \
E_(netfs_read_trace_write_begin, "WRITEBEGN")
+#define netfs_write_traces \
+ EM(netfs_write_trace_dio_write, "DIO-WRITE") \
+ EM(netfs_write_trace_launder, "LAUNDER ") \
+ EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
+ EM(netfs_write_trace_writeback, "WRITEBACK") \
+ E_(netfs_write_trace_writethrough, "WRITETHRU")
+
#define netfs_rreq_origins \
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
- E_(NETFS_READ_FOR_WRITE, "RW")
+ EM(NETFS_READ_FOR_WRITE, "RW") \
+ EM(NETFS_WRITEBACK, "WB") \
+ EM(NETFS_WRITETHROUGH, "WT") \
+ EM(NETFS_LAUNDER_WRITE, "LW") \
+ EM(NETFS_UNBUFFERED_WRITE, "UW") \
+ EM(NETFS_DIO_READ, "DR") \
+ E_(NETFS_DIO_WRITE, "DW")
#define netfs_rreq_traces \
EM(netfs_rreq_trace_assess, "ASSESS ") \
EM(netfs_rreq_trace_copy, "COPY ") \
EM(netfs_rreq_trace_done, "DONE ") \
EM(netfs_rreq_trace_free, "FREE ") \
+ EM(netfs_rreq_trace_redirty, "REDIRTY") \
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
- E_(netfs_rreq_trace_unmark, "UNMARK ")
+ EM(netfs_rreq_trace_unmark, "UNMARK ") \
+ EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
+ EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \
+ E_(netfs_rreq_trace_write_done, "WR-DONE")
#define netfs_sreq_sources \
EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \
EM(NETFS_READ_FROM_CACHE, "READ") \
- E_(NETFS_INVALID_READ, "INVL") \
+ EM(NETFS_INVALID_READ, "INVL") \
+ EM(NETFS_UPLOAD_TO_SERVER, "UPLD") \
+ EM(NETFS_WRITE_TO_CACHE, "WRIT") \
+ E_(NETFS_INVALID_WRITE, "INVL")
#define netfs_sreq_traces \
EM(netfs_sreq_trace_download_instead, "RDOWN") \
EM(netfs_sreq_trace_free, "FREE ") \
+ EM(netfs_sreq_trace_limited, "LIMIT") \
EM(netfs_sreq_trace_prepare, "PREP ") \
EM(netfs_sreq_trace_resubmit_short, "SHORT") \
EM(netfs_sreq_trace_submit, "SUBMT") \
@@ -55,19 +78,24 @@
#define netfs_failures \
EM(netfs_fail_check_write_begin, "check-write-begin") \
EM(netfs_fail_copy_to_cache, "copy-to-cache") \
+ EM(netfs_fail_dio_read_short, "dio-read-short") \
+ EM(netfs_fail_dio_read_zero, "dio-read-zero") \
EM(netfs_fail_read, "read") \
EM(netfs_fail_short_read, "short-read") \
- E_(netfs_fail_prepare_write, "prep-write")
+ EM(netfs_fail_prepare_write, "prep-write") \
+ E_(netfs_fail_write, "write")
#define netfs_rreq_ref_traces \
- EM(netfs_rreq_trace_get_hold, "GET HOLD ") \
+ EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \
EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
- EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \
+ EM(netfs_rreq_trace_put_no_submit, "PUT NO-SUBM") \
+ EM(netfs_rreq_trace_put_return, "PUT RETURN ") \
EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
- EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \
+ EM(netfs_rreq_trace_put_work, "PUT WORK ") \
+ EM(netfs_rreq_trace_see_work, "SEE WORK ") \
E_(netfs_rreq_trace_new, "NEW ")
#define netfs_sreq_ref_traces \
@@ -76,11 +104,44 @@
EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
EM(netfs_sreq_trace_new, "NEW ") \
EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
+ EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \
EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \
EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \
+ EM(netfs_sreq_trace_put_wip, "PUT WIP ") \
+ EM(netfs_sreq_trace_put_work, "PUT WORK ") \
E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
+#define netfs_folio_traces \
+ /* The first few correspond to enum netfs_how_to_modify */ \
+ EM(netfs_folio_is_uptodate, "mod-uptodate") \
+ EM(netfs_just_prefetch, "mod-prefetch") \
+ EM(netfs_whole_folio_modify, "mod-whole-f") \
+ EM(netfs_modify_and_clear, "mod-n-clear") \
+ EM(netfs_streaming_write, "mod-streamw") \
+ EM(netfs_streaming_write_cont, "mod-streamw+") \
+ EM(netfs_flush_content, "flush") \
+ EM(netfs_streaming_filled_page, "mod-streamw-f") \
+ EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
+ /* The rest are for writeback */ \
+ EM(netfs_folio_trace_clear, "clear") \
+ EM(netfs_folio_trace_clear_s, "clear-s") \
+ EM(netfs_folio_trace_clear_g, "clear-g") \
+ EM(netfs_folio_trace_copy_to_cache, "copy") \
+ EM(netfs_folio_trace_end_copy, "end-copy") \
+ EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
+ EM(netfs_folio_trace_kill, "kill") \
+ EM(netfs_folio_trace_launder, "launder") \
+ EM(netfs_folio_trace_mkwrite, "mkwrite") \
+ EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
+ EM(netfs_folio_trace_read_gaps, "read-gaps") \
+ EM(netfs_folio_trace_redirty, "redirty") \
+ EM(netfs_folio_trace_redirtied, "redirtied") \
+ EM(netfs_folio_trace_store, "store") \
+ EM(netfs_folio_trace_store_plus, "store+") \
+ EM(netfs_folio_trace_wthru, "wthru") \
+ E_(netfs_folio_trace_wthru_plus, "wthru+")
+
#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
@@ -90,11 +151,13 @@
#define E_(a, b) a
enum netfs_read_trace { netfs_read_traces } __mode(byte);
+enum netfs_write_trace { netfs_write_traces } __mode(byte);
enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
enum netfs_failure { netfs_failures } __mode(byte);
enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
+enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
#endif
@@ -107,6 +170,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
netfs_read_traces;
+netfs_write_traces;
netfs_rreq_origins;
netfs_rreq_traces;
netfs_sreq_sources;
@@ -114,6 +178,7 @@ netfs_sreq_traces;
netfs_failures;
netfs_rreq_ref_traces;
netfs_sreq_ref_traces;
+netfs_folio_traces;
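
These trace headers lean on an X-macro pattern: each EM()/E_() list is expanded several times — first to declare the enum constants, then to feed TRACE_DEFINE_ENUM(), and finally (below) to build the value/string tables consumed by __print_symbolic(). A minimal standalone sketch of the same pattern, using hypothetical names rather than the netfs lists:

    #define COLOR_LIST \
    	EM(color_red,  "red")  \
    	E_(color_blue, "blue")

    /* First expansion: the list becomes enum constants. */
    #define EM(a, b) a,
    #define E_(a, b) a
    enum color { COLOR_LIST };
    #undef EM
    #undef E_

    /* Second expansion: the same list becomes value/string pairs. */
    #define EM(a, b) { a, b },
    #define E_(a, b) { a, b }
    static const struct { int value; const char *name; } color_names[] = { COLOR_LIST };
    #undef EM
    #undef E_
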
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -314,6 +379,82 @@ TRACE_EVENT(netfs_sreq_ref,
__entry->ref)
);
+TRACE_EVENT(netfs_folio,
+ TP_PROTO(struct folio *folio, enum netfs_folio_trace why),
+
+ TP_ARGS(folio, why),
+
+ TP_STRUCT__entry(
+ __field(ino_t, ino)
+ __field(pgoff_t, index)
+ __field(unsigned int, nr)
+ __field(enum netfs_folio_trace, why)
+ ),
+
+ TP_fast_assign(
+ __entry->ino = folio->mapping->host->i_ino;
+ __entry->why = why;
+ __entry->index = folio_index(folio);
+ __entry->nr = folio_nr_pages(folio);
+ ),
+
+ TP_printk("i=%05lx ix=%05lx-%05lx %s",
+ __entry->ino, __entry->index, __entry->index + __entry->nr - 1,
+ __print_symbolic(__entry->why, netfs_folio_traces))
+ );
+
+TRACE_EVENT(netfs_write_iter,
+ TP_PROTO(const struct kiocb *iocb, const struct iov_iter *from),
+
+ TP_ARGS(iocb, from),
+
+ TP_STRUCT__entry(
+ __field(unsigned long long, start )
+ __field(size_t, len )
+ __field(unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->start = iocb->ki_pos;
+ __entry->len = iov_iter_count(from);
+ __entry->flags = iocb->ki_flags;
+ ),
+
+ TP_printk("WRITE-ITER s=%llx l=%zx f=%x",
+ __entry->start, __entry->len, __entry->flags)
+ );
+
+TRACE_EVENT(netfs_write,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ enum netfs_write_trace what),
+
+ TP_ARGS(wreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq )
+ __field(unsigned int, cookie )
+ __field(enum netfs_write_trace, what )
+ __field(unsigned long long, start )
+ __field(size_t, len )
+ ),
+
+ TP_fast_assign(
+ struct netfs_inode *__ctx = netfs_inode(wreq->inode);
+ struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
+ __entry->wreq = wreq->debug_id;
+ __entry->cookie = __cookie ? __cookie->debug_id : 0;
+ __entry->what = what;
+ __entry->start = wreq->start;
+ __entry->len = wreq->len;
+ ),
+
+ TP_printk("R=%08x %s c=%08x by=%llx-%llx",
+ __entry->wreq,
+ __print_symbolic(__entry->what, netfs_write_traces),
+ __entry->cookie,
+ __entry->start, __entry->start + __entry->len - 1)
+ );
+
#undef EM
#undef E_
#endif /* _TRACE_NETFS_H */
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index ba2d96a1bc..f50fcafc69 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -609,7 +609,7 @@ TRACE_EVENT(rpcgss_context,
__field(unsigned int, timeout)
__field(u32, window_size)
__field(int, len)
- __string(acceptor, data)
+ __string_len(acceptor, data, len)
),
TP_fast_assign(
@@ -618,7 +618,7 @@ TRACE_EVENT(rpcgss_context,
__entry->timeout = timeout;
__entry->window_size = window_size;
__entry->len = len;
- strncpy(__get_str(acceptor), data, len);
+ __assign_str(acceptor, data);
),
TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 718df1d9b8..110c1475c5 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -22,47 +22,37 @@
** Event classes
**/
-DECLARE_EVENT_CLASS(rpcrdma_completion_class,
+DECLARE_EVENT_CLASS(rpcrdma_simple_cid_class,
TP_PROTO(
- const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
),
- TP_ARGS(wc, cid),
+ TP_ARGS(cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
- __field(unsigned long, status)
- __field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
- __entry->status = wc->status;
- if (wc->status)
- __entry->vendor_err = wc->vendor_err;
- else
- __entry->vendor_err = 0;
),
- TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
- __entry->cq_id, __entry->completion_id,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk("cq.id=%d cid=%d",
+ __entry->cq_id, __entry->completion_id
)
);
-#define DEFINE_COMPLETION_EVENT(name) \
- DEFINE_EVENT(rpcrdma_completion_class, name, \
+#define DEFINE_SIMPLE_CID_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_simple_cid_class, name, \
TP_PROTO( \
- const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
), \
- TP_ARGS(wc, cid))
+ TP_ARGS(cid) \
+ )
-DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
+DECLARE_EVENT_CLASS(rpcrdma_completion_class,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
@@ -73,20 +63,29 @@ DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ if (wc->status)
+ __entry->vendor_err = wc->vendor_err;
+ else
+ __entry->vendor_err = 0;
),
- TP_printk("cq.id=%u cid=%d",
- __entry->cq_id, __entry->completion_id
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
)
);
-#define DEFINE_SEND_COMPLETION_EVENT(name) \
- DEFINE_EVENT(rpcrdma_send_completion_class, name, \
+#define DEFINE_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_completion_class, name, \
TP_PROTO( \
const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
@@ -978,27 +977,7 @@ TRACE_EVENT(xprtrdma_post_send_err,
)
);
-TRACE_EVENT(xprtrdma_post_recv,
- TP_PROTO(
- const struct rpcrdma_rep *rep
- ),
-
- TP_ARGS(rep),
-
- TP_STRUCT__entry(
- __field(u32, cq_id)
- __field(int, completion_id)
- ),
-
- TP_fast_assign(
- __entry->cq_id = rep->rr_cid.ci_queue_id;
- __entry->completion_id = rep->rr_cid.ci_completion_id;
- ),
-
- TP_printk("cq.id=%d cid=%d",
- __entry->cq_id, __entry->completion_id
- )
-);
+DEFINE_SIMPLE_CID_EVENT(xprtrdma_post_recv);
TRACE_EVENT(xprtrdma_post_recvs,
TP_PROTO(
@@ -1783,29 +1762,29 @@ DEFINE_ERROR_EVENT(chunk);
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
u64 dma_addr,
u32 length
),
- TP_ARGS(rdma, dma_addr, length),
+ TP_ARGS(cid, dma_addr, length),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(u64, dma_addr)
__field(u32, length)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->dma_addr = dma_addr;
__entry->length = length;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
- __get_str(addr), __get_str(device),
+ TP_printk("cq.id=%u cid=%d dma_addr=%llu length=%u",
+ __entry->cq_id, __entry->completion_id,
__entry->dma_addr, __entry->length
)
);
@@ -1813,11 +1792,12 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
#define DEFINE_SVC_DMA_EVENT(name) \
DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
TP_PROTO( \
- const struct svcxprt_rdma *rdma,\
+ const struct rpc_rdma_cid *cid, \
u64 dma_addr, \
u32 length \
), \
- TP_ARGS(rdma, dma_addr, length))
+ TP_ARGS(cid, dma_addr, length) \
+ )
DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
@@ -1826,33 +1806,37 @@ DEFINE_SVC_DMA_EVENT(dma_unmap_page);
TRACE_EVENT(svcrdma_dma_map_rw_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ u64 offset,
+ u32 handle,
unsigned int nents,
int status
),
- TP_ARGS(rdma, nents, status),
+ TP_ARGS(rdma, offset, handle, nents, status),
TP_STRUCT__entry(
- __field(int, status)
+ __field(u32, cq_id)
+ __field(u32, handle)
+ __field(u64, offset)
__field(unsigned int, nents)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ __field(int, status)
),
TP_fast_assign(
- __entry->status = status;
+ __entry->cq_id = rdma->sc_sq_cq->res.id;
+ __entry->handle = handle;
+ __entry->offset = offset;
__entry->nents = nents;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ __entry->status = status;
),
- TP_printk("addr=%s device=%s nents=%u status=%d",
- __get_str(addr), __get_str(device), __entry->nents,
- __entry->status
+ TP_printk("cq.id=%u 0x%016llx:0x%08x nents=%u status=%d",
+ __entry->cq_id, (unsigned long long)__entry->offset,
+ __entry->handle, __entry->nents, __entry->status
)
);
-TRACE_EVENT(svcrdma_no_rwctx_err,
+TRACE_EVENT(svcrdma_rwctx_empty,
TP_PROTO(
const struct svcxprt_rdma *rdma,
unsigned int num_sges
@@ -1861,79 +1845,75 @@ TRACE_EVENT(svcrdma_no_rwctx_err,
TP_ARGS(rdma, num_sges),
TP_STRUCT__entry(
+ __field(u32, cq_id)
__field(unsigned int, num_sges)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = rdma->sc_sq_cq->res.id;
__entry->num_sges = num_sges;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s num_sges=%d",
- __get_str(addr), __get_str(device), __entry->num_sges
+ TP_printk("cq.id=%u num_sges=%d",
+ __entry->cq_id, __entry->num_sges
)
);
TRACE_EVENT(svcrdma_page_overrun_err,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
- const struct svc_rqst *rqst,
+ const struct rpc_rdma_cid *cid,
unsigned int pageno
),
- TP_ARGS(rdma, rqst, pageno),
+ TP_ARGS(cid, pageno),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, pageno)
- __field(u32, xid)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->pageno = pageno;
- __entry->xid = __be32_to_cpu(rqst->rq_xid);
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
- __get_str(device), __entry->xid, __entry->pageno
+ TP_printk("cq.id=%u cid=%d pageno=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->pageno
)
);
TRACE_EVENT(svcrdma_small_wrch_err,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
unsigned int remaining,
unsigned int seg_no,
unsigned int num_segs
),
- TP_ARGS(rdma, remaining, seg_no, num_segs),
+ TP_ARGS(cid, remaining, seg_no, num_segs),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, remaining)
__field(unsigned int, seg_no)
__field(unsigned int, num_segs)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->remaining = remaining;
__entry->seg_no = seg_no;
__entry->num_segs = num_segs;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
- __get_str(addr), __get_str(device), __entry->remaining,
- __entry->seg_no, __entry->num_segs
+ TP_printk("cq.id=%u cid=%d remaining=%u seg_no=%u num_segs=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->remaining, __entry->seg_no, __entry->num_segs
)
);
@@ -2020,31 +2000,11 @@ TRACE_EVENT(svcrdma_post_send,
)
);
-DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
-TRACE_EVENT(svcrdma_post_recv,
- TP_PROTO(
- const struct svc_rdma_recv_ctxt *ctxt
- ),
-
- TP_ARGS(ctxt),
-
- TP_STRUCT__entry(
- __field(u32, cq_id)
- __field(int, completion_id)
- ),
-
- TP_fast_assign(
- __entry->cq_id = ctxt->rc_cid.ci_queue_id;
- __entry->completion_id = ctxt->rc_cid.ci_completion_id;
- ),
-
- TP_printk("cq.id=%d cid=%d",
- __entry->cq_id, __entry->completion_id
- )
-);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_post_recv);
DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
@@ -2152,8 +2112,9 @@ TRACE_EVENT(svcrdma_wc_read,
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_read_finished);
-DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_write);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
@@ -2184,65 +2145,74 @@ TRACE_EVENT(svcrdma_qp_error,
)
);
-DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
+DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
TP_PROTO(
- const struct svcxprt_rdma *rdma
+ const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid
),
- TP_ARGS(rdma),
+ TP_ARGS(rdma, cid),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, avail)
__field(int, depth)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s sc_sq_avail=%d/%d",
- __get_str(addr), __entry->avail, __entry->depth
+ TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->avail, __entry->depth
)
);
#define DEFINE_SQ_EVENT(name) \
- DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
- TP_PROTO( \
- const struct svcxprt_rdma *rdma \
- ), \
- TP_ARGS(rdma))
+ DEFINE_EVENT(svcrdma_sendqueue_class, name, \
+ TP_PROTO( \
+ const struct svcxprt_rdma *rdma, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(rdma, cid) \
+ )
-DEFINE_SQ_EVENT(full);
-DEFINE_SQ_EVENT(retry);
+DEFINE_SQ_EVENT(svcrdma_sq_full);
+DEFINE_SQ_EVENT(svcrdma_sq_retry);
TRACE_EVENT(svcrdma_sq_post_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
int status
),
- TP_ARGS(rdma, status),
+ TP_ARGS(rdma, cid, status),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, avail)
__field(int, depth)
__field(int, status)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
__entry->status = status;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
- __get_str(addr), __entry->avail, __entry->depth,
- __entry->status
+ TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d status=%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->avail, __entry->depth, __entry->status
)
);
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 6188ad0d9e..dbb01b4b74 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -493,33 +493,30 @@ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
*/
DECLARE_EVENT_CLASS(sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
- TP_ARGS(tsk, __perf_count(runtime), vruntime),
+ TP_ARGS(tsk, __perf_count(runtime)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, runtime )
- __field( u64, vruntime )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
- __entry->vruntime = vruntime;
),
- TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
+ TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
__entry->comm, __entry->pid,
- (unsigned long long)__entry->runtime,
- (unsigned long long)__entry->vruntime)
+ (unsigned long long)__entry->runtime)
);
DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
- TP_ARGS(tsk, runtime, vruntime));
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
+ TP_ARGS(tsk, runtime));
/*
* Tracepoint for showing priority inheritance modifying a tasks
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 337c90787f..cdd3a45e60 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1675,7 +1675,6 @@ DEFINE_SVCXDRBUF_EVENT(sendto);
svc_rqst_flag(LOCAL) \
svc_rqst_flag(USEDEFERRAL) \
svc_rqst_flag(DROPME) \
- svc_rqst_flag(SPLICE_OK) \
svc_rqst_flag(VICTIM) \
svc_rqst_flag_end(DATA)
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index b4bc2828fa..1ef58a04fc 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -46,22 +46,21 @@ DEFINE_EVENT(timer_class, timer_init,
/**
* timer_start - called when the timer is started
- * @timer: pointer to struct timer_list
- * @expires: the timers expiry time
- * @flags: the timers flags
+ * @timer: pointer to struct timer_list
+ * @bucket_expiry: the bucket expiry time
*/
TRACE_EVENT(timer_start,
TP_PROTO(struct timer_list *timer,
- unsigned long expires,
- unsigned int flags),
+ unsigned long bucket_expiry),
- TP_ARGS(timer, expires, flags),
+ TP_ARGS(timer, bucket_expiry),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
+ __field( unsigned long, bucket_expiry )
__field( unsigned long, now )
__field( unsigned int, flags )
),
@@ -69,15 +68,16 @@ TRACE_EVENT(timer_start,
TP_fast_assign(
__entry->timer = timer;
__entry->function = timer->function;
- __entry->expires = expires;
+ __entry->expires = timer->expires;
+ __entry->bucket_expiry = bucket_expiry;
__entry->now = jiffies;
- __entry->flags = flags;
+ __entry->flags = timer->flags;
),
- TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
+ TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] bucket_expiry=%lu cpu=%u idx=%u flags=%s",
__entry->timer, __entry->function, __entry->expires,
(long)__entry->expires - __entry->now,
- __entry->flags & TIMER_CPUMASK,
+ __entry->bucket_expiry, __entry->flags & TIMER_CPUMASK,
__entry->flags >> TIMER_ARRAYSHIFT,
decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
);
@@ -142,6 +142,26 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+TRACE_EVENT(timer_base_idle,
+
+ TP_PROTO(bool is_idle, unsigned int cpu),
+
+ TP_ARGS(is_idle, cpu),
+
+ TP_STRUCT__entry(
+ __field( bool, is_idle )
+ __field( unsigned int, cpu )
+ ),
+
+ TP_fast_assign(
+ __entry->is_idle = is_idle;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("is_idle=%d cpu=%d",
+ __entry->is_idle, __entry->cpu)
+);
+
#define decode_clockid(type) \
__print_symbolic(type, \
{ CLOCK_REALTIME, "CLOCK_REALTIME" }, \
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 756b013fb8..75f00965ab 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -829,8 +829,21 @@ __SYSCALL(__NR_futex_wait, sys_futex_wait)
#define __NR_futex_requeue 456
__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
+#define __NR_statmount 457
+__SYSCALL(__NR_statmount, sys_statmount)
+
+#define __NR_listmount 458
+__SYSCALL(__NR_listmount, sys_listmount)
+
+#define __NR_lsm_get_self_attr 459
+__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
+#define __NR_lsm_set_self_attr 460
+__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
+#define __NR_lsm_list_modules 461
+__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
+
#undef __NR_syscalls
-#define __NR_syscalls 457
+#define __NR_syscalls 462
/*
* 32 bit systems traditionally used different
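
Until libc grows wrappers, the new mount-observation syscalls are reachable via syscall(2). A minimal sketch, assuming the 6.8 signature statmount(req, buf, bufsize, flags) and the struct mnt_id_req / struct statmount definitions from <linux/mount.h>; error handling is elided:

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/mount.h>	/* struct mnt_id_req, struct statmount */

    #ifndef __NR_statmount
    #define __NR_statmount 457	/* from the table above */
    #endif

    /* Sketch: fetch basic superblock info for one mount ID. */
    static long do_statmount(__u64 mnt_id, struct statmount *buf, size_t bufsize)
    {
    	struct mnt_id_req req = {
    		.size   = MNT_ID_REQ_SIZE_VER0,	/* versioned struct size */
    		.mnt_id = mnt_id,		/* unique (64-bit) mount ID */
    		.param  = STATMOUNT_SB_BASIC,	/* which fields to fill in */
    	};
    	return syscall(__NR_statmount, &req, buf, bufsize, 0);
    }
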
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index de723566c5..16122819ed 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -713,7 +713,8 @@ struct drm_gem_open {
/**
* DRM_CAP_ASYNC_PAGE_FLIP
*
- * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
*/
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
@@ -773,6 +774,13 @@ struct drm_gem_open {
* :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -842,6 +850,31 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal), e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set, the DRM core will hide the cursor plane on
+ * those virtualized drivers, because not setting it implies that the
+ * client is not capable of dealing with those extra restrictions.
+ * Clients which do set cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this property.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this property on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
+
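
A client that manages cursor hotspots would opt in right after enabling atomic, using the drm_set_client_cap argument type that follows; a sketch, with fd assumed open and error handling elided:

    struct drm_set_client_cap cap = {
    	.capability = DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT,
    	.value = 1,
    };
    /* EOPNOTSUPP here tells the client it is not running on a
     * para-virtualized driver that special-cases cursor planes. */
    int ret = ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
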
/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@@ -893,6 +926,7 @@ struct drm_syncobj_transfer {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@@ -901,6 +935,14 @@ struct drm_syncobj_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+ * @deadline_nsec - fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
};
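
A wait carrying the new deadline hint might look like this (sketch; field names per the full struct drm_syncobj_wait in this header, with handle, timeout_abs_ns and deadline_abs_ns assumed caller-provided):

    struct drm_syncobj_wait wait = {
    	.handles = (__u64)(uintptr_t)&handle,
    	.count_handles = 1,
    	.timeout_nsec = timeout_abs_ns,		/* absolute CLOCK_MONOTONIC */
    	.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE,
    	.deadline_nsec = deadline_abs_ns,	/* hint set on the backing fence(s) */
    };
    ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
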
struct drm_syncobj_timeline_wait {
@@ -913,6 +955,14 @@ struct drm_syncobj_timeline_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+ * @deadline_nsec - fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
};
/**
@@ -1218,6 +1268,26 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+/**
+ * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
+ *
+ * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
+ * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
+ * alive. When the plane no longer uses the framebuffer (because the
+ * framebuffer is replaced with another one, or the plane is disabled), the
+ * framebuffer is cleaned up.
+ *
+ * This is useful to implement flicker-free transitions between two processes.
+ *
+ * Depending on the threat model, user-space may want to ensure that the
+ * framebuffer doesn't expose any sensitive user information: closed
+ * framebuffers attached to a plane can be read back by the next DRM master.
+ */
+#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
+
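
With struct drm_mode_closefb (added in the drm_mode.h hunk below), a compositor handing over to another process can drop its FB reference without blanking the screen; a sketch, with fb_id assumed to be a previously added framebuffer:

    struct drm_mode_closefb closefb = { .fb_id = fb_id };
    /* Unlike RMFB, this does not disable the plane; the framebuffer
     * stays alive until the plane stops using it. */
    ioctl(fd, DRM_IOCTL_MODE_CLOSEFB, &closefb);
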
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 3151f1fc7e..84d502e429 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -54,7 +54,7 @@ extern "C" {
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
- * modifier is specific to the modifer being used. For example, some modifiers
+ * modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
@@ -79,7 +79,7 @@ extern "C" {
* format.
* - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
* see modifiers as opaque tokens they can check for equality and intersect.
- * These users musn't need to know to reason about the modifier value
+ * These users mustn't need to know to reason about the modifier value
* (i.e. they are not expected to extract information out of the modifier).
*
* Vendors should document their modifier usage in as much detail as
@@ -540,7 +540,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
- * Each group therefore consits out of four 256 byte units, which are also laid
+ * Each group therefore consists out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@@ -1103,7 +1103,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
/*
- * The top 4 bits (out of the 56 bits alloted for specifying vendor specific
+ * The top 4 bits (out of the 56 bits allotted for specifying vendor specific
* modifiers) denote the category for modifiers. Currently we have three
* categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
* sixteen different categories.
@@ -1419,7 +1419,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Amlogic FBC Memory Saving mode
*
* Indicates the storage is packed when pixel size is multiple of word
- * boudaries, i.e. 8bit should be stored in this mode to save allocation
+ * boundaries, i.e. 8bit should be stored in this mode to save allocation
* memory.
*
* This mode reduces body layout to 3072 bytes per 64x32 superblock with
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 128d09138c..7040e7ea80 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -36,10 +36,10 @@ extern "C" {
/**
* DOC: overview
*
- * DRM exposes many UAPI and structure definition to have a consistent
- * and standardized interface with user.
+ * DRM exposes many UAPI and structure definitions to have a consistent
+ * and standardized interface with users.
* Userspace can refer to these structure definitions and UAPI formats
- * to communicate to driver
+ * to communicate to drivers.
*/
#define DRM_CONNECTOR_NAME_LEN 32
@@ -540,7 +540,7 @@ struct drm_mode_get_connector {
/* the PROP_ATOMIC flag is used to hide properties from userspace that
* is not aware of atomic properties. This is mostly to work around
* older userspace (DDX drivers) that read/write each prop they find,
- * witout being aware that this could be triggering a lengthy modeset.
+ * without being aware that this could be triggering a lengthy modeset.
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
@@ -664,7 +664,7 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
-#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
/**
* struct drm_mode_fb_cmd2 - Frame-buffer metadata.
@@ -846,6 +846,14 @@ struct drm_color_ctm {
__u64 matrix[9];
};
+struct drm_color_ctm_3x4 {
+ /*
+ * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
+ * (not two's complement!) format.
+ */
+ __u64 matrix[12];
+};
+
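
S31.32 sign-magnitude differs from the usual two's-complement fixed point: bit 63 carries the sign and the remaining 63 bits hold the magnitude (integer part in bits 62:32, fraction in bits 31:0). A hypothetical encoding helper — not part of the UAPI — for filling the matrix:

    /* Hypothetical helper: encode a double as S31.32 sign-magnitude. */
    static __u64 ctm_s31_32(double v)
    {
    	__u64 sign = 0;

    	if (v < 0.0) {
    		sign = 1ULL << 63;	/* sign bit, not two's complement */
    		v = -v;
    	}
    	/* Magnitude: multiply by 2^32 to shift the fraction into place. */
    	return sign | (__u64)(v * 4294967296.0);
    }
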
struct drm_color_lut {
/*
* Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
@@ -881,8 +889,8 @@ struct hdr_metadata_infoframe {
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
- * @display_primaries.x: X cordinate of color primary.
- * @display_primaries.y: Y cordinate of color primary.
+ * @display_primaries.x: X coordinate of color primary.
+ * @display_primaries.y: Y coordinate of color primary.
*/
struct {
__u16 x, y;
@@ -892,8 +900,8 @@ struct hdr_metadata_infoframe {
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
- * @white_point.x: X cordinate of whitepoint of color primary.
- * @white_point.y: Y cordinate of whitepoint of color primary.
+ * @white_point.x: X coordinate of whitepoint of color primary.
+ * @white_point.y: Y coordinate of whitepoint of color primary.
*/
struct {
__u16 x, y;
@@ -957,6 +965,15 @@ struct hdr_output_metadata {
* Request that the page-flip is performed as soon as possible, ie. with no
* delay due to waiting for vblank. This may cause tearing to be visible on
* the screen.
+ *
+ * When used with atomic uAPI, the driver will return an error if the hardware
+ * doesn't support performing an asynchronous page-flip for this update.
+ * User-space should handle this, e.g. by falling back to a regular page-flip.
+ *
+ * Note, some hardware might need to perform one last synchronous page-flip
+ * before being able to switch to asynchronous page-flips. As an exception,
+ * the driver will return success even though that first page-flip is not
+ * asynchronous.
*/
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
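
In practice user-space can simply attempt the async flag on an atomic commit and retry without it. A sketch using libdrm (assumed available; drmModeAtomicCommit is taken to return a negative errno on failure):

    int ret = drmModeAtomicCommit(fd, req,
    			      DRM_MODE_ATOMIC_NONBLOCK | DRM_MODE_PAGE_FLIP_ASYNC,
    			      user_data);
    if (ret == -EINVAL)	/* driver rejected async for this update */
    	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, user_data);
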
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
@@ -1323,6 +1340,16 @@ struct drm_mode_rect {
__s32 y2;
};
+/**
+ * struct drm_mode_closefb
+ * @fb_id: Framebuffer ID.
+ * @pad: Must be zero.
+ */
+struct drm_mode_closefb {
+ __u32 fb_id;
+ __u32 pad;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/habanalabs_accel.h b/include/uapi/drm/habanalabs_accel.h
index 347c7b62e6..a512dc4cff 100644
--- a/include/uapi/drm/habanalabs_accel.h
+++ b/include/uapi/drm/habanalabs_accel.h
@@ -846,6 +846,7 @@ enum hl_server_type {
#define HL_INFO_HW_ERR_EVENT 36
#define HL_INFO_FW_ERR_EVENT 37
#define HL_INFO_USER_ENGINE_ERR_EVENT 38
+#define HL_INFO_DEV_SIGNED 40
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@@ -1256,6 +1257,7 @@ struct hl_info_dev_memalloc_page_sizes {
#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+#define SEC_DEV_INFO_BUF_SZ 5120
/*
* struct hl_info_sec_attest - attestation report of the boot
@@ -1290,6 +1292,32 @@ struct hl_info_sec_attest {
__u8 pad0[2];
};
+/*
+ * struct hl_info_signed - device information signed by a secured device.
+ * @nonce: number only used once. random number provided by host. this is also passed to the
+ *	quote command as qualifying data.
+ * @pub_data_len: length of the public data (bytes)
+ * @certificate_len: length of the certificate (bytes)
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @public_data: public key info signed info data (outPublic + name + qualifiedName)
+ * @certificate: certificate for the signing key
+ * @info_sig: signature of the info + nonce data.
+ * @dev_info_len: length of device info (bytes)
+ * @dev_info: device info as byte array.
+ */
+struct hl_info_signed {
+ __u32 nonce;
+ __u16 pub_data_len;
+ __u16 certificate_len;
+ __u8 info_sig_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __u16 dev_info_len;
+ __u8 dev_info[SEC_DEV_INFO_BUF_SZ];
+ __u8 pad[2];
+};
+
/**
* struct hl_page_fault_info - page fault information.
* @timestamp: timestamp of page fault.
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 218edb0a96..fd4f9574d1 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -693,7 +693,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_FENCE 44
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
- * user specified bufffers for post-mortem debugging of GPU hangs. See
+ * user-specified buffers for post-mortem debugging of GPU hangs. See
* EXEC_OBJECT_CAPTURE.
*/
#define I915_PARAM_HAS_EXEC_CAPTURE 45
@@ -1606,7 +1606,7 @@ struct drm_i915_gem_busy {
* is accurate.
*
* The returned dword is split into two fields to indicate both
- * the engine classess on which the object is being read, and the
+ * the engine classes on which the object is being read, and the
* engine class on which it is currently being written (if any).
*
* The low word (bits 0:15) indicate if the object is being written
@@ -1815,7 +1815,7 @@ struct drm_i915_gem_madvise {
__u32 handle;
/* Advice: either the buffer will be needed again in the near future,
- * or wont be and could be discarded under memory pressure.
+ * or won't be and could be discarded under memory pressure.
*/
__u32 madv;
@@ -3246,7 +3246,7 @@ struct drm_i915_query_topology_info {
* // enough to hold our array of engines. The kernel will fill out the
* // item.length for us, which is the number of bytes we need.
* //
- * // Alternatively a large buffer can be allocated straight away enabling
+ * // Alternatively a large buffer can be allocated straightaway enabling
* // querying in one pass, in which case item.length should contain the
* // length of the provided buffer.
* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
@@ -3256,7 +3256,7 @@ struct drm_i915_query_topology_info {
* // Now that we allocated the required number of bytes, we call the ioctl
* // again, this time with the data_ptr pointing to our newly allocated
* // blob, which the kernel can then populate with info on all engines.
- * item.data_ptr = (uintptr_t)&info,
+ * item.data_ptr = (uintptr_t)&info;
*
* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
* if (err) ...
@@ -3286,7 +3286,7 @@ struct drm_i915_query_topology_info {
/**
* struct drm_i915_engine_info
*
- * Describes one engine and it's capabilities as known to the driver.
+ * Describes one engine and its capabilities as known to the driver.
*/
struct drm_i915_engine_info {
/** @engine: Engine class and instance. */
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 262db0c3be..19a13468ec 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -53,7 +53,7 @@ extern "C" {
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3
#define DRM_IVPU_PARAM_NUM_CONTEXTS 4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
-#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6
+#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6 /* Deprecated */
#define DRM_IVPU_PARAM_CONTEXT_ID 7
#define DRM_IVPU_PARAM_FW_API_VERSION 8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9
@@ -64,11 +64,18 @@ extern "C" {
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
+/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
+#define DRM_IVPU_JOB_PRIORITY_DEFAULT 0
+#define DRM_IVPU_JOB_PRIORITY_IDLE 1
+#define DRM_IVPU_JOB_PRIORITY_NORMAL 2
+#define DRM_IVPU_JOB_PRIORITY_FOCUS 3
+#define DRM_IVPU_JOB_PRIORITY_REALTIME 4
+
/**
* DRM_IVPU_CAP_METRIC_STREAMER
*
@@ -112,10 +119,6 @@ struct drm_ivpu_param {
* %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
* Lowest VPU virtual address available in the current context (read-only)
*
- * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
- * Value of current context scheduling priority (read-write).
- * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
- *
* %DRM_IVPU_PARAM_CONTEXT_ID:
* Current context ID, always greater than 0 (read-only)
*
@@ -196,7 +199,7 @@ struct drm_ivpu_bo_create {
*
* %DRM_IVPU_BO_UNCACHED:
*
- * Allocated BO will not be cached on host side nor snooped on the VPU side.
+ * Not supported. Use DRM_IVPU_BO_WC instead.
*
* %DRM_IVPU_BO_WC:
*
@@ -286,10 +289,23 @@ struct drm_ivpu_submit {
* to be executed. The offset has to be 8-byte aligned.
*/
__u32 commands_offset;
+
+ /**
+ * @priority:
+ *
+ * Priority to be set for related job command queue, can be one of the following:
+ * %DRM_IVPU_JOB_PRIORITY_DEFAULT
+ * %DRM_IVPU_JOB_PRIORITY_IDLE
+ * %DRM_IVPU_JOB_PRIORITY_NORMAL
+ * %DRM_IVPU_JOB_PRIORITY_FOCUS
+ * %DRM_IVPU_JOB_PRIORITY_REALTIME
+ */
+ __u32 priority;
};
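
A submission requesting an explicit priority might look like this (sketch; the other required fields of struct drm_ivpu_submit are elided, and the DRM_IOCTL_IVPU_SUBMIT wrapper from this header is assumed):

    struct drm_ivpu_submit submit = {0};
    /* ... buffer list, engine, etc. filled in as before ... */
    submit.commands_offset = 0;				/* 8-byte aligned */
    submit.priority = DRM_IVPU_JOB_PRIORITY_NORMAL;	/* 0 == DEFAULT keeps old behaviour */
    ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, &submit);
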
/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0
+#define DRM_IVPU_JOB_STATUS_ABORTED 256
/**
* struct drm_ivpu_bo_wait - Wait for BO to become inactive
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 6c34272a13..d8a6b34727 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -86,6 +86,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
+#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
@@ -139,6 +140,8 @@ struct drm_msm_gem_new {
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
#define MSM_INFO_GET_FLAGS 0x05 /* get the MSM_BO_x flags */
+#define MSM_INFO_SET_METADATA 0x06 /* set userspace metadata */
+#define MSM_INFO_GET_METADATA 0x07 /* get userspace metadata */
struct drm_msm_gem_info {
__u32 handle; /* in */
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 0bade1592f..77d7ff0d5b 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -54,6 +54,20 @@ extern "C" {
*/
#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
+/*
+ * NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
+ *
+ * Query the VRAM BAR size.
+ */
+#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
+
+/*
+ * NOUVEAU_GETPARAM_VRAM_USED
+ *
+ * Get remaining VRAM size.
+ */
+#define NOUVEAU_GETPARAM_VRAM_USED 19
+
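
Both new parameters are read through the existing getparam path, using the drm_nouveau_getparam struct shown just below; a sketch (assuming the DRM_IOCTL_NOUVEAU_GETPARAM wrapper; error handling elided):

    struct drm_nouveau_getparam gp = {
    	.param = NOUVEAU_GETPARAM_VRAM_BAR_SIZE,
    };
    if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp) == 0)
    	/* gp.value now holds the BAR size in bytes. */
    	use_bar_size(gp.value);	/* hypothetical consumer */
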
struct drm_nouveau_getparam {
__u64 param;
__u64 value;
diff --git a/include/uapi/drm/pvr_drm.h b/include/uapi/drm/pvr_drm.h
new file mode 100644
index 0000000000..ccf6c21124
--- /dev/null
+++ b/include/uapi/drm/pvr_drm.h
@@ -0,0 +1,1295 @@
+/* SPDX-License-Identifier: (GPL-2.0-only WITH Linux-syscall-note) OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DRM_UAPI_H
+#define PVR_DRM_UAPI_H
+
+#include "drm.h"
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: PowerVR UAPI
+ *
+ * The PowerVR IOCTL argument structs have a few limitations in place, in
+ * addition to the standard kernel restrictions:
+ *
+ * - All members must be type-aligned.
+ * - The overall struct must be padded to 64-bit alignment.
+ * - Explicit padding is almost always required. This takes the form of
+ * ``_padding_[x]`` members of sufficient size to pad to the next power-of-two
+ * alignment, where [x] is the offset into the struct in hexadecimal. Arrays
+ * are never used for alignment. Padding fields must be zeroed; this is
+ * always checked.
+ * - Unions may only appear as the last member of a struct.
+ * - Individual union members may grow in the future. The space between the
+ * end of a union member and the end of its containing union is considered
+ * "implicit padding" and must be zeroed. This is always checked.
+ *
+ * In addition to the IOCTL argument structs, the PowerVR UAPI makes use of
+ * DEV_QUERY argument structs. These are used to fetch information about the
+ * device and runtime. These structs are subject to the same rules set out
+ * above.
+ */
+
+/**
+ * struct drm_pvr_obj_array - Container used to pass arrays of objects
+ *
+ * It is not unusual to have to extend objects to pass new parameters, and the DRM
+ * ioctl infrastructure is supporting that by padding ioctl arguments with zeros
+ * when the data passed by userspace is smaller than the struct defined in the
+ * drm_ioctl_desc, thus keeping things backward compatible. This type is just
+ * applying the same concepts to indirect objects passed through arrays referenced
+ * from the main ioctl arguments structure: the stride basically defines the size
+ * of the object passed by userspace, which allows the kernel driver to pad with
+ * zeros when it's smaller than the size of the object it expects.
+ *
+ * Use ``DRM_PVR_OBJ_ARRAY()`` to fill object array fields, unless you
+ * have a very good reason not to.
+ */
+struct drm_pvr_obj_array {
+ /** @stride: Stride of object struct. Used for versioning. */
+ __u32 stride;
+
+ /** @count: Number of objects in the array. */
+ __u32 count;
+
+ /** @array: User pointer to an array of objects. */
+ __u64 array;
+};
+
+/**
+ * DRM_PVR_OBJ_ARRAY() - Helper macro for filling &struct drm_pvr_obj_array.
+ * @cnt: Number of elements pointed to by @ptr.
+ * @ptr: Pointer to start of a C array.
+ *
+ * Return: Literal of type &struct drm_pvr_obj_array.
+ */
+#define DRM_PVR_OBJ_ARRAY(cnt, ptr) \
+ { .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
+
+/**
+ * DOC: PowerVR IOCTL interface
+ */
+
+/**
+ * PVR_IOCTL() - Build a PowerVR IOCTL number
+ * @_ioctl: An incrementing id for this IOCTL. Added to %DRM_COMMAND_BASE.
+ * @_mode: Must be one of %DRM_IOR, %DRM_IOW or %DRM_IOWR.
+ * @_data: The type of the args struct passed by this IOCTL.
+ *
+ * The struct referred to by @_data must have a ``drm_pvr_ioctl_`` prefix and an
+ * ``_args`` suffix. They are therefore omitted from @_data.
+ *
+ * This should only be used to build the constants described below; it should
+ * never be used to call an IOCTL directly.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define PVR_IOCTL(_ioctl, _mode, _data) \
+ _mode(DRM_COMMAND_BASE + (_ioctl), struct drm_pvr_ioctl_##_data##_args)
+
+#define DRM_IOCTL_PVR_DEV_QUERY PVR_IOCTL(0x00, DRM_IOWR, dev_query)
+#define DRM_IOCTL_PVR_CREATE_BO PVR_IOCTL(0x01, DRM_IOWR, create_bo)
+#define DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET PVR_IOCTL(0x02, DRM_IOWR, get_bo_mmap_offset)
+#define DRM_IOCTL_PVR_CREATE_VM_CONTEXT PVR_IOCTL(0x03, DRM_IOWR, create_vm_context)
+#define DRM_IOCTL_PVR_DESTROY_VM_CONTEXT PVR_IOCTL(0x04, DRM_IOW, destroy_vm_context)
+#define DRM_IOCTL_PVR_VM_MAP PVR_IOCTL(0x05, DRM_IOW, vm_map)
+#define DRM_IOCTL_PVR_VM_UNMAP PVR_IOCTL(0x06, DRM_IOW, vm_unmap)
+#define DRM_IOCTL_PVR_CREATE_CONTEXT PVR_IOCTL(0x07, DRM_IOWR, create_context)
+#define DRM_IOCTL_PVR_DESTROY_CONTEXT PVR_IOCTL(0x08, DRM_IOW, destroy_context)
+#define DRM_IOCTL_PVR_CREATE_FREE_LIST PVR_IOCTL(0x09, DRM_IOWR, create_free_list)
+#define DRM_IOCTL_PVR_DESTROY_FREE_LIST PVR_IOCTL(0x0a, DRM_IOW, destroy_free_list)
+#define DRM_IOCTL_PVR_CREATE_HWRT_DATASET PVR_IOCTL(0x0b, DRM_IOWR, create_hwrt_dataset)
+#define DRM_IOCTL_PVR_DESTROY_HWRT_DATASET PVR_IOCTL(0x0c, DRM_IOW, destroy_hwrt_dataset)
+#define DRM_IOCTL_PVR_SUBMIT_JOBS PVR_IOCTL(0x0d, DRM_IOW, submit_jobs)
+
+/**
+ * DOC: PowerVR IOCTL DEV_QUERY interface
+ */
+
+/**
+ * struct drm_pvr_dev_query_gpu_info - Container used to fetch information about
+ * the graphics processor.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_GPU_INFO_GET.
+ */
+struct drm_pvr_dev_query_gpu_info {
+ /**
+ * @gpu_id: GPU identifier.
+ *
+ * For all currently supported GPUs this is the BVNC encoded as a 64-bit
+ * value as follows:
+ *
+ * +--------+--------+--------+-------+
+ * | 63..48 | 47..32 | 31..16 | 15..0 |
+ * +========+========+========+=======+
+ * | B | V | N | C |
+ * +--------+--------+--------+-------+
+ */
+ __u64 gpu_id;
+
+ /**
+ * @num_phantoms: Number of Phantoms present.
+ */
+ __u32 num_phantoms;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * struct drm_pvr_dev_query_runtime_info - Container used to fetch information
+ * about the graphics runtime.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET.
+ */
+struct drm_pvr_dev_query_runtime_info {
+ /**
+ * @free_list_min_pages: Minimum allowed free list size,
+ * in PM physical pages.
+ */
+ __u64 free_list_min_pages;
+
+ /**
+ * @free_list_max_pages: Maximum allowed free list size,
+ * in PM physical pages.
+ */
+ __u64 free_list_max_pages;
+
+ /**
+ * @common_store_alloc_region_size: Size of the Allocation
+ * Region within the Common Store used for coefficient and shared
+ * registers, in dwords.
+ */
+ __u32 common_store_alloc_region_size;
+
+ /**
+ * @common_store_partition_space_size: Size of the
+ * Partition Space within the Common Store for output buffers, in
+ * dwords.
+ */
+ __u32 common_store_partition_space_size;
+
+ /**
+ * @max_coeffs: Maximum coefficients, in dwords.
+ */
+ __u32 max_coeffs;
+
+ /**
+ * @cdm_max_local_mem_size_regs: Maximum amount of local
+ * memory available to a compute kernel, in dwords.
+ */
+ __u32 cdm_max_local_mem_size_regs;
+};
+
+/**
+ * struct drm_pvr_dev_query_quirks - Container used to fetch information about
+ * hardware fixes for which the device may require support in the user mode
+ * driver.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_QUIRKS_GET.
+ */
+struct drm_pvr_dev_query_quirks {
+ /**
+ * @quirks: A userspace address for the hardware quirks __u32 array.
+ *
+ * The first @musthave_count items in the list are quirks that the
+ * client must support for this device. If userspace does not support
+ * all these quirks then functionality is not guaranteed and client
+ * initialisation must fail.
+ * The remaining quirks in the list affect userspace and the kernel or
+ * firmware. They are disabled by default and require userspace to
+ * opt-in. The opt-in mechanism depends on the quirk.
+ */
+ __u64 quirks;
+
+ /** @count: Length of @quirks (number of __u32). */
+ __u16 count;
+
+ /**
+ * @musthave_count: The number of entries in @quirks that are
+ * mandatory, starting at index 0.
+ */
+ __u16 musthave_count;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * struct drm_pvr_dev_query_enhancements - Container used to fetch information
+ * about optional enhancements supported by the device that require support in
+ * the user mode driver.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_ENHANCEMENTS_GET.
+ */
+struct drm_pvr_dev_query_enhancements {
+ /**
+ * @enhancements: A userspace address for the hardware enhancements
+ * __u32 array.
+ *
+ * These enhancements affect userspace and the kernel or firmware. They
+ * are disabled by default and require userspace to opt-in. The opt-in
+ * mechanism depends on the enhancement.
+ */
+ __u64 enhancements;
+
+ /** @count: Length of @enhancements (number of __u32). */
+ __u16 count;
+
+ /** @_padding_a: Reserved. This field must be zeroed. */
+ __u16 _padding_a;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * enum drm_pvr_heap_id - Array index for heap info data returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * For compatibility reasons all indices will be present in the returned array,
+ * however some heaps may not be present. These are indicated where
+ * &struct drm_pvr_heap.size is set to zero.
+ */
+enum drm_pvr_heap_id {
+ /** @DRM_PVR_HEAP_GENERAL: General purpose heap. */
+ DRM_PVR_HEAP_GENERAL = 0,
+ /** @DRM_PVR_HEAP_PDS_CODE_DATA: PDS code and data heap. */
+ DRM_PVR_HEAP_PDS_CODE_DATA,
+ /** @DRM_PVR_HEAP_USC_CODE: USC code heap. */
+ DRM_PVR_HEAP_USC_CODE,
+ /** @DRM_PVR_HEAP_RGNHDR: Region header heap. Only used if GPU has BRN63142. */
+ DRM_PVR_HEAP_RGNHDR,
+ /** @DRM_PVR_HEAP_VIS_TEST: Visibility test heap. */
+ DRM_PVR_HEAP_VIS_TEST,
+ /** @DRM_PVR_HEAP_TRANSFER_FRAG: Transfer fragment heap. */
+ DRM_PVR_HEAP_TRANSFER_FRAG,
+
+ /**
+ * @DRM_PVR_HEAP_COUNT: The number of heaps returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * More heaps may be added, so this also serves as the copy limit when
+ * sent by the caller.
+ */
+ DRM_PVR_HEAP_COUNT
+ /* Please only add additional heaps above DRM_PVR_HEAP_COUNT! */
+};
+
+/**
+ * struct drm_pvr_heap - Container holding information about a single heap.
+ *
+ * This will always be fetched as an array.
+ */
+struct drm_pvr_heap {
+ /** @base: Base address of heap. */
+ __u64 base;
+
+ /** @size: Size of heap, in bytes. Will be 0 if the heap is not present. */
+ __u64 size;
+
+ /** @flags: Flags for this heap. Currently always 0. */
+ __u32 flags;
+
+ /** @page_size_log2: Log2 of page size. */
+ __u32 page_size_log2;
+};
+
+/**
+ * struct drm_pvr_dev_query_heap_info - Container used to fetch information
+ * about heaps supported by the device driver.
+ *
+ * Please note all driver-supported heaps will be returned up to &heaps.count.
+ * Some heaps will not be present in all devices, which will be indicated by
+ * &struct drm_pvr_heap.size being set to zero.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+struct drm_pvr_dev_query_heap_info {
+ /**
+ * @heaps: Array of &struct drm_pvr_heap. If pointer is NULL, the count
+ * and stride will be updated with those known to the driver version, to
+ * facilitate allocation by the caller.
+ */
+ struct drm_pvr_obj_array heaps;
+};
+
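
Fetching the heap array is therefore a two-step dance: pass a NULL array to learn the count and stride, then allocate and repeat. A sketch, where query() is a hypothetical wrapper around DRM_IOCTL_PVR_DEV_QUERY (see the args struct further down):

    struct drm_pvr_dev_query_heap_info hinfo = { .heaps = { .array = 0 } };
    /* First pass: with a NULL array pointer the driver only fills in
     * heaps.count and heaps.stride. */
    query(fd, DRM_PVR_DEV_QUERY_HEAP_INFO_GET, &hinfo, sizeof(hinfo));

    struct drm_pvr_heap *heaps = calloc(hinfo.heaps.count, hinfo.heaps.stride);
    hinfo.heaps.array = (__u64)(uintptr_t)heaps;
    /* Second pass: the driver copies out up to heaps.count entries;
     * absent heaps have size == 0. */
    query(fd, DRM_PVR_DEV_QUERY_HEAP_INFO_GET, &hinfo, sizeof(hinfo));
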
+/**
+ * enum drm_pvr_static_data_area_usage - Array index for static data area info
+ * returned by %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
+ *
+ * For compatibility reasons all indices will be present in the returned array,
+ * however some areas may not be present. These are indicated where
+ * &struct drm_pvr_static_data_area.size is set to zero.
+ */
+enum drm_pvr_static_data_area_usage {
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_EOT: End of Tile PDS program code segment.
+ *
+ * The End of Tile PDS task runs at completion of a tile during a fragment job, and is
+ * responsible for emitting the tile to the Pixel Back End.
+ */
+ DRM_PVR_STATIC_DATA_AREA_EOT = 0,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_FENCE: MCU fence area, used during cache flush and
+ * invalidation.
+ *
+ * This must point to valid physical memory but the contents otherwise are not used.
+ */
+ DRM_PVR_STATIC_DATA_AREA_FENCE,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_VDM_SYNC: VDM sync program.
+ *
+ * The VDM sync program is used to synchronise multiple areas of the GPU hardware.
+ */
+ DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_YUV_CSC: YUV coefficients.
+ *
+ * Area contains up to 16 slots with stride of 64 bytes. Each is a 3x4 matrix of u16 fixed
+ * point numbers, with 1 sign bit, 2 integer bits and 13 fractional bits.
+ *
+	 * The slots are:
+ * 0 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR
+ * 1 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (full range)
+ * 2 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (conformant range)
+ * 3 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (full range)
+ * 4 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range)
+ * 5 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (full range)
+ * 6 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range)
+ * 7 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (full range)
+ * 8 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range)
+ * 9 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range, 10 bit)
+ * 10 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range, 10 bit)
+ * 11 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range, 10 bit)
+ * 14 = Identity (biased)
+ * 15 = Identity
+ */
+ DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
+};
+
+/**
+ * struct drm_pvr_static_data_area - Container holding information about a
+ * single static data area.
+ *
+ * This will always be fetched as an array.
+ */
+struct drm_pvr_static_data_area {
+ /**
+ * @area_usage: Usage of static data area.
+ * See &enum drm_pvr_static_data_area_usage.
+ */
+ __u16 area_usage;
+
+ /**
+	 * @location_heap_id: Array index of the heap where this static data
+ * area is located. This array is fetched using
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+ __u16 location_heap_id;
+
+ /** @size: Size of static data area. Not present if set to zero. */
+ __u32 size;
+
+ /** @offset: Offset of static data area from start of heap. */
+ __u64 offset;
+};
+
+/**
+ * struct drm_pvr_dev_query_static_data_areas - Container used to fetch
+ * information about the static data areas in heaps supported by the device
+ * driver.
+ *
+ * Please note all driver-supported static data areas will be returned up to
+ * &static_data_areas.count. Some will not be present for all devices, which
+ * will be indicated by &struct drm_pvr_static_data_area.size being set to zero.
+ *
+ * Further, some heaps will not be present either. See &struct
+ * drm_pvr_dev_query_heap_info.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
+ */
+struct drm_pvr_dev_query_static_data_areas {
+ /**
+ * @static_data_areas: Array of &struct drm_pvr_static_data_area. If
+ * pointer is NULL, the count and stride will be updated with those
+ * known to the driver version, to facilitate allocation by the caller.
+ */
+ struct drm_pvr_obj_array static_data_areas;
+};
+
+/**
+ * enum drm_pvr_dev_query - For use with &drm_pvr_ioctl_dev_query_args.type to
+ * indicate the type of the receiving container.
+ *
+ * Append only. Do not reorder.
+ */
+enum drm_pvr_dev_query {
+ /**
+ * @DRM_PVR_DEV_QUERY_GPU_INFO_GET: The dev query args contain a pointer
+ * to &struct drm_pvr_dev_query_gpu_info.
+ */
+ DRM_PVR_DEV_QUERY_GPU_INFO_GET = 0,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_runtime_info.
+ */
+ DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_QUIRKS_GET: The dev query args contain a pointer
+ * to &struct drm_pvr_dev_query_quirks.
+ */
+ DRM_PVR_DEV_QUERY_QUIRKS_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_enhancements.
+ */
+ DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_HEAP_INFO_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_heap_info.
+ */
+ DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET: The dev query args contain
+ * a pointer to &struct drm_pvr_dev_query_static_data_areas.
+ */
+ DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
+};
+
+/**
+ * struct drm_pvr_ioctl_dev_query_args - Arguments for %DRM_IOCTL_PVR_DEV_QUERY.
+ */
+struct drm_pvr_ioctl_dev_query_args {
+ /**
+ * @type: Type of query and output struct. See &enum drm_pvr_dev_query.
+ */
+ __u32 type;
+
+ /**
+ * @size: Size of the receiving struct, see @type.
+ *
+ * After a successful call this will be updated to the written byte
+ * length.
+ * Can also be used to get the minimum byte length (see @pointer).
+ * This allows additional fields to be appended to the structs in
+ * future.
+ */
+ __u32 size;
+
+ /**
+ * @pointer: Pointer to struct @type.
+ *
+ * Must be large enough to contain @size bytes.
+ * If pointer is NULL, the expected size will be returned in the @size
+ * field, but no other data will be written.
+ */
+ __u64 pointer;
+};
+
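
A minimal fetch of the GPU info through this interface (sketch; error handling elided):

    struct drm_pvr_dev_query_gpu_info gpu = {0};
    struct drm_pvr_ioctl_dev_query_args args = {
    	.type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
    	.size = sizeof(gpu),
    	.pointer = (__u64)(uintptr_t)&gpu,
    };
    ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
    /* args.size now holds the number of bytes actually written. */
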
+/**
+ * DOC: PowerVR IOCTL CREATE_BO interface
+ */
+
+/**
+ * DOC: Flags for CREATE_BO
+ *
+ * We use "device" to refer to the GPU here because of the ambiguity between CPU and GPU in some
+ * fonts.
+ *
+ * Device mapping options
+ * :DRM_PVR_BO_BYPASS_DEVICE_CACHE: Specify that device accesses to this memory will bypass the
+ * cache. This is used for buffers that will either be regularly updated by the CPU (e.g. free
+ * lists) or will be accessed only once and are therefore not worth caching (e.g. partial render
+ * buffers).
+ * By default, the device flushes its memory caches after every job, so this is not normally
+ * required for coherency.
+ * :DRM_PVR_BO_PM_FW_PROTECT: Specify that only the Parameter Manager (PM) and/or firmware
+ * processor should be allowed to access this memory when mapped to the device. It is not
+ * valid to specify this flag with DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS.
+ *
+ * CPU mapping options
+ * :DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS: Allow userspace to map and access the contents of this
+ * memory. It is not valid to specify this flag with DRM_PVR_BO_PM_FW_PROTECT.
+ */
+#define DRM_PVR_BO_BYPASS_DEVICE_CACHE _BITULL(0)
+#define DRM_PVR_BO_PM_FW_PROTECT _BITULL(1)
+#define DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS _BITULL(2)
+/* Bits 3..63 are reserved. */
+
+#define DRM_PVR_BO_FLAGS_MASK (DRM_PVR_BO_BYPASS_DEVICE_CACHE | DRM_PVR_BO_PM_FW_PROTECT | \
+ DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS)
+
+/**
+ * struct drm_pvr_ioctl_create_bo_args - Arguments for %DRM_IOCTL_PVR_CREATE_BO
+ */
+struct drm_pvr_ioctl_create_bo_args {
+ /**
+ * @size: [IN] Size of buffer object to create. This must be page size
+ * aligned.
+ */
+ __u64 size;
+
+ /**
+ * @handle: [OUT] GEM handle of the new buffer object for use in
+ * userspace.
+ */
+ __u32 handle;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+
+ /**
+ * @flags: [IN] Options which will affect the behaviour of this
+ * creation operation and future mapping operations on the created
+ * object. This field must be a valid combination of ``DRM_PVR_BO_*``
+ * values, with all bits marked as reserved set to zero.
+ */
+ __u64 flags;
+};
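+
+/*
+ * Example: creating a buffer object that userspace may later map (an
+ * illustrative sketch; assumes a 4 KiB page size and that fd is an open
+ * PowerVR render node, with error handling omitted).
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_ioctl_create_bo_args bo_args = {
+ *             .size = 4096, // must be page size aligned
+ *             .flags = DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS,
+ *     };
+ *
+ *     ioctl(fd, DRM_IOCTL_PVR_CREATE_BO, &bo_args);
+ *     // bo_args.handle now holds the new GEM handle.
+ */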
+
+/**
+ * DOC: PowerVR IOCTL GET_BO_MMAP_OFFSET interface
+ */
+
+/**
+ * struct drm_pvr_ioctl_get_bo_mmap_offset_args - Arguments for
+ * %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET
+ *
+ * Like other DRM drivers, the "mmap" IOCTL doesn't actually map any memory.
+ * Instead, it allocates a fake offset which refers to the specified buffer
+ * object. This offset can be used with a real mmap call on the DRM device
+ * itself.
+ */
+struct drm_pvr_ioctl_get_bo_mmap_offset_args {
+ /** @handle: [IN] GEM handle of the buffer object to be mapped. */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+
+ /** @offset: [OUT] Fake offset to use in the real mmap call. */
+ __u64 offset;
+};
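+
+/*
+ * Example: turning a GEM handle into a CPU mapping (an illustrative
+ * sketch; bo_args.handle is assumed to come from a prior CREATE_BO call on
+ * a BO created with DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS).
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_ioctl_get_bo_mmap_offset_args off_args = {
+ *             .handle = bo_args.handle,
+ *     };
+ *
+ *     ioctl(fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &off_args);
+ *
+ *     // The fake offset is then passed to a real mmap call on the DRM fd.
+ *     void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                      fd, off_args.offset);
+ */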
+
+/**
+ * DOC: PowerVR IOCTL CREATE_VM_CONTEXT and DESTROY_VM_CONTEXT interfaces
+ */
+
+/**
+ * struct drm_pvr_ioctl_create_vm_context_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_VM_CONTEXT
+ */
+struct drm_pvr_ioctl_create_vm_context_args {
+ /** @handle: [OUT] Handle for new VM context. */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_vm_context_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT
+ */
+struct drm_pvr_ioctl_destroy_vm_context_args {
+ /**
+ * @handle: [IN] Handle for VM context to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL VM_MAP and VM_UNMAP interfaces
+ *
+ * The VM UAPI allows userspace to create buffer object mappings in GPU virtual address space.
+ *
+ * The client is responsible for managing GPU address space. It should allocate mappings within
+ * the heaps returned by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * %DRM_IOCTL_PVR_VM_MAP creates a new mapping. The client provides the target virtual address for
+ * the mapping. Size and offset within the mapped buffer object can be specified, so the client can
+ * partially map a buffer.
+ *
+ * %DRM_IOCTL_PVR_VM_UNMAP removes a mapping. The entire mapping will be removed from GPU address
+ * space only if the size of the mapping matches that known to the driver.
+ */
+
+/**
+ * struct drm_pvr_ioctl_vm_map_args - Arguments for %DRM_IOCTL_PVR_VM_MAP.
+ */
+struct drm_pvr_ioctl_vm_map_args {
+ /**
+ * @vm_context_handle: [IN] Handle for VM context for this mapping to
+ * exist in.
+ */
+ __u32 vm_context_handle;
+
+ /** @flags: [IN] Flags which affect this mapping. Currently always 0. */
+ __u32 flags;
+
+ /**
+ * @device_addr: [IN] Requested device-virtual address for the mapping.
+ * This must be non-zero and aligned to the device page size for the
+ * heap containing the requested address. It is an error to specify an
+ * address which is not contained within one of the heaps returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+ __u64 device_addr;
+
+ /**
+ * @handle: [IN] Handle of the target buffer object. This must be a
+ * valid handle returned by %DRM_IOCTL_PVR_CREATE_BO.
+ */
+ __u32 handle;
+
+ /** @_padding_14: Reserved. This field must be zeroed. */
+ __u32 _padding_14;
+
+ /**
+ * @offset: [IN] Offset into the target bo from which to begin the
+ * mapping.
+ */
+ __u64 offset;
+
+ /**
+ * @size: [IN] Size of the requested mapping. Must be aligned to
+ * the device page size for the heap containing the requested address,
+ * as well as the host page size. When added to @device_addr, the
+ * result must not overflow the heap which contains @device_addr (i.e.
+ * the range specified by @device_addr and @size must be completely
+ * contained within a single heap specified by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET).
+ */
+ __u64 size;
+};
+
+/**
+ * struct drm_pvr_ioctl_vm_unmap_args - Arguments for %DRM_IOCTL_PVR_VM_UNMAP.
+ */
+struct drm_pvr_ioctl_vm_unmap_args {
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that this mapping
+ * exists in.
+ */
+ __u32 vm_context_handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+
+ /**
+ * @device_addr: [IN] Device-virtual address at the start of the target
+ * mapping. This must be non-zero.
+ */
+ __u64 device_addr;
+
+ /**
+ * @size: Size in bytes of the target mapping. This must be non-zero.
+ */
+ __u64 size;
+};
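+
+/*
+ * Example: mapping and later unmapping a whole buffer object (an
+ * illustrative sketch; vm_ctx, bo_handle, bo_size and device_addr are
+ * assumed to satisfy the alignment and heap constraints described above).
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_ioctl_vm_map_args map_args = {
+ *             .vm_context_handle = vm_ctx,
+ *             .device_addr = device_addr,
+ *             .handle = bo_handle,
+ *             .offset = 0,
+ *             .size = bo_size,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_PVR_VM_MAP, &map_args);
+ *
+ *     struct drm_pvr_ioctl_vm_unmap_args unmap_args = {
+ *             .vm_context_handle = vm_ctx,
+ *             .device_addr = device_addr,
+ *             .size = bo_size, // must match the size known to the driver
+ *     };
+ *     ioctl(fd, DRM_IOCTL_PVR_VM_UNMAP, &unmap_args);
+ */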
+
+/**
+ * DOC: PowerVR IOCTL CREATE_CONTEXT and DESTROY_CONTEXT interfaces
+ */
+
+/**
+ * enum drm_pvr_ctx_priority - Arguments for
+ * &drm_pvr_ioctl_create_context_args.priority
+ */
+enum drm_pvr_ctx_priority {
+ /** @DRM_PVR_CTX_PRIORITY_LOW: Priority below normal. */
+ DRM_PVR_CTX_PRIORITY_LOW = -512,
+
+ /** @DRM_PVR_CTX_PRIORITY_NORMAL: Normal priority. */
+ DRM_PVR_CTX_PRIORITY_NORMAL = 0,
+
+ /**
+ * @DRM_PVR_CTX_PRIORITY_HIGH: Priority above normal.
+ * Note this requires ``CAP_SYS_NICE`` or ``DRM_MASTER``.
+ */
+ DRM_PVR_CTX_PRIORITY_HIGH = 512,
+};
+
+/**
+ * enum drm_pvr_ctx_type - Arguments for
+ * &struct drm_pvr_ioctl_create_context_args.type
+ */
+enum drm_pvr_ctx_type {
+ /**
+ * @DRM_PVR_CTX_TYPE_RENDER: Render context.
+ */
+ DRM_PVR_CTX_TYPE_RENDER = 0,
+
+ /**
+ * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context.
+ */
+ DRM_PVR_CTX_TYPE_COMPUTE,
+
+ /**
+ * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data
+ * master.
+ */
+ DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
+};
+
+/**
+ * struct drm_pvr_ioctl_create_context_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_CONTEXT
+ */
+struct drm_pvr_ioctl_create_context_args {
+ /**
+ * @type: [IN] Type of context to create.
+ *
+ * This must be one of the values defined by &enum drm_pvr_ctx_type.
+ */
+ __u32 type;
+
+ /** @flags: [IN] Flags for context. */
+ __u32 flags;
+
+ /**
+ * @priority: [IN] Priority of new context.
+ *
+ * This must be one of the values defined by &enum drm_pvr_ctx_priority.
+ */
+ __s32 priority;
+
+ /** @handle: [OUT] Handle for new context. */
+ __u32 handle;
+
+ /**
+ * @static_context_state: [IN] Pointer to static context state stream.
+ */
+ __u64 static_context_state;
+
+ /**
+ * @static_context_state_len: [IN] Length of static context state, in bytes.
+ */
+ __u32 static_context_state_len;
+
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that this context is
+ * associated with.
+ */
+ __u32 vm_context_handle;
+
+ /**
+ * @callstack_addr: [IN] Address for initial call stack pointer. Only valid
+ * if @type is %DRM_PVR_CTX_TYPE_RENDER, otherwise must be 0.
+ */
+ __u64 callstack_addr;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_context_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_CONTEXT
+ */
+struct drm_pvr_ioctl_destroy_context_args {
+ /**
+ * @handle: [IN] Handle for context to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_FREE_LIST and DESTROY_FREE_LIST interfaces
+ */
+
+/**
+ * struct drm_pvr_ioctl_create_free_list_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_FREE_LIST
+ *
+ * Free list arguments have the following constraints:
+ *
+ * - @max_num_pages must be greater than zero.
+ * - @grow_threshold must be between 0 and 100.
+ * - @grow_num_pages must be less than or equal to @max_num_pages.
+ * - @initial_num_pages, @max_num_pages and @grow_num_pages must be multiples
+ * of 4.
+ * - When @grow_num_pages is 0, @initial_num_pages must be equal to
+ * @max_num_pages.
+ * - When @grow_num_pages is non-zero, @initial_num_pages must be less than
+ * @max_num_pages.
+ */
+struct drm_pvr_ioctl_create_free_list_args {
+ /**
+ * @free_list_gpu_addr: [IN] Address of GPU mapping of buffer object
+ * containing memory to be used by free list.
+ *
+ * The mapped region of the buffer object must be at least
+ * @max_num_pages * ``sizeof(__u32)``.
+ *
+ * The buffer object must have been created with
+ * %DRM_PVR_BO_PM_FW_PROTECT set and
+ * %DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS not set.
+ */
+ __u64 free_list_gpu_addr;
+
+ /** @initial_num_pages: [IN] Pages initially allocated to free list. */
+ __u32 initial_num_pages;
+
+ /** @max_num_pages: [IN] Maximum number of pages in free list. */
+ __u32 max_num_pages;
+
+ /** @grow_num_pages: [IN] Pages to grow free list by per request. */
+ __u32 grow_num_pages;
+
+ /**
+ * @grow_threshold: [IN] Percentage of FL memory used that should
+ * trigger a new grow request.
+ */
+ __u32 grow_threshold;
+
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that the free list buffer
+ * object is mapped in.
+ */
+ __u32 vm_context_handle;
+
+ /**
+ * @handle: [OUT] Handle for created free list.
+ */
+ __u32 handle;
+};
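+
+/*
+ * Example: free list arguments that satisfy the constraints listed above
+ * (illustrative values only; fl_dev_addr is assumed to point at a mapping
+ * created with %DRM_PVR_BO_PM_FW_PROTECT set).
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_ioctl_create_free_list_args fl_args = {
+ *             .free_list_gpu_addr = fl_dev_addr,
+ *             .initial_num_pages = 256, // multiple of 4, < max_num_pages
+ *             .max_num_pages = 1024, // multiple of 4, > 0
+ *             .grow_num_pages = 128, // multiple of 4, <= max_num_pages
+ *             .grow_threshold = 90, // grow when 90% of the FL is used
+ *             .vm_context_handle = vm_ctx,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_PVR_CREATE_FREE_LIST, &fl_args);
+ */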
+
+/**
+ * struct drm_pvr_ioctl_destroy_free_list_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_FREE_LIST
+ */
+struct drm_pvr_ioctl_destroy_free_list_args {
+ /**
+ * @handle: [IN] Handle for free list to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET interfaces
+ */
+
+/**
+ * struct drm_pvr_create_hwrt_geom_data_args - Geometry data arguments used for
+ * &struct drm_pvr_ioctl_create_hwrt_dataset_args.geom_data_args.
+ */
+struct drm_pvr_create_hwrt_geom_data_args {
+ /** @tpc_dev_addr: [IN] Tail pointer cache GPU virtual address. */
+ __u64 tpc_dev_addr;
+
+ /** @tpc_size: [IN] Size of TPC, in bytes. */
+ __u32 tpc_size;
+
+ /** @tpc_stride: [IN] Stride between layers in TPC, in pages. */
+ __u32 tpc_stride;
+
+ /** @vheap_table_dev_addr: [IN] VHEAP table GPU virtual address. */
+ __u64 vheap_table_dev_addr;
+
+ /** @rtc_dev_addr: [IN] Render Target Cache virtual address. */
+ __u64 rtc_dev_addr;
+};
+
+/**
+ * struct drm_pvr_create_hwrt_rt_data_args - Render target arguments used for
+ * &struct drm_pvr_ioctl_create_hwrt_dataset_args.rt_data_args.
+ */
+struct drm_pvr_create_hwrt_rt_data_args {
+ /** @pm_mlist_dev_addr: [IN] PM MLIST GPU virtual address. */
+ __u64 pm_mlist_dev_addr;
+
+ /** @macrotile_array_dev_addr: [IN] Macrotile array GPU virtual address. */
+ __u64 macrotile_array_dev_addr;
+
+ /** @region_header_dev_addr: [IN] Region header array GPU virtual address. */
+ __u64 region_header_dev_addr;
+};
+
+#define PVR_DRM_HWRT_FREE_LIST_LOCAL 0
+#define PVR_DRM_HWRT_FREE_LIST_GLOBAL 1U
+
+/**
+ * struct drm_pvr_ioctl_create_hwrt_dataset_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_HWRT_DATASET
+ */
+struct drm_pvr_ioctl_create_hwrt_dataset_args {
+ /** @geom_data_args: [IN] Geometry data arguments. */
+ struct drm_pvr_create_hwrt_geom_data_args geom_data_args;
+
+ /**
+ * @rt_data_args: [IN] Array of render target arguments.
+ *
+ * Each entry in this array represents a render target in a double buffered
+ * setup.
+ */
+ struct drm_pvr_create_hwrt_rt_data_args rt_data_args[2];
+
+ /**
+ * @free_list_handles: [IN] Array of free list handles.
+ *
+ * free_list_handles[PVR_DRM_HWRT_FREE_LIST_LOCAL] must have an initial
+ * size of at least that reported by
+ * &drm_pvr_dev_query_runtime_info.free_list_min_pages.
+ */
+ __u32 free_list_handles[2];
+
+ /** @width: [IN] Width in pixels. */
+ __u32 width;
+
+ /** @height: [IN] Height in pixels. */
+ __u32 height;
+
+ /** @samples: [IN] Number of samples. */
+ __u32 samples;
+
+ /** @layers: [IN] Number of layers. */
+ __u32 layers;
+
+ /** @isp_merge_lower_x: [IN] Lower X coefficient for triangle merging. */
+ __u32 isp_merge_lower_x;
+
+ /** @isp_merge_lower_y: [IN] Lower Y coefficient for triangle merging. */
+ __u32 isp_merge_lower_y;
+
+ /** @isp_merge_scale_x: [IN] Scale X coefficient for triangle merging. */
+ __u32 isp_merge_scale_x;
+
+ /** @isp_merge_scale_y: [IN] Scale Y coefficient for triangle merging. */
+ __u32 isp_merge_scale_y;
+
+ /** @isp_merge_upper_x: [IN] Upper X coefficient for triangle merging. */
+ __u32 isp_merge_upper_x;
+
+ /** @isp_merge_upper_y: [IN] Upper Y coefficient for triangle merging. */
+ __u32 isp_merge_upper_y;
+
+ /**
+ * @region_header_size: [IN] Size of region header array. This common field is used by
+ * both render targets in this data set.
+ *
+ * The units for this field differ depending on what version of the simple internal
+ * parameter format the device uses. If format 2 is in use then this is interpreted as the
+ * number of region headers. For other formats it is interpreted as the size in dwords.
+ */
+ __u32 region_header_size;
+
+ /**
+ * @handle: [OUT] Handle for created HWRT dataset.
+ */
+ __u32 handle;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_hwrt_dataset_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET
+ */
+struct drm_pvr_ioctl_destroy_hwrt_dataset_args {
+ /**
+ * @handle: [IN] Handle for HWRT dataset to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL SUBMIT_JOBS interface
+ */
+
+/**
+ * DOC: Flags for the drm_pvr_sync_op object.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK
+ *
+ * Handle type mask for the drm_pvr_sync_op::flags field.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ
+ *
+ * Indicates the handle passed in drm_pvr_sync_op::handle is a syncobj handle.
+ * This is the default type.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ
+ *
+ * Indicates the handle passed in drm_pvr_sync_op::handle is a timeline syncobj handle.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_SIGNAL
+ *
+ * Signal operation requested. The out-fence bound to the job will be attached to
+ * the syncobj whose handle is passed in drm_pvr_sync_op::handle.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_WAIT
+ *
+ * Wait operation requested. The job will wait for this particular syncobj or syncobj
+ * point to be signaled before being started.
+ * This is the default operation.
+ */
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK 0xf
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ 0
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ 1
+#define DRM_PVR_SYNC_OP_FLAG_SIGNAL _BITULL(31)
+#define DRM_PVR_SYNC_OP_FLAG_WAIT 0
+
+#define DRM_PVR_SYNC_OP_FLAGS_MASK (DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK | \
+ DRM_PVR_SYNC_OP_FLAG_SIGNAL)
+
+/**
+ * struct drm_pvr_sync_op - Object describing a sync operation
+ */
+struct drm_pvr_sync_op {
+ /** @handle: Handle of sync object. */
+ __u32 handle;
+
+ /** @flags: Combination of ``DRM_PVR_SYNC_OP_FLAG_`` flags. */
+ __u32 flags;
+
+ /** @value: Timeline value for this drm_syncobj. MBZ for a binary syncobj. */
+ __u64 value;
+};
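+
+/*
+ * Example: one wait and one signal operation for a job (an illustrative
+ * sketch; in_syncobj and out_syncobj are assumed to be handles created
+ * with DRM_IOCTL_SYNCOBJ_CREATE).
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_sync_op sync_ops[2] = {
+ *             { // wait for a binary syncobj before starting the job
+ *                     .handle = in_syncobj,
+ *                     .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
+ *                              DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
+ *             },
+ *             { // signal point 1 of a timeline syncobj on completion
+ *                     .handle = out_syncobj,
+ *                     .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
+ *                              DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ,
+ *                     .value = 1,
+ *             },
+ *     };
+ */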
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl geometry command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST
+ *
+ * Indicates if this is the first command to be issued for a render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST
+ *
+ * Indicates if this is the last command to be issued for a render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE
+ *
+ * Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the geometry cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST | \
+ DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST | \
+ DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl fragment command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE
+ *
+ * Use a single core in a multi-core setup.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER
+ *
+ * Indicates whether a depth buffer is present.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER
+ *
+ * Indicates whether a stencil buffer is present.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP
+ *
+ * Disallow compute jobs from overlapping with this render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS
+ *
+ * Indicates whether this render produces visibility results.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER
+ *
+ * Indicates whether partial renders write to a scratch buffer instead of
+ * the final surface. It also forces the full screen copy expected to be
+ * present on the last render after all partial renders have completed.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE
+ *
+ * Disable pixel merging for this render.
+ *
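+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER
+ *
+ * Indicates whether this fragment job is a partial render.
+ *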
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the fragment cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP _BITULL(3)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER _BITULL(4)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS _BITULL(5)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER _BITULL(6)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE _BITULL(7)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl compute command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP
+ *
+ * Disallow other jobs from overlapping with this compute job.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE
+ *
+ * Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the compute cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP | \
+ DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl transfer command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+ *
+ * Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the transfer cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE _BITULL(0)
+
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK \
+ DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+
+/**
+ * enum drm_pvr_job_type - Arguments for &struct drm_pvr_job.job_type
+ */
+enum drm_pvr_job_type {
+ /** @DRM_PVR_JOB_TYPE_GEOMETRY: Job type is geometry. */
+ DRM_PVR_JOB_TYPE_GEOMETRY = 0,
+
+ /** @DRM_PVR_JOB_TYPE_FRAGMENT: Job type is fragment. */
+ DRM_PVR_JOB_TYPE_FRAGMENT,
+
+ /** @DRM_PVR_JOB_TYPE_COMPUTE: Job type is compute. */
+ DRM_PVR_JOB_TYPE_COMPUTE,
+
+ /** @DRM_PVR_JOB_TYPE_TRANSFER_FRAG: Job type is a fragment transfer. */
+ DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
+};
+
+/**
+ * struct drm_pvr_hwrt_data_ref - Reference HWRT data
+ */
+struct drm_pvr_hwrt_data_ref {
+ /** @set_handle: HWRT data set handle. */
+ __u32 set_handle;
+
+ /** @data_index: Index of the HWRT data inside the data set. */
+ __u32 data_index;
+};
+
+/**
+ * struct drm_pvr_job - Job arguments passed to the %DRM_IOCTL_PVR_SUBMIT_JOBS ioctl
+ */
+struct drm_pvr_job {
+ /**
+ * @type: [IN] Type of job being submitted.
+ *
+ * This must be one of the values defined by &enum drm_pvr_job_type.
+ */
+ __u32 type;
+
+ /**
+ * @context_handle: [IN] Context handle.
+ *
+ * When @type is %DRM_PVR_JOB_TYPE_GEOMETRY, %DRM_PVR_JOB_TYPE_FRAGMENT,
+ * %DRM_PVR_JOB_TYPE_COMPUTE or %DRM_PVR_JOB_TYPE_TRANSFER_FRAG, this must
+ * be a valid handle returned by %DRM_IOCTL_PVR_CREATE_CONTEXT. The type of
+ * context must be compatible with the type of job being submitted.
+ */
+ __u32 context_handle;
+
+ /**
+ * @flags: [IN] Flags for command.
+ *
+ * These are job-dependent. See the ``DRM_PVR_SUBMIT_JOB_*`` flags.
+ */
+ __u32 flags;
+
+ /**
+ * @cmd_stream_len: [IN] Length of command stream, in bytes.
+ */
+ __u32 cmd_stream_len;
+
+ /**
+ * @cmd_stream: [IN] Pointer to command stream for command.
+ *
+ * The command stream must be u64-aligned.
+ */
+ __u64 cmd_stream;
+
+ /** @sync_ops: [IN] Sync operations for this job. */
+ struct drm_pvr_obj_array sync_ops;
+
+ /**
+ * @hwrt: [IN] HWRT data used by render jobs (geometry or fragment).
+ *
+ * Must be zero for non-render jobs.
+ */
+ struct drm_pvr_hwrt_data_ref hwrt;
+};
+
+/**
+ * struct drm_pvr_ioctl_submit_jobs_args - Arguments for %DRM_IOCTL_PVR_SUBMIT_JOBS
+ *
+ * If the syscall returns an error it is important to check the value of
+ * @jobs.count. This indicates the index into @jobs.array where the
+ * error occurred.
+ */
+struct drm_pvr_ioctl_submit_jobs_args {
+ /** @jobs: [IN] Array of jobs to submit. */
+ struct drm_pvr_obj_array jobs;
+};
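+
+/*
+ * Example: submitting a single geometry job (an illustrative sketch; the
+ * drm_pvr_obj_array initialisation assumes the stride/count/array layout
+ * defined earlier in this header, and cmd_stream is assumed to point at a
+ * u64-aligned command stream built by the UMD).
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_job job = {
+ *             .type = DRM_PVR_JOB_TYPE_GEOMETRY,
+ *             .context_handle = ctx_handle,
+ *             .flags = DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST |
+ *                      DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST,
+ *             .cmd_stream = (__u64)(uintptr_t)cmd_stream,
+ *             .cmd_stream_len = cmd_stream_len,
+ *             .hwrt = { .set_handle = hwrt_handle, .data_index = 0 },
+ *     };
+ *
+ *     struct drm_pvr_ioctl_submit_jobs_args submit_args = {
+ *             .jobs = {
+ *                     .stride = sizeof(job),
+ *                     .count = 1,
+ *                     .array = (__u64)(uintptr_t)&job,
+ *             },
+ *     };
+ *     ioctl(fd, DRM_IOCTL_PVR_SUBMIT_JOBS, &submit_args);
+ */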
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* PVR_DRM_UAPI_H */
diff --git a/include/uapi/drm/qaic_accel.h b/include/uapi/drm/qaic_accel.h
index 43ac5d8645..9dab32316a 100644
--- a/include/uapi/drm/qaic_accel.h
+++ b/include/uapi/drm/qaic_accel.h
@@ -287,8 +287,9 @@ struct qaic_execute_entry {
* struct qaic_partial_execute_entry - Defines a BO to resize and submit.
* @handle: In. GEM handle of the BO to commit to the device.
* @dir: In. Direction of data. 1 = to device, 2 = from device.
- * @resize: In. New size of the BO. Must be <= the original BO size. 0 is
- * short for no resize.
+ * @resize: In. New size of the BO. Must be <= the original BO size. A
+ * @resize of 0 is interpreted as meaning that no DMA transfer is
+ * involved.
*/
struct qaic_partial_execute_entry {
__u32 handle;
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 3dfc0af875..dce1835ece 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -41,6 +41,7 @@ extern "C" {
#define DRM_V3D_PERFMON_CREATE 0x08
#define DRM_V3D_PERFMON_DESTROY 0x09
#define DRM_V3D_PERFMON_GET_VALUES 0x0a
+#define DRM_V3D_SUBMIT_CPU 0x0b
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -56,6 +57,7 @@ extern "C" {
struct drm_v3d_perfmon_destroy)
#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
struct drm_v3d_perfmon_get_values)
+#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
#define DRM_V3D_SUBMIT_EXTENSION 0x02
@@ -69,7 +71,13 @@ extern "C" {
struct drm_v3d_extension {
__u64 next;
__u32 id;
-#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02
+#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03
+#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04
+#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05
+#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06
+#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07
__u32 flags; /* mbz */
};
@@ -93,6 +101,7 @@ enum v3d_queue {
V3D_TFU,
V3D_CSD,
V3D_CACHE_CLEAN,
+ V3D_CPU,
};
/**
@@ -276,6 +285,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
DRM_V3D_PARAM_SUPPORTS_PERFMON,
DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
+ DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
};
struct drm_v3d_get_param {
@@ -319,6 +329,11 @@ struct drm_v3d_submit_tfu {
/* Pointer to an array of ioctl extensions*/
__u64 extensions;
+
+ struct {
+ __u32 ioc;
+ __u32 pad;
+ } v71;
};
/* Submits a compute shader for dispatch. This job will block on any
@@ -356,6 +371,234 @@ struct drm_v3d_submit_csd {
__u32 pad;
};
+/**
+ * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an
+ * indirect CSD
+ *
+ * When an extension with the DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined,
+ * it points to this extension to define an indirect CSD submission. It
+ * creates a CPU job linked to a CSD job. The CPU job waits for the indirect
+ * CSD dependencies and, once they are signaled, updates the CSD job
+ * configuration before allowing the CSD job to execute.
+ */
+struct drm_v3d_indirect_csd {
+ struct drm_v3d_extension base;
+
+ /* Indirect CSD */
+ struct drm_v3d_submit_csd submit;
+
+ /* Handle of the indirect BO, which should also be attached to the
+ * indirect CSD.
+ */
+ __u32 indirect;
+
+ /* Offset within the BO where the workgroup counts are stored */
+ __u32 offset;
+
+ /* Workgroup size */
+ __u32 wg_size;
+
+ /* Indices of the uniforms with the workgroup dispatch counts
+ * in the uniform stream. If the uniform rewrite is not needed,
+ * the offset must be 0xffffffff.
+ */
+ __u32 wg_uniform_offsets[3];
+};
+
+/**
+ * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate
+ * a timestamp query
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY is defined, it points to
+ * this extension to define a timestamp query submission. This CPU job will
+ * calculate the timestamp query and update the query value within the
+ * timestamp BO. Moreover, it will signal the timestamp syncobj to indicate
+ * query availability.
+ */
+struct drm_v3d_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Array of queries' offsets within the timestamp BO for their values */
+ __u64 offsets;
+
+ /* Array of timestamp syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* mbz */
+ __u32 pad;
+};
+
+/**
+ * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to
+ * reset timestamp queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a reset timestamp submission. This CPU
+ * job will reset the timestamp queries based on value offset of the first
+ * query. Moreover, it will reset the timestamp syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Array of timestamp syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Offset of the first query within the timestamp BO for its value */
+ __u32 offset;
+
+ /* Number of queries */
+ __u32 count;
+};
+
+/**
+ * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy
+ * query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a copy timestamp query submission. This
+ * CPU job will copy the timestamp queries results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Defines whether to write to the buffer using 64 or 32 bits */
+ __u8 do_64bit;
+
+ /* Defines whether it can write to the buffer even if the query is not available */
+ __u8 do_partial;
+
+ /* Defines whether it should write the availability bit to the buffer */
+ __u8 availability_bit;
+
+ /* mbz */
+ __u8 pad;
+
+ /* Offset of the buffer in the BO */
+ __u32 offset;
+
+ /* Stride of the buffer in the BO */
+ __u32 stride;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Array of queries' offsets within the timestamp BO for their values */
+ __u64 offsets;
+
+ /* Array of timestamp syncobjs to indicate their availability */
+ __u64 syncs;
+};
+
+/**
+ * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to
+ * reset performance queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a reset performance submission. This CPU
+ * job will reset the performance queries by resetting the values of the
+ * performance monitors. Moreover, it will reset the syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_performance_query {
+ struct drm_v3d_extension base;
+
+ /* Array of performance query syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Number of performance monitors */
+ __u32 nperfmons;
+
+ /* Array of u64 user-pointers that point to an array of kperfmon_ids */
+ __u64 kperfmon_ids;
+};
+
+/**
+ * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy
+ * performance query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a copy performance query submission. This
+ * CPU job will copy the performance queries results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_performance_query {
+ struct drm_v3d_extension base;
+
+ /* Defines whether to write to the buffer using 64 or 32 bits */
+ __u8 do_64bit;
+
+ /* Defines whether it can write to the buffer even if the query is not available */
+ __u8 do_partial;
+
+ /* Defines whether it should write the availability bit to the buffer */
+ __u8 availability_bit;
+
+ /* mbz */
+ __u8 pad;
+
+ /* Offset of the buffer in the BO */
+ __u32 offset;
+
+ /* Stride of the buffer in the BO */
+ __u32 stride;
+
+ /* Number of performance monitors */
+ __u32 nperfmons;
+
+ /* Number of performance counters related to this query pool */
+ __u32 ncounters;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Array of performance query syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Array of u64 user-pointers that point to an array of kperfmon_ids */
+ __u64 kperfmon_ids;
+};
+
+struct drm_v3d_submit_cpu {
+ /* Pointer to a u32 array of the BOs that are referenced by the job.
+ *
+ * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO,
+ * which contains the workgroup counts.
+ *
+ * For DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY, it must contain only one BO,
+ * which will contain the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only
+ * one BO, which contains the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two
+ * BOs. The first is the BO where the timestamp queries will be written
+ * to. The second is the BO that contains the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no
+ * BOs.
+ *
+ * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one
+ * BO, where the performance queries will be written.
+ */
+ __u64 bo_handles;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ __u32 flags;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+};
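+
+/*
+ * Example: a CPU job that resets a single timestamp query (an
+ * illustrative sketch; timestamp_bo and timestamp_syncobj are assumed to
+ * be existing GEM/syncobj handles and error handling is omitted).
+ *
+ * .. code-block:: C
+ *
+ *     __u32 bo = timestamp_bo; // BO holding the timestamp values
+ *     __u32 sync = timestamp_syncobj; // syncobj tracking availability
+ *
+ *     struct drm_v3d_reset_timestamp_query reset = {
+ *             .base = { .id = DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY },
+ *             .syncs = (__u64)(uintptr_t)&sync,
+ *             .offset = 0,
+ *             .count = 1,
+ *     };
+ *
+ *     struct drm_v3d_submit_cpu submit = {
+ *             .bo_handles = (__u64)(uintptr_t)&bo,
+ *             .bo_handle_count = 1,
+ *             .extensions = (__u64)(uintptr_t)&reset,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CPU, &submit);
+ */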
+
enum {
V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
V3D_PERFCNT_FEP_VALID_PRIMS,
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index b1d0e56565..c2ce71987e 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -97,6 +97,7 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
+#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */
struct drm_virtgpu_getparam {
__u64 param;
@@ -198,6 +199,7 @@ struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004
struct drm_virtgpu_context_set_param {
__u64 param;
__u64 value;
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
new file mode 100644
index 0000000000..bb0c8a9941
--- /dev/null
+++ b/include/uapi/drm/xe_drm.h
@@ -0,0 +1,1327 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ * Sections in this file are organized as follows:
+ * 1. IOCTL definition
+ * 2. Extension definition and helper structs
+ * 3. IOCTL query structs, in the order of the query entries.
+ * 4. The rest of the IOCTL structs, in the order of IOCTL declaration.
+ */
+
+/**
+ * DOC: Xe Device Block Diagram
+ *
+ * The diagram below represents a high-level simplification of a discrete
+ * GPU supported by the Xe driver. It shows some device components which
+ * are necessary to understand this API, as well as their relations to
+ * each other. This diagram does not represent real hardware::
+ *
+ * ┌──────────────────────────────────────────────────────────────────┐
+ * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
+ * │ │ ┌───────────────────────┐ ┌─────┐ │ │ ┌─────┐ │ │
+ * │ │ │ VRAM0 ├───┤ ... │ │ │ │VRAM1│ │ │
+ * │ │ └───────────┬───────────┘ └─GT1─┘ │ │ └──┬──┘ │ │
+ * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
+ * │ │ │ ┌─────────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │RCS0 │ │BCS0 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VCS0 │ │VCS1 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │CCS0 │ │CCS1 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ └─────────DSS─────────┘ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │CCS2 │ │CCS3 │ │ │ │ │ │ │ │ │
+ * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ... │ │ ... │ │ ... │ │ │ │ │ │ │ │ │ │
+ * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘ └─────Engines─────┘ │ │ │ │ │ │ │
+ * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
+ * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
+ * └─────────────────────────────Device0───────┬──────────────────────┘
+ * │
+ * ───────────────────────┴────────── PCI bus
+ */
+
+/**
+ * DOC: Xe uAPI Overview
+ *
+ * This section aims to describe the Xe's IOCTL entries, its structs, and other
+ * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related
+ * entries and usage.
+ *
+ * List of supported IOCTLs:
+ * - &DRM_IOCTL_XE_DEVICE_QUERY
+ * - &DRM_IOCTL_XE_GEM_CREATE
+ * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
+ * - &DRM_IOCTL_XE_VM_CREATE
+ * - &DRM_IOCTL_XE_VM_DESTROY
+ * - &DRM_IOCTL_XE_VM_BIND
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
+ * - &DRM_IOCTL_XE_EXEC
+ * - &DRM_IOCTL_XE_WAIT_USER_FENCE
+ */
+
+/*
+ * xe specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
+ * i.e. [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as
+ * offsets against DRM_COMMAND_BASE and should be in [0x0, 0x60).
+ */
+#define DRM_XE_DEVICE_QUERY 0x00
+#define DRM_XE_GEM_CREATE 0x01
+#define DRM_XE_GEM_MMAP_OFFSET 0x02
+#define DRM_XE_VM_CREATE 0x03
+#define DRM_XE_VM_DESTROY 0x04
+#define DRM_XE_VM_BIND 0x05
+#define DRM_XE_EXEC_QUEUE_CREATE 0x06
+#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
+#define DRM_XE_EXEC 0x09
+#define DRM_XE_WAIT_USER_FENCE 0x0a
+/* Must be kept compact -- no holes */
+
+#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
+#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
+#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
+#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
+#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
+#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
+#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
+#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
+#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
+#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
+#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
+
+/**
+ * DOC: Xe IOCTL Extensions
+ *
+ * Before detailing the IOCTLs and its structs, it is important to highlight
+ * that every IOCTL in Xe is extensible.
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the __user boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_user_extension ext3 {
+ * .next_extension = 0, // end
+ * .name = ...,
+ * };
+ * struct drm_xe_user_extension ext2 {
+ * .next_extension = (uintptr_t)&ext3,
+ * .name = ...,
+ * };
+ * struct drm_xe_user_extension ext1 {
+ * .next_extension = (uintptr_t)&ext2,
+ * .name = ...,
+ * };
+ *
+ * Typically the struct drm_xe_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain(i.e ext1),
+ * which would then apply all of the above extensions.
+*/
+
+/**
+ * struct drm_xe_user_extension - Base class for defining a chain of extensions
+ */
+struct drm_xe_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct drm_xe_user_extension, or zero if the end.
+ */
+ __u64 next_extension;
+
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name here is just some integer.
+ *
+ * Also note that the name space for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct drm_xe_user_extension.
+ */
+ __u32 name;
+
+ /**
+ * @pad: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_xe_ext_set_property - Generic set property extension
+ *
+ * A generic struct that allows any of the Xe's IOCTL to be extended
+ * with a set_property operation.
+ */
+struct drm_xe_ext_set_property {
+ /** @base: base user extension */
+ struct drm_xe_user_extension base;
+
+ /** @property: property to set */
+ __u32 property;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
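+
+/*
+ * Example: initialising a single-entry extension chain (an illustrative
+ * sketch; the name and property/value shown are hypothetical and depend
+ * on which IOCTL the extension is attached to).
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_xe_ext_set_property ext = {
+ *             .base.next_extension = 0, // end of the chain
+ *             .base.name = 0, // hypothetical: extension name id
+ *             .property = 0, // hypothetical property id
+ *             .value = 1,
+ *     };
+ *
+ *     // The extensions member of the extended IOCTL struct would then
+ *     // be set to (uintptr_t)&ext.
+ */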
+
+/**
+ * struct drm_xe_engine_class_instance - instance of an engine class
+ *
+ * It is returned as part of the @drm_xe_engine, but it is also used as
+ * the input for engine selection for both @drm_xe_exec_queue_create and
+ * @drm_xe_query_engine_cycles.
+ *
+ * The @engine_class can be:
+ * - %DRM_XE_ENGINE_CLASS_RENDER
+ * - %DRM_XE_ENGINE_CLASS_COPY
+ * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
+ * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
+ * - %DRM_XE_ENGINE_CLASS_COMPUTE
+ * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual
+ * hardware engine class). Used for creating ordered queues of VM
+ * bind operations.
+ */
+struct drm_xe_engine_class_instance {
+#define DRM_XE_ENGINE_CLASS_RENDER 0
+#define DRM_XE_ENGINE_CLASS_COPY 1
+#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2
+#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3
+#define DRM_XE_ENGINE_CLASS_COMPUTE 4
+#define DRM_XE_ENGINE_CLASS_VM_BIND 5
+ /** @engine_class: engine class id */
+ __u16 engine_class;
+ /** @engine_instance: engine instance id */
+ __u16 engine_instance;
+ /** @gt_id: Unique ID of this GT within the PCI Device */
+ __u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad;
+};
+
+/**
+ * struct drm_xe_engine - describe hardware engine
+ */
+struct drm_xe_engine {
+ /** @instance: The @drm_xe_engine_class_instance */
+ struct drm_xe_engine_class_instance instance;
+
+ /** @reserved: Reserved */
+ __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_query_engines - describe engines
+ *
+ * If a query is made with a struct @drm_xe_device_query where .query
+ * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses
+ * struct @drm_xe_query_engines in .data.
+ */
+struct drm_xe_query_engines {
+ /** @num_engines: number of engines returned in @engines */
+ __u32 num_engines;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @engines: The returned engines for this device */
+ struct drm_xe_engine engines[];
+};
+
+/**
+ * enum drm_xe_memory_class - Supported memory classes.
+ */
+enum drm_xe_memory_class {
+ /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
+ DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
+ /**
+ * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
+ * represents the memory that is local to the device, which we
+ * call VRAM. Not valid on integrated platforms.
+ */
+ DRM_XE_MEM_REGION_CLASS_VRAM
+};
+
+/**
+ * struct drm_xe_mem_region - Describes some region as known to
+ * the driver.
+ */
+struct drm_xe_mem_region {
+ /**
+ * @mem_class: The memory class describing this region.
+ *
+ * See enum drm_xe_memory_class for supported values.
+ */
+ __u16 mem_class;
+ /**
+ * @instance: The unique ID for this region, which serves as the
+ * index in the placement bitmask used as argument for
+ * &DRM_IOCTL_XE_GEM_CREATE
+ */
+ __u16 instance;
+ /**
+ * @min_page_size: Min page-size in bytes for this region.
+ *
+ * When the kernel allocates memory for this region, the
+ * underlying pages will be at least @min_page_size in size.
+ * Buffer objects with an allowable placement in this region must be
+ * created with a size aligned to this value.
+ * GPU virtual address mappings of (parts of) buffer objects that
+ * may be placed in this region must also have their GPU virtual
+ * address and range aligned to this value.
+ * Affected IOCTLS will return %-EINVAL if alignment restrictions are
+ * not met.
+ */
+ __u32 min_page_size;
+ /**
+ * @total_size: The usable size in bytes for this region.
+ */
+ __u64 total_size;
+ /**
+ * @used: Estimate of the memory used in bytes for this region.
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always equal
+ * zero.
+ */
+ __u64 used;
+ /**
+ * @cpu_visible_size: How much of this region can be CPU
+ * accessed, in bytes.
+ *
+ * This will always be <= @total_size, and the remainder (if
+ * any) will not be CPU accessible. If the CPU accessible part
+ * is smaller than @total_size then this is referred to as a
+ * small BAR system.
+ *
+ * On systems without small BAR (full BAR), @cpu_visible_size will
+ * always equal @total_size, since all of it will be CPU
+ * accessible.
+ *
+ * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
+ * regions (for other types the value here will always equal
+ * zero).
+ */
+ __u64 cpu_visible_size;
+ /**
+ * @cpu_visible_used: Estimate of CPU visible memory used, in
+ * bytes.
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always equal
+ * zero. Note this is only currently tracked for
+ * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
+ * here will always be zero).
+ */
+ __u64 cpu_visible_used;
+ /** @reserved: Reserved */
+ __u64 reserved[6];
+};
+
+/**
+ * struct drm_xe_query_mem_regions - describe memory regions
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
+ * struct drm_xe_query_mem_regions in .data.
+ */
+struct drm_xe_query_mem_regions {
+ /** @num_mem_regions: number of memory regions returned in @mem_regions */
+ __u32 num_mem_regions;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @mem_regions: The returned memory regions for this device */
+ struct drm_xe_mem_region mem_regions[];
+};
+
+/**
+ * struct drm_xe_query_config - describe the device configuration
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
+ * struct drm_xe_query_config in .data.
+ *
+ * The index in @info can be:
+ * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
+ * and the device revision (next 8 bits)
+ * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
+ * configuration, see list below
+ *
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
+ * has usable VRAM
+ * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
+ * required by this device, typically SZ_4K or SZ_64K
+ * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
+ * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
+ * available exec queue priority
+ */
+struct drm_xe_query_config {
+ /** @num_params: number of parameters returned in info */
+ __u32 num_params;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
+#define DRM_XE_QUERY_CONFIG_FLAGS 1
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
+#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
+#define DRM_XE_QUERY_CONFIG_VA_BITS 3
+#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
+ /** @info: array of elements containing the config info */
+ __u64 info[];
+};
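+
+/*
+ * Example: decoding entries of the returned info array (an illustrative
+ * sketch; config is assumed to point at the buffer filled in by a
+ * DRM_XE_DEVICE_QUERY_CONFIG query, using the same two-call pattern as
+ * the engines example further below).
+ *
+ * .. code-block:: C
+ *
+ *     __u64 rev_id = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID];
+ *     __u16 device_id = rev_id & 0xffff; // lower 16 bits
+ *     __u8 revision = (rev_id >> 16) & 0xff; // next 8 bits
+ *     int has_vram = !!(config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
+ *                       DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM);
+ */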
+
+/**
+ * struct drm_xe_gt - describe an individual GT.
+ *
+ * To be used with drm_xe_query_gt_list, which will return a list with all the
+ * existing GT individual descriptions.
+ * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
+ * implementing graphics and/or media operations.
+ *
+ * The index in @type can be:
+ * - %DRM_XE_QUERY_GT_TYPE_MAIN
+ * - %DRM_XE_QUERY_GT_TYPE_MEDIA
+ */
+struct drm_xe_gt {
+#define DRM_XE_QUERY_GT_TYPE_MAIN 0
+#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
+ /** @type: GT type: Main or Media */
+ __u16 type;
+ /** @tile_id: Tile ID where this GT lives (Information only) */
+ __u16 tile_id;
+ /** @gt_id: Unique ID of this GT within the PCI Device */
+ __u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad[3];
+ /** @reference_clock: A clock frequency for timestamp */
+ __u32 reference_clock;
+ /**
+ * @near_mem_regions: Bit mask of instances from
+ * drm_xe_query_mem_regions that are nearest to the current engines
+ * of this GT.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u64 near_mem_regions;
+ /**
+ * @far_mem_regions: Bit mask of instances from
+ * drm_xe_query_mem_regions that are far from the engines of this GT.
+ * In general, they have extra indirections when compared to the
+ * @near_mem_regions. For a discrete device this could mean system
+ * memory and memory living in a different tile.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u64 far_mem_regions;
+ /** @reserved: Reserved */
+ __u64 reserved[8];
+};
+
+/**
+ * struct drm_xe_query_gt_list - A list with GT description items.
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
+ * drm_xe_query_gt_list in .data.
+ */
+struct drm_xe_query_gt_list {
+ /** @num_gt: number of GT items returned in gt_list */
+ __u32 num_gt;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @gt_list: The GT list returned for this device */
+ struct drm_xe_gt gt_list[];
+};
+
+/**
+ * struct drm_xe_query_topology_mask - describe the topology mask of a GT
+ *
+ * This is the hardware topology which reflects the internal physical
+ * structure of the GPU.
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
+ * struct drm_xe_query_topology_mask in .data.
+ *
+ * The @type can be:
+ * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
+ * (DSS) available for geometry operations. For example a query response
+ * containing the following in mask:
+ * ``DSS_GEOMETRY ff ff ff ff 00 00 00 00``
+ * means 32 DSS are available for geometry.
+ * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
+ * (DSS) available for compute operations. For example a query response
+ * containing the following in mask:
+ * ``DSS_COMPUTE ff ff ff ff 00 00 00 00``
+ * means 32 DSS are available for compute.
+ * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
+ * available per Dual Sub Slices (DSS). For example a query response
+ * containing the following in mask:
+ * ``EU_PER_DSS ff ff 00 00 00 00 00 00``
+ * means each DSS has 16 EU.
+ */
+struct drm_xe_query_topology_mask {
+ /** @gt_id: GT ID the mask is associated with */
+ __u16 gt_id;
+
+#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
+#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
+#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
+ /** @type: type of mask */
+ __u16 type;
+
+ /** @num_bytes: number of bytes in requested mask */
+ __u32 num_bytes;
+
+ /** @mask: little-endian mask of @num_bytes */
+ __u8 mask[];
+};
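+
+/*
+ * Example: counting the DSS usable for geometry on one GT (an
+ * illustrative sketch; topo is assumed to point at one entry of a
+ * DRM_XE_DEVICE_QUERY_GT_TOPOLOGY reply).
+ *
+ * .. code-block:: C
+ *
+ *     int num_dss = 0;
+ *
+ *     if (topo->type == DRM_XE_TOPO_DSS_GEOMETRY) {
+ *             for (__u32 i = 0; i < topo->num_bytes; i++)
+ *                     num_dss += __builtin_popcount(topo->mask[i]);
+ *     }
+ */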
+
+/**
+ * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
+ *
+ * If a query is made with a struct drm_xe_device_query where .query is equal to
+ * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
+ * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
+ * .data points to this allocated structure.
+ *
+ * The query returns the engine cycles, which along with GT's @reference_clock,
+ * can be used to calculate the engine timestamp. In addition the
+ * query returns a set of cpu timestamps that indicate when the command
+ * streamer cycle count was captured.
+ */
+struct drm_xe_query_engine_cycles {
+ /**
+ * @eci: This is input by the user and is the engine for which command
+ * streamer cycles are queried.
+ */
+ struct drm_xe_engine_class_instance eci;
+
+ /**
+ * @clockid: This is input by the user and is the reference clock id for
+ * CPU timestamp. For definition, see clock_gettime(2) and
+ * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
+ * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
+ */
+ __s32 clockid;
+
+ /** @width: Width of the engine cycle counter in bits. */
+ __u32 width;
+
+ /**
+ * @engine_cycles: Engine cycles as read from its register
+ * at 0x358 offset.
+ */
+ __u64 engine_cycles;
+
+ /**
+ * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
+ * reading the engine_cycles register using the reference clockid set by the
+ * user.
+ */
+ __u64 cpu_timestamp;
+
+ /**
+ * @cpu_delta: Time delta in ns captured around reading the lower dword
+ * of the engine_cycles register.
+ */
+ __u64 cpu_delta;
+};
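+
+/*
+ * Example: converting the sampled cycle counter into an engine timestamp
+ * in nanoseconds (an illustrative sketch; ec is assumed to be a filled-in
+ * struct drm_xe_query_engine_cycles and ref_clock the @reference_clock,
+ * in Hz, of the GT the engine belongs to; beware of multiplication
+ * overflow for very large cycle counts).
+ *
+ * .. code-block:: C
+ *
+ *     __u64 mask = ec.width < 64 ? (1ull << ec.width) - 1 : ~0ull;
+ *     __u64 cycles = ec.engine_cycles & mask;
+ *     __u64 engine_time_ns = cycles * 1000000000ull / ref_clock;
+ *     // engine_time_ns can now be correlated with ec.cpu_timestamp,
+ *     // within roughly ec.cpu_delta.
+ */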
+
+/**
+ * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
+ * structure to query device information
+ *
+ * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
+ * and sets the value in the query member. This determines the type of
+ * the structure provided by the driver in data, among struct drm_xe_query_*.
+ *
+ * The @query can be:
+ * - %DRM_XE_DEVICE_QUERY_ENGINES
+ * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
+ * - %DRM_XE_DEVICE_QUERY_CONFIG
+ * - %DRM_XE_DEVICE_QUERY_GT_LIST
+ * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
+ * configuration of the device such as information on slices, memory,
+ * caches, and so on. It is provided as a table of key / value
+ * attributes.
+ * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
+ * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
+ *
+ * If size is set to 0, the driver fills it with the required size for
+ * the requested type of data to query. If size is equal to the required
+ * size, the queried information is copied into data. If size is set to
+ * a value different from 0 and different from the required size, the
+ * IOCTL call returns -EINVAL.
+ *
+ * For example the following code snippet allows retrieving and printing
+ * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_query_engines *engines;
+ * struct drm_xe_device_query query = {
+ * .extensions = 0,
+ * .query = DRM_XE_DEVICE_QUERY_ENGINES,
+ * .size = 0,
+ * .data = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * engines = malloc(query.size);
+ * query.data = (uintptr_t)engines;
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * for (int i = 0; i < engines->num_engines; i++) {
+ * printf("Engine %d: %s\n", i,
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COPY ? "COPY":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
+ * "UNKNOWN");
+ * }
+ * free(engines);
+ */
+struct drm_xe_device_query {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_DEVICE_QUERY_ENGINES 0
+#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
+#define DRM_XE_DEVICE_QUERY_CONFIG 2
+#define DRM_XE_DEVICE_QUERY_GT_LIST 3
+#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
+#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5
+#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
+ /** @query: The type of data to query */
+ __u32 query;
+
+ /** @size: Size of the queried data */
+ __u32 size;
+
+ /** @data: Queried data is placed here */
+ __u64 data;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
+ * gem creation
+ *
+ * The @flags can be:
+ * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
+ * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
+ * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
+ * possible placement, ensure that the corresponding VRAM allocation
+ * will always use the CPU accessible part of VRAM. This is important
+ * for small-bar systems (on full-bar systems this gets turned into a
+ * noop).
+ * Note1: System memory can be used as an extra placement so that the
+ * kernel can spill the allocation to system memory if space can't be
+ * made available in the CPU accessible part of VRAM (giving the same
+ * behaviour as the i915 interface, see
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
+ * Note2: For clear-color CCS surfaces the kernel needs to read the
+ * clear-color value stored in the buffer, and on discrete platforms we
+ * need to use VRAM for display surfaces, therefore the kernel requires
+ * setting this flag for such objects, otherwise an error is returned
+ * on small-bar systems.
+ *
+ * @cpu_caching supports the following values:
+ * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
+ * caching. On iGPU this can't be used for scanout surfaces. Currently
+ * not allowed for objects placed in VRAM.
+ * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
+ * is uncached. Scanout surfaces should likely use this. All objects
+ * that can be placed in VRAM must use this.
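+ *
+ * Below is a minimal sketch of creating a buffer object. It assumes that
+ * memory region instance 0 is a system memory region as reported by
+ * DRM_XE_DEVICE_QUERY_MEM_REGIONS, and it omits error handling:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_xe_gem_create create = {
+ *         .size = 0x10000,
+ *         .placement = 1 << 0,
+ *         .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
+ *     /* create.handle now contains the (nonzero) GEM handle */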
+ */
+struct drm_xe_gem_create {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @size: Size of the object to be created, must match region
+ * (system or vram) minimum alignment (&min_page_size).
+ */
+ __u64 size;
+
+ /**
+ * @placement: A mask of memory instances of where BO can be placed.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u32 placement;
+
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
+ /**
+ * @flags: Creation flags, a combination of the
+ * DRM_XE_GEM_CREATE_FLAG_* values defined above
+ */
+ __u32 flags;
+
+ /**
+ * @vm_id: Attached VM, if any
+ *
+ * If a VM is specified, this BO must:
+ *
+ * 1. Only ever be bound to that VM.
+ * 2. Never be exported as a PRIME fd.
+ */
+ __u32 vm_id;
+
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+
+#define DRM_XE_GEM_CPU_CACHING_WB 1
+#define DRM_XE_GEM_CPU_CACHING_WC 2
+ /**
+ * @cpu_caching: The CPU caching mode to select for this object. If
+ * the object is mmapped, the mode selected here will also be used.
+ */
+ __u16 cpu_caching;
+ /** @pad: MBZ */
+ __u16 pad[3];
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
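+ *
+ * The returned @offset is a token to be passed to mmap(2) on the DRM fd.
+ * A minimal sketch (assuming `bo` is a valid GEM handle and `size` its
+ * creation size; error handling omitted) could look like this:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_xe_gem_mmap_offset mmo = {
+ *         .handle = bo,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
+ *     void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *                      MAP_SHARED, fd, mmo.offset);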
+ */
+struct drm_xe_gem_mmap_offset {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @handle: Handle for the object being mapped. */
+ __u32 handle;
+
+ /** @flags: Must be zero */
+ __u32 flags;
+
+ /** @offset: The fake offset to use for subsequent mmap call */
+ __u64 offset;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
+ *
+ * The @flags can be:
+ * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
+ * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running, VM accepts
+ *   exec submissions to its exec_queues that don't have an upper time
+ *   limit on the job execution time. But exec submissions to these
+ *   don't allow the sync types DRM_XE_SYNC_TYPE_SYNCOBJ and
+ *   DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ to be used as out-syncs, that is,
+ *   together with DRM_XE_SYNC_FLAG_SIGNAL.
+ *   LR VMs can be created in recoverable page-fault mode using
+ *   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
+ *   If that flag is omitted, the UMD cannot rely on the slightly
+ *   different per-VM overcommit semantics that are enabled by
+ *   DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but the KMD may
+ *   still enable recoverable pagefaults if supported by the device.
+ * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
+ * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
+ * demand when accessed, and also allows per-VM overcommit of memory.
+ * The xe driver internally uses recoverable pagefaults to implement
+ * this.
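+ *
+ * A minimal sketch of creating a VM (here with scratch pages enabled,
+ * error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_xe_vm_create vm_create = {
+ *         .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm_create);
+ *     /* vm_create.vm_id now holds the new VM ID */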
+ */
+struct drm_xe_vm_create {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
+#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2)
+ /** @flags: Flags */
+ __u32 flags;
+
+ /** @vm_id: Returned VM ID */
+ __u32 vm_id;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
+ */
+struct drm_xe_vm_destroy {
+ /** @vm_id: VM ID */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_bind_op - run bind operations
+ *
+ * The @op can be:
+ * - %DRM_XE_VM_BIND_OP_MAP
+ * - %DRM_XE_VM_BIND_OP_UNMAP
+ * - %DRM_XE_VM_BIND_OP_MAP_USERPTR
+ * - %DRM_XE_VM_BIND_OP_UNMAP_ALL
+ * - %DRM_XE_VM_BIND_OP_PREFETCH
+ *
+ * and the @flags can be:
+ * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
+ * tables are set up with a special bit which indicates writes are
+ * dropped and all reads return zero. In the future, the NULL flag
+ * will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
+ * handle MBZ, and the BO offset MBZ. This flag is intended to
+ * implement VK sparse bindings.
+ */
+struct drm_xe_vm_bind_op {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
+ */
+ __u32 obj;
+
+ /**
+ * @pat_index: The platform defined @pat_index to use for this mapping.
+ * The index basically maps to some predefined memory attributes,
+ * including things like caching, coherency, compression etc. The exact
+ * meaning of the pat_index is platform specific and defined in the
+ * Bspec and PRMs. When the KMD sets up the binding the index here is
+ * encoded into the ppGTT PTE.
+ *
+ * For coherency the @pat_index needs to be at least 1-way coherent when
+ * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
+ * will extract the coherency mode from the @pat_index and reject if
+ * there is a mismatch (see note below for pre-MTL platforms).
+ *
+ * Note: On pre-MTL platforms there is only a caching mode and no
+ * explicit coherency mode, but on such hardware there is always a
+ * shared-LLC (or it is a dgpu) so all GT memory accesses are coherent with
+ * CPU caches even with the caching mode set as uncached. It's only the
+ * display engine that is incoherent (on dgpu it must be in VRAM which
+ * is always mapped as WC on the CPU). However to keep the uapi somewhat
+ * consistent with newer platforms the KMD groups the different cache
+ * levels into the following coherency buckets on all pre-MTL platforms:
+ *
+ * ppGTT UC -> COH_NONE
+ * ppGTT WC -> COH_NONE
+ * ppGTT WT -> COH_NONE
+ * ppGTT WB -> COH_AT_LEAST_1WAY
+ *
+ * In practice UC/WC/WT should only ever be used for scanout surfaces on
+ * such platforms (or perhaps in general for dma-buf if shared with
+ * another device) since it is only the display engine that is actually
+ * incoherent. Everything else should typically use WB given that we
+ * have a shared-LLC. On MTL+ this completely changes and the HW
+ * defines the coherency mode as part of the @pat_index, where
+ * incoherent GT access is possible.
+ *
+ * Note: For userptr and externally imported dma-buf the kernel expects
+ * either 1WAY or 2WAY for the @pat_index.
+ *
+ * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
+ * on the @pat_index. For such mappings there is no actual memory being
+ * mapped (the address in the PTE is invalid), so the various PAT memory
+ * attributes likely do not apply. Simply leaving as zero is one
+ * option (still a valid pat_index).
+ */
+ __u16 pat_index;
+
+ /** @pad: MBZ */
+ __u16 pad;
+
+ union {
+ /**
+ * @obj_offset: Offset into the object, ignored for unbind
+ */
+ __u64 obj_offset;
+
+ /** @userptr: user pointer to bind on */
+ __u64 userptr;
+ };
+
+ /**
+ * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
+ */
+ __u64 range;
+
+ /** @addr: Address to operate on, MBZ for UNMAP_ALL */
+ __u64 addr;
+
+#define DRM_XE_VM_BIND_OP_MAP 0x0
+#define DRM_XE_VM_BIND_OP_UNMAP 0x1
+#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
+#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
+#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
+ /** @op: Bind operation to perform */
+ __u32 op;
+
+#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
+#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
+ /** @flags: Bind flags */
+ __u32 flags;
+
+ /**
+ * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
+ * It is a region instance, not a mask.
+ * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
+ */
+ __u32 prefetch_mem_region_instance;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
+ *
+ * Below is a minimal example of using @drm_xe_vm_bind to asynchronously
+ * bind the buffer `data` at address `BIND_ADDRESS`, illustrating a
+ * `userptr` bind. It can be synchronized by using the example
+ * provided for @drm_xe_sync.
+ *
+ * .. code-block:: C
+ *
+ * data = aligned_alloc(ALIGNMENT, BO_SIZE);
+ * struct drm_xe_vm_bind bind = {
+ * .vm_id = vm,
+ * .num_binds = 1,
+ * .bind.obj = 0,
+ * .bind.userptr = to_user_pointer(data),
+ * .bind.range = BO_SIZE,
+ * .bind.addr = BIND_ADDRESS,
+ * .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
+ * .bind.flags = 0,
+ * .num_syncs = 1,
+ * .syncs = &sync,
+ * .exec_queue_id = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
+ *
+ */
+struct drm_xe_vm_bind {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: The ID of the VM to bind to */
+ __u32 vm_id;
+
+ /**
+ * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
+ * and the exec queue must have the same vm_id. If zero, the default VM bind
+ * engine is used.
+ */
+ __u32 exec_queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @num_binds: number of binds in this IOCTL */
+ __u32 num_binds;
+
+ union {
+ /** @bind: used if num_binds == 1 */
+ struct drm_xe_vm_bind_op bind;
+
+ /**
+ * @vector_of_binds: userptr to array of struct
+ * drm_xe_vm_bind_op if num_binds > 1
+ */
+ __u64 vector_of_binds;
+ };
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @num_syncs: number of syncs to wait on */
+ __u32 num_syncs;
+
+ /** @syncs: pointer to struct drm_xe_sync array */
+ __u64 syncs;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
+ *
+ * The example below shows how to use @drm_xe_exec_queue_create to create
+ * a simple exec_queue (no parallel submission) of class
+ * &DRM_XE_ENGINE_CLASS_RENDER.
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_engine_class_instance instance = {
+ * .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
+ * };
+ * struct drm_xe_exec_queue_create exec_queue_create = {
+ * .extensions = 0,
+ * .vm_id = vm,
+ * .width = 1,
+ * .num_placements = 1,
+ * .instances = to_user_pointer(&instance),
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
+ *
+ */
+struct drm_xe_exec_queue_create {
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
+
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @width: submission width (number of batch buffers per exec) for this exec queue */
+ __u16 width;
+
+ /** @num_placements: number of valid placements for this exec queue */
+ __u16 num_placements;
+
+ /** @vm_id: VM to use for this exec queue */
+ __u32 vm_id;
+
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @exec_queue_id: Returned exec queue ID */
+ __u32 exec_queue_id;
+
+ /**
+ * @instances: user pointer to a 2-d array of struct
+ * drm_xe_engine_class_instance
+ *
+ * length = width (i) * num_placements (j)
+ * index = j + i * width
+ */
+ __u64 instances;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
+ */
+struct drm_xe_exec_queue_destroy {
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
+ *
+ * The @property can be:
+ * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
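+ *
+ * For example, a sketch of checking whether an exec queue was banned
+ * (assuming `exec_queue` is a valid exec queue ID; error handling
+ * omitted):
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_xe_exec_queue_get_property get = {
+ *         .exec_queue_id = exec_queue,
+ *         .property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &get);
+ *     /* get.value is non-zero if the exec queue was banned */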
+ */
+struct drm_xe_exec_queue_get_property {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
+ /** @property: property to get */
+ __u32 property;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_sync - sync object
+ *
+ * The @type can be:
+ * - %DRM_XE_SYNC_TYPE_SYNCOBJ
+ * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
+ * - %DRM_XE_SYNC_TYPE_USER_FENCE
+ *
+ * and the @flags can be:
+ * - %DRM_XE_SYNC_FLAG_SIGNAL
+ *
+ * A minimal use of @drm_xe_sync looks like this:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_sync sync = {
+ * .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ * .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * };
+ * struct drm_syncobj_create syncobj_create = { 0 };
+ * ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
+ * sync.handle = syncobj_create.handle;
+ * ...
+ * use of &sync in drm_xe_exec or drm_xe_vm_bind
+ * ...
+ * struct drm_syncobj_wait wait = {
+ * .handles = &sync.handle,
+ * .timeout_nsec = INT64_MAX,
+ * .count_handles = 1,
+ * .flags = 0,
+ * .first_signaled = 0,
+ * .pad = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
+ */
+struct drm_xe_sync {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0
+#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1
+#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2
+ /** @type: Type of this sync object */
+ __u32 type;
+
+#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
+ /** @flags: Sync Flags */
+ __u32 flags;
+
+ union {
+ /** @handle: Handle for the object */
+ __u32 handle;
+
+ /**
+ * @addr: Address of user fence. When the sync is passed in via the exec
+ * IOCTL this is a GPU address in the VM. When the sync is passed in via
+ * the VM bind IOCTL this is a user pointer. In either case, it is the
+ * user's responsibility that this address is present and mapped when
+ * the user fence is signalled. Must be qword aligned.
+ */
+ __u64 addr;
+ };
+
+ /**
+ * @timeline_value: Input for the timeline sync object. Needs to be
+ * different from 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
+ *
+ * This is an example of using @drm_xe_exec to execute the object
+ * at BIND_ADDRESS (see example in @drm_xe_vm_bind) on an exec_queue
+ * (see example in @drm_xe_exec_queue_create). It can be synchronized
+ * by using the example provided for @drm_xe_sync.
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_exec exec = {
+ * .exec_queue_id = exec_queue,
+ * .syncs = &sync,
+ * .num_syncs = 1,
+ * .address = BIND_ADDRESS,
+ * .num_batch_buffer = 1,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
+ *
+ */
+struct drm_xe_exec {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID for the batch buffer */
+ __u32 exec_queue_id;
+
+ /** @num_syncs: Number of struct drm_xe_sync entries in the array. */
+ __u32 num_syncs;
+
+ /** @syncs: Pointer to struct drm_xe_sync array. */
+ __u64 syncs;
+
+ /**
+ * @address: address of batch buffer if num_batch_buffer == 1 or an
+ * array of batch buffer addresses
+ */
+ __u64 address;
+
+ /**
+ * @num_batch_buffer: number of batch buffers in this exec, must match
+ * the width of the exec queue
+ */
+ __u16 num_batch_buffer;
+
+ /** @pad: MBZ */
+ __u16 pad[3];
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
+ *
+ * Wait on a user fence. XE will wake up on every HW engine interrupt in
+ * the instances list and check if the user fence is complete::
+ *
+ * (*addr & MASK) OP (VALUE & MASK)
+ *
+ * Returns to user on user fence completion or timeout.
+ *
+ * The @op can be:
+ * - %DRM_XE_UFENCE_WAIT_OP_EQ
+ * - %DRM_XE_UFENCE_WAIT_OP_NEQ
+ * - %DRM_XE_UFENCE_WAIT_OP_GT
+ * - %DRM_XE_UFENCE_WAIT_OP_GTE
+ * - %DRM_XE_UFENCE_WAIT_OP_LT
+ * - %DRM_XE_UFENCE_WAIT_OP_LTE
+ *
+ * and the @flags can be:
+ * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
+ *
+ * The @mask values can be for example:
+ * - 0xffu for u8
+ * - 0xffffu for u16
+ * - 0xffffffffu for u32
+ * - 0xffffffffffffffffu for u64
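+ *
+ * For example, a sketch of waiting until the qword at `addr` (for
+ * instance the address used with a DRM_XE_SYNC_TYPE_USER_FENCE out-sync)
+ * equals a chosen value, with a one second relative timeout. `addr`,
+ * `exec_queue` and FENCE_VALUE are placeholders:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_xe_wait_user_fence wait = {
+ *         .addr = (__u64)(uintptr_t)addr,
+ *         .op = DRM_XE_UFENCE_WAIT_OP_EQ,
+ *         .value = FENCE_VALUE,
+ *         .mask = 0xffffffffffffffffu,
+ *         .timeout = 1000 * 1000 * 1000,
+ *         .exec_queue_id = exec_queue,
+ *     };
+ *     ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);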
+ */
+struct drm_xe_wait_user_fence {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @addr: user pointer address to wait on, must be qword aligned
+ */
+ __u64 addr;
+
+#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
+#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
+#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
+#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
+#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
+#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
+ /** @op: wait operation (type of comparison) */
+ __u16 op;
+
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0)
+ /** @flags: wait flags */
+ __u16 flags;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @value: compare value */
+ __u64 value;
+
+ /** @mask: comparison mask */
+ __u64 mask;
+
+ /**
+ * @timeout: how long to wait before bailing, value in nanoseconds.
+ * Without the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative
+ * timeout) it contains the timeout expressed in nanoseconds to wait
+ * (the fence will expire at now() + timeout).
+ * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute
+ * timeout) the wait will end at timeout (uses the system
+ * MONOTONIC_CLOCK).
+ * Passing a negative timeout leads to a never-ending wait.
+ *
+ * On relative timeout this value is updated with the timeout left
+ * (for restarting the call in case of signal delivery).
+ * On absolute timeout this value stays intact (a restarted call still
+ * expires at the same point of time).
+ */
+ __s64 timeout;
+
+ /** @exec_queue_id: exec_queue_id returned from the &DRM_IOCTL_XE_EXEC_QUEUE_CREATE IOCTL */
+ __u32 exec_queue_id;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_XE_DRM_H_ */
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 5f636b5afc..d44a8118b2 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -251,20 +251,22 @@ struct binder_extended_error {
__s32 param;
};
-#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
-#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
-#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
-#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
-#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
-#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
-#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info)
-#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)
-#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32)
-#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error)
+enum {
+ BINDER_WRITE_READ = _IOWR('b', 1, struct binder_write_read),
+ BINDER_SET_IDLE_TIMEOUT = _IOW('b', 3, __s64),
+ BINDER_SET_MAX_THREADS = _IOW('b', 5, __u32),
+ BINDER_SET_IDLE_PRIORITY = _IOW('b', 6, __s32),
+ BINDER_SET_CONTEXT_MGR = _IOW('b', 7, __s32),
+ BINDER_THREAD_EXIT = _IOW('b', 8, __s32),
+ BINDER_VERSION = _IOWR('b', 9, struct binder_version),
+ BINDER_GET_NODE_DEBUG_INFO = _IOWR('b', 11, struct binder_node_debug_info),
+ BINDER_GET_NODE_INFO_FOR_REF = _IOWR('b', 12, struct binder_node_info_for_ref),
+ BINDER_SET_CONTEXT_MGR_EXT = _IOW('b', 13, struct flat_binder_object),
+ BINDER_FREEZE = _IOW('b', 14, struct binder_freeze_info),
+ BINDER_GET_FROZEN_INFO = _IOWR('b', 15, struct binder_frozen_status_info),
+ BINDER_ENABLE_ONEWAY_SPAM_DETECTION = _IOW('b', 16, __u32),
+ BINDER_GET_EXTENDED_ERROR = _IOWR('b', 17, struct binder_extended_error),
+};
/*
* NOTE: Two special error codes you should check for when calling
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index 9204e4494b..6e25753015 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -116,6 +116,9 @@ enum batadv_icmp_packettype {
* only need routable IPv4 multicast packets we signed up for explicitly
* @BATADV_MCAST_WANT_NO_RTR6: we have no IPv6 multicast router and therefore
* only need routable IPv6 multicast packets we signed up for explicitly
+ * @BATADV_MCAST_HAVE_MC_PTYPE_CAPA: we can parse, receive and forward
+ * batman-adv multicast packets with a multicast tracker TVLV, and all our
+ * hard interfaces have an MTU of at least 1280 bytes.
*/
enum batadv_mcast_flags {
BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0,
@@ -123,6 +126,7 @@ enum batadv_mcast_flags {
BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2,
BATADV_MCAST_WANT_NO_RTR4 = 1UL << 3,
BATADV_MCAST_WANT_NO_RTR6 = 1UL << 4,
+ BATADV_MCAST_HAVE_MC_PTYPE_CAPA = 1UL << 5,
};
/* tt data subtypes */
@@ -174,14 +178,16 @@ enum batadv_bla_claimframe {
* @BATADV_TVLV_TT: translation table tvlv
* @BATADV_TVLV_ROAM: roaming advertisement tvlv
* @BATADV_TVLV_MCAST: multicast capability tvlv
+ * @BATADV_TVLV_MCAST_TRACKER: multicast tracker tvlv
*/
enum batadv_tvlv_type {
- BATADV_TVLV_GW = 0x01,
- BATADV_TVLV_DAT = 0x02,
- BATADV_TVLV_NC = 0x03,
- BATADV_TVLV_TT = 0x04,
- BATADV_TVLV_ROAM = 0x05,
- BATADV_TVLV_MCAST = 0x06,
+ BATADV_TVLV_GW = 0x01,
+ BATADV_TVLV_DAT = 0x02,
+ BATADV_TVLV_NC = 0x03,
+ BATADV_TVLV_TT = 0x04,
+ BATADV_TVLV_ROAM = 0x05,
+ BATADV_TVLV_MCAST = 0x06,
+ BATADV_TVLV_MCAST_TRACKER = 0x07,
};
#pragma pack(2)
@@ -488,6 +494,25 @@ struct batadv_bcast_packet {
};
/**
+ * struct batadv_mcast_packet - multicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved byte for alignment
+ * @tvlv_len: length of the appended tvlv buffer (in bytes)
+ */
+struct batadv_mcast_packet {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 reserved;
+ __be16 tvlv_len;
+ /* "4 bytes boundary + 2 bytes" long to make the payload after the
+ * following ethernet header again 4 bytes boundary aligned
+ */
+};
+
+/**
* struct batadv_coded_packet - network coded packet
* @packet_type: batman-adv packet type, part of the general header
* @version: batman-adv protocol version, part of the general header
@@ -628,6 +653,14 @@ struct batadv_tvlv_mcast_data {
__u8 reserved[3];
};
+/**
+ * struct batadv_tvlv_mcast_tracker - payload of a multicast tracker tvlv
+ * @num_dests: number of subsequent destination originator MAC addresses
+ */
+struct batadv_tvlv_mcast_tracker {
+ __be16 num_dests;
+};
+
#pragma pack()
#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index bda948a685..754e68ca87 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1074,9 +1074,11 @@ enum bpf_link_type {
BPF_LINK_TYPE_TCX = 11,
BPF_LINK_TYPE_UPROBE_MULTI = 12,
BPF_LINK_TYPE_NETKIT = 13,
- MAX_BPF_LINK_TYPE,
+ __MAX_BPF_LINK_TYPE,
};
+#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
+
enum bpf_perf_event_type {
BPF_PERF_EVENT_UNSPEC = 0,
BPF_PERF_EVENT_UPROBE = 1,
@@ -1200,6 +1202,9 @@ enum bpf_perf_event_type {
*/
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
+/* The verifier internal test flag. Behavior is undefined */
+#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
+
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
@@ -6560,6 +6565,16 @@ struct bpf_link_info {
__u64 missed;
} kprobe_multi;
struct {
+ __aligned_u64 path;
+ __aligned_u64 offsets;
+ __aligned_u64 ref_ctr_offsets;
+ __aligned_u64 cookies;
+ __u32 path_size; /* in/out: real path size on success, including zero byte */
+ __u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
+ __u32 flags;
+ __u32 pid;
+ } uprobe_multi;
+ struct {
__u32 type; /* enum bpf_perf_event_type */
__u32 :32;
union {
@@ -6889,6 +6904,7 @@ enum {
BPF_TCP_LISTEN,
BPF_TCP_CLOSING, /* Now a valid state */
BPF_TCP_NEW_SYN_RECV,
+ BPF_TCP_BOUND_INACTIVE,
BPF_TCP_MAX_STATES /* Leave at the end! */
};
@@ -7154,40 +7170,31 @@ struct bpf_spin_lock {
};
struct bpf_timer {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_head {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_node {
- __u64 :64;
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[3];
} __attribute__((aligned(8)));
struct bpf_rb_root {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_rb_node {
- __u64 :64;
- __u64 :64;
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[4];
} __attribute__((aligned(8)));
struct bpf_refcount {
- __u32 :32;
+ __u32 __opaque[1];
} __attribute__((aligned(4)));
struct bpf_sysctl {
diff --git a/include/uapi/linux/bpfilter.h b/include/uapi/linux/bpfilter.h
deleted file mode 100644
index cbc1f5813f..0000000000
--- a/include/uapi/linux/bpfilter.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_BPFILTER_H
-#define _UAPI_LINUX_BPFILTER_H
-
-#include <linux/if.h>
-
-enum {
- BPFILTER_IPT_SO_SET_REPLACE = 64,
- BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65,
- BPFILTER_IPT_SET_MAX,
-};
-
-enum {
- BPFILTER_IPT_SO_GET_INFO = 64,
- BPFILTER_IPT_SO_GET_ENTRIES = 65,
- BPFILTER_IPT_SO_GET_REVISION_MATCH = 66,
- BPFILTER_IPT_SO_GET_REVISION_TARGET = 67,
- BPFILTER_IPT_GET_MAX,
-};
-
-#endif /* _UAPI_LINUX_BPFILTER_H */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index f8bc34a6bc..cdf6ad8721 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -92,6 +92,7 @@ struct btrfs_qgroup_limit {
* struct btrfs_qgroup_inherit.flags
*/
#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0)
+#define BTRFS_QGROUP_INHERIT_FLAGS_SUPP (BTRFS_QGROUP_INHERIT_SET_LIMITS)
struct btrfs_qgroup_inherit {
__u64 flags;
diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h
index 14bc6e7421..42066f4eb8 100644
--- a/include/uapi/linux/cxl_mem.h
+++ b/include/uapi/linux/cxl_mem.h
@@ -46,6 +46,7 @@
___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \
___DEPRECATED(SCAN_MEDIA, "Scan Media"), \
___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"), \
+ ___C(GET_TIMESTAMP, "Get Timestamp"), \
___C(MAX, "invalid / last command")
#define ___C(a, b) CXL_MEM_COMMAND_ID_##a
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index b3c8383d34..130cae0d3e 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -139,6 +139,8 @@ enum devlink_command {
DEVLINK_CMD_SELFTESTS_GET, /* can dump */
DEVLINK_CMD_SELFTESTS_RUN,
+ DEVLINK_CMD_NOTIFY_FILTER_SET,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index 715a491d27..b4e947f9bf 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -179,6 +179,7 @@ enum dpll_a_pin {
DPLL_A_PIN_PHASE_ADJUST_MAX,
DPLL_A_PIN_PHASE_ADJUST,
DPLL_A_PIN_PHASE_OFFSET,
+ DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET,
__DPLL_A_PIN_MAX,
DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index f7fba0dc87..06ef6b78b7 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1266,6 +1266,8 @@ struct ethtool_rxfh_indir {
* hardware hash key.
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
* @rsvd8: Reserved for future use; see the note on reserved space.
* @rsvd32: Reserved for future use; see the note on reserved space.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
@@ -1285,7 +1287,8 @@ struct ethtool_rxfh {
__u32 indir_size;
__u32 key_size;
__u8 hfunc;
- __u8 rsvd8[3];
+ __u8 input_xfrm;
+ __u8 rsvd8[2];
__u32 rsvd32;
__u32 rss_config[];
};
@@ -1992,6 +1995,15 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define WOL_MODE_COUNT 8
+/* RSS hash function data
+ * XOR the corresponding source and destination fields of each specified
+ * protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH
+ * calculation. Note that this XORing reduces the input set entropy and could
+ * be exploited to reduce the RSS queue spread.
+ */
+#define RXH_XFRM_SYM_XOR (1 << 0)
+#define RXH_XFRM_NO_CHANGE 0xff
+
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
@@ -2128,18 +2140,6 @@ enum ethtool_reset_flags {
* refused. For drivers: ignore this field (use kernel's
* __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will
* be overwritten by kernel.
- * @supported: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, physical
- * connectors and other link features for which the interface
- * supports autonegotiation or auto-detection. Read-only.
- * @advertising: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, physical
- * connectors and other link features that are advertised through
- * autonegotiation or enabled for auto-detection.
- * @lp_advertising: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, and other
- * link features that the link partner advertised through
- * autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
* @master_slave_cfg: Master/slave port mode.
@@ -2181,6 +2181,21 @@ enum ethtool_reset_flags {
* %set_link_ksettings() should validate all fields other than @cmd
* and @link_mode_masks_nwords that are not described as read-only or
* deprecated, and must ignore all fields described as read-only.
+ *
+ * @link_mode_masks is divided into three bitfields, each of length
+ * @link_mode_masks_nwords:
+ * - supported: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features for which the interface
+ * supports autonegotiation or auto-detection. Read-only.
+ * - advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features that are advertised through
+ * autonegotiation or enabled for auto-detection.
+ * - lp_advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, and other
+ * link features that the link partner advertised through
+ * autonegotiation; 0 if unknown or not applicable. Read-only.
*/
struct ethtool_link_settings {
__u32 cmd;
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 73e2c10dc2..3f89074aa0 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -908,6 +908,7 @@ enum {
ETHTOOL_A_RSS_HFUNC, /* u32 */
ETHTOOL_A_RSS_INDIR, /* binary */
ETHTOOL_A_RSS_HKEY, /* binary */
+ ETHTOOL_A_RSS_INPUT_XFRM, /* u32 */
__ETHTOOL_A_RSS_CNT,
ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1),
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index da43810b74..48ad69f772 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -316,6 +316,7 @@ typedef int __bitwise __kernel_rwf_t;
#define PAGE_IS_SWAPPED (1 << 4)
#define PAGE_IS_PFNZERO (1 << 5)
#define PAGE_IS_HUGE (1 << 6)
+#define PAGE_IS_SOFT_DIRTY (1 << 7)
/*
* struct page_region - Page region with flags
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 2e23f99dc0..a5b743a2f7 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -757,6 +757,7 @@ enum {
MDBE_ATTR_VNI,
MDBE_ATTR_IFINDEX,
MDBE_ATTR_SRC_VNI,
+ MDBE_ATTR_STATE_MASK,
__MDBE_ATTR_MAX,
};
#define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1)
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 29ff80da27..ab9bcff96e 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -461,6 +461,286 @@ enum in6_addr_gen_mode {
/* Bridge section */
+/**
+ * DOC: Bridge enum definition
+ *
+ * Please *note* that the timer values in the following section are expected
+ * in clock_t format, which is seconds multiplied by USER_HZ (generally
+ * defined as 100).
+ *
+ * @IFLA_BR_FORWARD_DELAY
+ * The bridge forwarding delay is the time spent in LISTENING state
+ * (before moving to LEARNING) and in LEARNING state (before moving
+ * to FORWARDING). Only relevant if STP is enabled.
+ *
+ * The valid values are between (2 * USER_HZ) and (30 * USER_HZ).
+ * The default value is (15 * USER_HZ).
+ *
+ * @IFLA_BR_HELLO_TIME
+ * The time between hello packets sent by the bridge, when it is a root
+ * bridge or a designated bridge. Only relevant if STP is enabled.
+ *
+ * The valid values are between (1 * USER_HZ) and (10 * USER_HZ).
+ * The default value is (2 * USER_HZ).
+ *
+ * @IFLA_BR_MAX_AGE
+ * The hello packet timeout is the time until another bridge in the
+ * spanning tree is assumed to be dead, after reception of its last hello
+ * message. Only relevant if STP is enabled.
+ *
+ * The valid values are between (6 * USER_HZ) and (40 * USER_HZ).
+ * The default value is (20 * USER_HZ).
+ *
+ * @IFLA_BR_AGEING_TIME
+ * Configure the bridge's FDB entry aging time. It is the time a MAC
+ * address will be kept in the FDB after a packet has been received from
+ * that address. After this time has passed, entries are cleaned up.
+ * Values outside the 802.1 standard specification are allowed for special cases:
+ *
+ * * 0 - entry never ages (all permanent)
+ * * 1 - entry disappears (no persistence)
+ *
+ * The default value is (300 * USER_HZ).
+ *
+ * @IFLA_BR_STP_STATE
+ * Turn spanning tree protocol on (*IFLA_BR_STP_STATE* > 0) or off
+ * (*IFLA_BR_STP_STATE* == 0) for this bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_PRIORITY
+ * Set this bridge's spanning tree priority, used during STP root bridge
+ * election.
+ *
+ * The valid values are between 0 and 65535.
+ *
+ * @IFLA_BR_VLAN_FILTERING
+ * Turn VLAN filtering on (*IFLA_BR_VLAN_FILTERING* > 0) or off
+ * (*IFLA_BR_VLAN_FILTERING* == 0). When disabled, the bridge will not
+ * consider the VLAN tag when handling packets.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_VLAN_PROTOCOL
+ * Set the protocol used for VLAN filtering.
+ *
+ * The valid values are 0x8100(802.1Q) or 0x88A8(802.1AD). The default value
+ * is 0x8100(802.1Q).
+ *
+ * @IFLA_BR_GROUP_FWD_MASK
+ * The group forwarding mask. This is the bitmask that is applied to
+ * decide whether to forward incoming frames destined to link-local
+ * addresses (of the form 01:80:C2:00:00:0X).
+ *
+ * The default value is 0, which means the bridge does not forward any
+ * link-local frames coming on this port.
+ *
+ * @IFLA_BR_ROOT_ID
+ * The bridge root id, read only.
+ *
+ * @IFLA_BR_BRIDGE_ID
+ * The bridge id, read only.
+ *
+ * @IFLA_BR_ROOT_PORT
+ * The bridge root port, read only.
+ *
+ * @IFLA_BR_ROOT_PATH_COST
+ * The bridge root path cost, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE
+ * The bridge topology change, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE_DETECTED
+ * The bridge topology change detected, read only.
+ *
+ * @IFLA_BR_HELLO_TIMER
+ * The bridge hello timer, read only.
+ *
+ * @IFLA_BR_TCN_TIMER
+ * The bridge tcn timer, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE_TIMER
+ * The bridge topology change timer, read only.
+ *
+ * @IFLA_BR_GC_TIMER
+ * The bridge gc timer, read only.
+ *
+ * @IFLA_BR_GROUP_ADDR
+ * Set the MAC address of the multicast group this bridge uses for STP.
+ * The address must be a link-local address in standard Ethernet MAC address
+ * format. It is an address of the form 01:80:C2:00:00:0X, with X in [0, 4..f].
+ *
+ * The default value is 0.
+ *
+ * @IFLA_BR_FDB_FLUSH
+ * Flush bridge's fdb dynamic entries.
+ *
+ * @IFLA_BR_MCAST_ROUTER
+ * Set bridge's multicast router if IGMP snooping is enabled.
+ * The valid values are:
+ *
+ * * 0 - disabled.
+ * * 1 - automatic (queried).
+ * * 2 - permanently enabled.
+ *
+ * The default value is 1.
+ *
+ * @IFLA_BR_MCAST_SNOOPING
+ * Turn multicast snooping on (*IFLA_BR_MCAST_SNOOPING* > 0) or off
+ * (*IFLA_BR_MCAST_SNOOPING* == 0).
+ *
+ * The default value is 1.
+ *
+ * @IFLA_BR_MCAST_QUERY_USE_IFADDR
+ * If enabled, use the bridge's own IP address as source address for IGMP
+ * queries (*IFLA_BR_MCAST_QUERY_USE_IFADDR* > 0) or the default of 0.0.0.0
+ * (*IFLA_BR_MCAST_QUERY_USE_IFADDR* == 0).
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_QUERIER
+ * Enable (*IFLA_BR_MCAST_QUERIER* > 0) or disable
+ * (*IFLA_BR_MCAST_QUERIER* == 0) the IGMP querier, i.e. sending of
+ * multicast queries by the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_HASH_ELASTICITY
+ * Set the multicast database hash elasticity. It is the maximum chain
+ * length in the multicast hash table. This attribute is *deprecated* and
+ * the value is always 16.
+ *
+ * @IFLA_BR_MCAST_HASH_MAX
+ * Set the maximum size of the multicast hash table.
+ *
+ * The default value is 4096; the value must be a power of 2.
+ *
+ * @IFLA_BR_MCAST_LAST_MEMBER_CNT
+ * The Last Member Query Count is the number of Group-Specific Queries
+ * sent before the router assumes there are no local members. The Last
+ * Member Query Count is also the number of Group-and-Source-Specific
+ * Queries sent before the router assumes there are no listeners for a
+ * particular source.
+ *
+ * The default value is 2.
+ *
+ * @IFLA_BR_MCAST_STARTUP_QUERY_CNT
+ * The Startup Query Count is the number of Queries sent out on startup,
+ * separated by the Startup Query Interval.
+ *
+ * The default value is 2.
+ *
+ * @IFLA_BR_MCAST_LAST_MEMBER_INTVL
+ * The Last Member Query Interval is the Max Response Time inserted into
+ * Group-Specific Queries sent in response to Leave Group messages, and
+ * is also the amount of time between Group-Specific Query messages.
+ *
+ * The default value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_MEMBERSHIP_INTVL
+ * The interval after which the bridge will leave a group, if no membership
+ * reports for this group are received.
+ *
+ * The default value is (260 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERIER_INTVL
+ * The interval between queries sent by other routers. If no queries are
+ * seen after this delay has passed, the bridge will start to send its own
+ * queries (as if *IFLA_BR_MCAST_QUERIER* was enabled).
+ *
+ * The default value is (255 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERY_INTVL
+ * The Query Interval is the interval between General Queries sent by
+ * the Querier.
+ *
+ * The default value is (125 * USER_HZ). The minimum value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
+ * The Max Response Time used to calculate the Max Resp Code inserted
+ * into the periodic General Queries.
+ *
+ * The default value is (10 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_STARTUP_QUERY_INTVL
+ * The interval between queries in the startup phase.
+ *
+ * The default value is (125 * USER_HZ) / 4. The minimum value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_NF_CALL_IPTABLES
+ * Enable (*NF_CALL_IPTABLES* > 0) or disable (*NF_CALL_IPTABLES* == 0)
+ * iptables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_NF_CALL_IP6TABLES
+ * Enable (*NF_CALL_IP6TABLES* > 0) or disable (*NF_CALL_IP6TABLES* == 0)
+ * ip6tables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_NF_CALL_ARPTABLES
+ * Enable (*NF_CALL_ARPTABLES* > 0) or disable (*NF_CALL_ARPTABLES* == 0)
+ * arptables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_VLAN_DEFAULT_PVID
+ * VLAN ID applied to untagged and priority-tagged incoming packets.
+ *
+ * The default value is 1. Setting to the special value 0 makes all ports of
+ * this bridge not have a PVID by default, which means that they will
+ * not accept VLAN-untagged traffic.
+ *
+ * @IFLA_BR_PAD
+ * Bridge attribute padding type for netlink message.
+ *
+ * @IFLA_BR_VLAN_STATS_ENABLED
+ * Enable (*IFLA_BR_VLAN_STATS_ENABLED* == 1) or disable
+ * (*IFLA_BR_VLAN_STATS_ENABLED* == 0) per-VLAN stats accounting.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_STATS_ENABLED
+ * Enable (*IFLA_BR_MCAST_STATS_ENABLED* > 0) or disable
+ * (*IFLA_BR_MCAST_STATS_ENABLED* == 0) multicast (IGMP/MLD) stats
+ * accounting.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_IGMP_VERSION
+ * Set the IGMP version.
+ *
+ * The valid values are 2 and 3. The default value is 2.
+ *
+ * @IFLA_BR_MCAST_MLD_VERSION
+ * Set the MLD version.
+ *
+ * The valid values are 1 and 2. The default value is 1.
+ *
+ * @IFLA_BR_VLAN_STATS_PER_PORT
+ * Enable (*IFLA_BR_VLAN_STATS_PER_PORT* == 1) or disable
+ * (*IFLA_BR_VLAN_STATS_PER_PORT* == 0) per-VLAN per-port stats accounting.
+ * Can be changed only when there are no port VLANs configured.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MULTI_BOOLOPT
+ * The multi_boolopt is used to control new boolean options to avoid adding
+ * new netlink attributes. You can look at ``enum br_boolopt_id`` for those
+ * options.
+ *
+ * @IFLA_BR_MCAST_QUERIER_STATE
+ * Bridge mcast querier states, read only.
+ *
+ * @IFLA_BR_FDB_N_LEARNED
+ * The number of dynamically learned FDB entries for the current bridge,
+ * read only.
+ *
+ * @IFLA_BR_FDB_MAX_LEARNED
+ * Set the number of max dynamically learned FDB entries for the current
+ * bridge.
+ */
enum {
IFLA_BR_UNSPEC,
IFLA_BR_FORWARD_DELAY,
@@ -522,11 +802,252 @@ struct ifla_bridge_id {
__u8 addr[6]; /* ETH_ALEN */
};
+/**
+ * DOC: Bridge mode enum definition
+ *
+ * @BRIDGE_MODE_HAIRPIN
+ * Controls whether traffic may be sent back out of the port on which it
+ * was received. This option is also called reflective relay mode, and is
+ * used to support basic VEPA (Virtual Ethernet Port Aggregator)
+ * capabilities. By default, this flag is turned off and the bridge will
+ * not forward traffic back out of the receiving port.
+ */
enum {
BRIDGE_MODE_UNSPEC,
BRIDGE_MODE_HAIRPIN,
};
+/**
+ * DOC: Bridge port enum definition
+ *
+ * @IFLA_BRPORT_STATE
+ * The operation state of the port. Here are the valid values.
+ *
+ * * 0 - port is in STP *DISABLED* state. Make this port completely
+ * inactive for STP. This is also called BPDU filter and could be used
+ * to disable STP on an untrusted port, like a leaf virtual device.
+ * The traffic forwarding is also stopped on this port.
+ * * 1 - port is in STP *LISTENING* state. Only valid if STP is enabled
+ * on the bridge. In this state the port listens for STP BPDUs and
+ * drops all other traffic frames.
+ * * 2 - port is in STP *LEARNING* state. Only valid if STP is enabled on
+ * the bridge. In this state the port will accept traffic only for the
+ * purpose of updating MAC address tables.
+ * * 3 - port is in STP *FORWARDING* state. Port is fully active.
+ * * 4 - port is in STP *BLOCKING* state. Only valid if STP is enabled on
+ * the bridge. This state is used during the STP election process.
+ * In this state, port will only process STP BPDUs.
+ *
+ * @IFLA_BRPORT_PRIORITY
+ * The STP port priority. The valid values are between 0 and 255.
+ *
+ * @IFLA_BRPORT_COST
+ * The STP path cost of the port. The valid values are between 1 and 65535.
+ *
+ * @IFLA_BRPORT_MODE
+ * Set the bridge port mode. See *BRIDGE_MODE_HAIRPIN* for more details.
+ *
+ * @IFLA_BRPORT_GUARD
+ * Controls whether STP BPDUs will be processed by the bridge port. By
+ * default, the flag is turned off to allow BPDU processing. Turning this
+ * flag on will disable the bridge port if a STP BPDU packet is received.
+ *
+ * If the bridge has Spanning Tree enabled, hostile devices on the network
+ * may send BPDU on a port and cause network failure. Setting *guard on*
+ * will detect and stop this by disabling the port. The port will be
+ * restarted if the link is brought down, or removed and reattached.
+ *
+ * @IFLA_BRPORT_PROTECT
+ * Controls whether a given port is allowed to become a root port or not.
+ * Only used when STP is enabled on the bridge. By default the flag is off.
+ *
+ * This feature is also called root port guard. If BPDU is received from a
+ * leaf (edge) port, it should not be elected as root port. This could
+ * be used if using STP on a bridge and the downstream bridges are not fully
+ * trusted; this prevents a hostile guest from rerouting traffic.
+ *
+ * @IFLA_BRPORT_FAST_LEAVE
+ * This flag allows the bridge to immediately stop multicast traffic
+ * forwarding on a port that receives an IGMP Leave message. It is only used
+ * when IGMP snooping is enabled on the bridge. By default the flag is off.
+ *
+ * @IFLA_BRPORT_LEARNING
+ * Controls whether a given port will learn *source* MAC addresses from
+ * received traffic or not. Also controls whether dynamic FDB entries
+ * (which can also be added by software) will be refreshed by incoming
+ * traffic. By default this flag is on.
+ *
+ * @IFLA_BRPORT_UNICAST_FLOOD
+ * Controls whether unicast traffic for which there is no FDB entry will
+ * be flooded towards this port. By default this flag is on.
+ *
+ * @IFLA_BRPORT_PROXYARP
+ * Enable proxy ARP on this port.
+ *
+ * @IFLA_BRPORT_LEARNING_SYNC
+ * Controls whether a given port will sync MAC addresses learned on device
+ * port to bridge FDB.
+ *
+ * @IFLA_BRPORT_PROXYARP_WIFI
+ * Enable proxy ARP on this port which meets the extended requirements of
+ * the IEEE 802.11 and Hotspot 2.0 specifications.
+ *
+ * @IFLA_BRPORT_ROOT_ID
+ *
+ * @IFLA_BRPORT_BRIDGE_ID
+ *
+ * @IFLA_BRPORT_DESIGNATED_PORT
+ *
+ * @IFLA_BRPORT_DESIGNATED_COST
+ *
+ * @IFLA_BRPORT_ID
+ *
+ * @IFLA_BRPORT_NO
+ *
+ * @IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
+ *
+ * @IFLA_BRPORT_CONFIG_PENDING
+ *
+ * @IFLA_BRPORT_MESSAGE_AGE_TIMER
+ *
+ * @IFLA_BRPORT_FORWARD_DELAY_TIMER
+ *
+ * @IFLA_BRPORT_HOLD_TIMER
+ *
+ * @IFLA_BRPORT_FLUSH
+ * Flush bridge ports' fdb dynamic entries.
+ *
+ * @IFLA_BRPORT_MULTICAST_ROUTER
+ * Configure the port's multicast router presence. A port with
+ * a multicast router will receive all multicast traffic.
+ * The valid values are:
+ *
+ * * 0 disable multicast routers on this port
+ * * 1 let the system detect the presence of routers (default)
+ * * 2 permanently enable multicast traffic forwarding on this port
+ * * 3 enable multicast routers temporarily on this port, not depending
+ * on incoming queries.
+ *
+ * @IFLA_BRPORT_PAD
+ *
+ * @IFLA_BRPORT_MCAST_FLOOD
+ * Controls whether a given port will flood multicast traffic for which
+ * there is no MDB entry. By default this flag is on.
+ *
+ * @IFLA_BRPORT_MCAST_TO_UCAST
+ * Controls whether a given port will replicate packets using unicast
+ * instead of multicast. By default this flag is off.
+ *
+ * This is done by copying the packet per host and changing the multicast
+ * destination MAC to a unicast one accordingly.
+ *
+ * *mcast_to_unicast* works on top of the multicast snooping feature of
+ * the bridge, which means unicast copies are only delivered to hosts that
+ * are interested in the group and signaled this via IGMP/MLD reports
+ * previously.
+ *
+ * This feature is intended for interface types which have a more reliable
+ * and/or efficient way to deliver unicast packets than broadcast ones
+ * (e.g. WiFi).
+ *
+ * However, it should only be enabled on interfaces where no IGMPv2/MLDv1
+ * report suppression takes place. IGMP/MLD report suppression issue is
+ * usually overcome by the network daemon (supplicant) enabling AP isolation
+ * and by that separating all STAs.
+ *
+ * Delivery of STA-to-STA IP multicast is made possible again by enabling
+ * and utilizing the bridge hairpin mode, which considers the incoming port
+ * as a potential outgoing port, too (see *BRIDGE_MODE_HAIRPIN* option).
+ * Hairpin mode is performed after multicast snooping, therefore reports
+ * are only delivered to STAs running a multicast router.
+ *
+ * @IFLA_BRPORT_VLAN_TUNNEL
+ * Controls whether vlan to tunnel mapping is enabled on the port.
+ * By default this flag is off.
+ *
+ * @IFLA_BRPORT_BCAST_FLOOD
+ * Controls flooding of broadcast traffic on the given port. By default
+ * this flag is on.
+ *
+ * @IFLA_BRPORT_GROUP_FWD_MASK
+ * Set the group forward mask. This is a bitmask that is applied to
+ * decide whether to forward incoming frames destined to link-local
+ * addresses. The addresses are of the form 01:80:C2:00:00:0X. The mask
+ * defaults to 0, which means the bridge does not forward any link-local
+ * frames coming in on this port.
+ *
+ * @IFLA_BRPORT_NEIGH_SUPPRESS
+ * Controls whether neighbor discovery (arp and nd) proxy and suppression
+ * is enabled on the port. By default this flag is off.
+ *
+ * @IFLA_BRPORT_ISOLATED
+ * Controls whether a given port will be isolated, which means it will be
+ * able to communicate with non-isolated ports only. By default this
+ * flag is off.
+ *
+ * @IFLA_BRPORT_BACKUP_PORT
+ * Set a backup port. If the port loses carrier all traffic will be
+ * redirected to the configured backup port. Set the value to 0 to disable
+ * it.
+ *
+ * @IFLA_BRPORT_MRP_RING_OPEN
+ *
+ * @IFLA_BRPORT_MRP_IN_OPEN
+ *
+ * @IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT
+ * The limit on the number of per-port tracked EHT hosts. The default
+ * value is 512. Setting it to 0 is not allowed.
+ *
+ * @IFLA_BRPORT_MCAST_EHT_HOSTS_CNT
+ * The current number of tracked hosts, read only.
+ *
+ * @IFLA_BRPORT_LOCKED
+ * Controls whether a port will be locked, meaning that hosts behind the
+ * port will not be able to communicate through the port unless an FDB
+ * entry with the unit's MAC address is in the FDB. The common use case is
+ * that hosts are allowed access through authentication with the IEEE 802.1X
+ * protocol or based on whitelists. By default this flag is off.
+ *
+ * Please note that secure 802.1X deployments should always use the
+ * *BR_BOOLOPT_NO_LL_LEARN* flag, to not permit the bridge to populate its
+ * FDB based on link-local (EAPOL) traffic received on the port.
+ *
+ * @IFLA_BRPORT_MAB
+ * Controls whether a port will use MAC Authentication Bypass (MAB), a
+ * technique through which select MAC addresses may be allowed on a locked
+ * port, without using 802.1X authentication. Packets with an unknown source
+ * MAC address generate a "locked" FDB entry on the incoming bridge port.
+ * The common use case is for user space to react to these bridge FDB
+ * notifications and optionally replace the locked FDB entry with a normal
+ * one, allowing traffic to pass for whitelisted MAC addresses.
+ *
+ * Setting this flag also requires *IFLA_BRPORT_LOCKED* and
+ * *IFLA_BRPORT_LEARNING*. *IFLA_BRPORT_LOCKED* ensures that unauthorized
+ * data packets are dropped, and *IFLA_BRPORT_LEARNING* allows the dynamic
+ * FDB entries installed by user space (as replacements for the locked FDB
+ * entries) to be refreshed and/or aged out.
+ *
+ * @IFLA_BRPORT_MCAST_N_GROUPS
+ *
+ * @IFLA_BRPORT_MCAST_MAX_GROUPS
+ * Sets the maximum number of MDB entries that can be registered for a
+ * given port. Attempts to register more MDB entries at the port than this
+ * limit allows will be rejected, whether they are done through netlink
+ * (e.g. the bridge tool), or IGMP or MLD membership reports. Setting a
+ * limit of 0 disables the limit. The default value is 0.
+ *
+ * @IFLA_BRPORT_NEIGH_VLAN_SUPPRESS
+ * Controls whether neighbor discovery (arp and nd) proxy and suppression is
+ * enabled for a given port. By default this flag is off.
+ *
+ * Note that this option only takes effect when *IFLA_BRPORT_NEIGH_SUPPRESS*
+ * is enabled for a given port.
+ *
+ * @IFLA_BRPORT_BACKUP_NHID
+ * The FDB nexthop object ID to attach to packets being redirected to a
+ * backup port that has VLAN tunnel mapping enabled (via the
+ * *IFLA_BRPORT_VLAN_TUNNEL* option). Setting a value of 0 (default) has
+ * the effect of not attaching any ID.
+ */
enum {
IFLA_BRPORT_UNSPEC,
IFLA_BRPORT_STATE, /* Spanning tree state */
@@ -856,6 +1377,7 @@ enum {
IFLA_VXLAN_DF,
IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */
IFLA_VXLAN_LOCALBYPASS,
+ IFLA_VXLAN_LABEL_POLICY, /* IPv6 flow label policy; ifla_vxlan_label_policy */
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -873,6 +1395,13 @@ enum ifla_vxlan_df {
VXLAN_DF_MAX = __VXLAN_DF_END - 1,
};
+enum ifla_vxlan_label_policy {
+ VXLAN_LABEL_FIXED = 0,
+ VXLAN_LABEL_INHERIT = 1,
+ __VXLAN_LABEL_END,
+ VXLAN_LABEL_MAX = __VXLAN_LABEL_END - 1,
+};
+
/* GENEVE section */
enum {
IFLA_GENEVE_UNSPEC,
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index 8d48863472..d316984104 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -33,7 +33,13 @@
#define XDP_USE_SG (1 << 4)
/* Flags for xsk_umem_config flags */
-#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
+#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
+
+/* Force checksum calculation in software. Can be used for testing or
+ * working around potential HW issues. This option causes performance
+ * degradation and only works in XDP_COPY mode.
+ */
+#define XDP_UMEM_TX_SW_CSUM (1 << 1)
struct sockaddr_xdp {
__u16 sxdp_family;
@@ -76,6 +82,7 @@ struct xdp_umem_reg {
__u32 chunk_size;
__u32 headroom;
__u32 flags;
+ __u32 tx_metadata_len;
};
struct xdp_statistics {
@@ -105,6 +112,41 @@ struct xdp_options {
#define XSK_UNALIGNED_BUF_ADDR_MASK \
((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
+/* Request transmit timestamp. Upon completion, put it into tx_timestamp
+ * field of struct xsk_tx_metadata.
+ */
+#define XDP_TXMD_FLAGS_TIMESTAMP (1 << 0)
+
+/* Request transmit checksum offload. Checksum start position and offset
+ * are communicated via csum_start and csum_offset fields of struct
+ * xsk_tx_metadata.
+ */
+#define XDP_TXMD_FLAGS_CHECKSUM (1 << 1)
+
+/* AF_XDP offloads request. 'request' union member is consumed by the driver
+ * when the packet is being transmitted. 'completion' union member is
+ * filled by the driver when the transmit completion arrives.
+ */
+struct xsk_tx_metadata {
+ __u64 flags;
+
+ union {
+ struct {
+ /* XDP_TXMD_FLAGS_CHECKSUM */
+
+ /* Offset from desc->addr where checksumming should start. */
+ __u16 csum_start;
+ /* Offset from csum_start where checksum should be stored. */
+ __u16 csum_offset;
+ } request;
+
+ struct {
+ /* XDP_TXMD_FLAGS_TIMESTAMP */
+ __u64 tx_timestamp;
+ } completion;
+ };
+};
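+
+/* A sketch of requesting checksum offload for an IPv4/UDP packet, assuming
+ * the UMEM was registered with a non-zero tx_metadata_len and `data` points
+ * at the descriptor's packet data, with the metadata laid out immediately
+ * before it; `data`, `tx_metadata_len` and `desc` are placeholders for the
+ * application's UMEM pointers, and the pseudo-header checksum is expected
+ * to be pre-filled by the application:
+ *
+ * struct xsk_tx_metadata *meta =
+ * (struct xsk_tx_metadata *)(data - tx_metadata_len);
+ * meta->flags = XDP_TXMD_FLAGS_CHECKSUM;
+ * meta->request.csum_start = ETH_HLEN + sizeof(struct iphdr);
+ * meta->request.csum_offset = offsetof(struct udphdr, check);
+ * desc->options |= XDP_TX_METADATA;
+ */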
+
/* Rx/Tx descriptor */
struct xdp_desc {
__u64 addr;
@@ -121,4 +163,7 @@ struct xdp_desc {
*/
#define XDP_PKT_CONTD (1 << 0)
+/* TX packet carries valid metadata. */
+#define XDP_TX_METADATA (1 << 1)
+
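
Taken together, the above means a UMEM is registered with tx_metadata_len set
(typically sizeof(struct xsk_tx_metadata)), the metadata is written immediately
before the packet bytes that desc->addr points at, and the descriptor is flagged
with XDP_TX_METADATA. A minimal sketch under those assumptions (socket and ring
setup elided; the before-the-packet layout follows the 6.8 AF_XDP Tx metadata
documentation):

    #include <linux/if_xdp.h>
    #include <string.h>

    /* Assumes xdp_umem_reg.tx_metadata_len == sizeof(struct xsk_tx_metadata),
     * so the metadata area sits right before desc->addr in the UMEM. */
    static void request_tx_csum(void *umem_area, struct xdp_desc *desc,
                                __u16 csum_start, __u16 csum_offset)
    {
            struct xsk_tx_metadata *meta = (struct xsk_tx_metadata *)
                    ((char *)umem_area + desc->addr - sizeof(*meta));

            memset(meta, 0, sizeof(*meta));
            meta->flags = XDP_TXMD_FLAGS_CHECKSUM;
            meta->request.csum_start = csum_start;   /* from desc->addr */
            meta->request.csum_offset = csum_offset; /* from csum_start */
            desc->options |= XDP_TX_METADATA;        /* metadata is valid */
    }
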
#endif /* _LINUX_IF_XDP_H */
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 9c2ffdcd66..f2e0b2d50e 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -105,6 +105,8 @@ enum iio_modifier {
IIO_MOD_PITCH,
IIO_MOD_YAW,
IIO_MOD_ROLL,
+ IIO_MOD_LIGHT_UVA,
+ IIO_MOD_LIGHT_UVB,
};
enum iio_event_type {
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 022a520e31..03edf2ccdf 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -602,6 +602,7 @@
#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
#define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */
+#define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */
#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */
#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index f1c16f8177..7a673b5282 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -71,6 +71,7 @@ struct io_uring_sqe {
__u32 uring_cmd_flags;
__u32 waitid_flags;
__u32 futex_flags;
+ __u32 install_fd_flags;
};
__u64 user_data; /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
@@ -253,6 +254,7 @@ enum io_uring_op {
IORING_OP_FUTEX_WAIT,
IORING_OP_FUTEX_WAKE,
IORING_OP_FUTEX_WAITV,
+ IORING_OP_FIXED_FD_INSTALL,
/* this goes last, obviously */
IORING_OP_LAST,
@@ -387,6 +389,13 @@ enum {
#define IORING_MSG_RING_FLAGS_PASS (1U << 1)
/*
+ * IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags)
+ *
+ * IORING_FIXED_FD_NO_CLOEXEC Don't mark the fd as O_CLOEXEC
+ */
+#define IORING_FIXED_FD_NO_CLOEXEC (1U << 0)
+
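
A hedged sketch of preparing such a request by hand: the fixed-file slot is
assumed to go in sqe->fd with IOSQE_FIXED_FILE set, matching how other
fixed-file opcodes address the registered file table; ring setup and
submission are elided.

    #include <linux/io_uring.h>
    #include <string.h>

    static void prep_fixed_fd_install(struct io_uring_sqe *sqe, int fixed_slot)
    {
            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_FIXED_FD_INSTALL;
            sqe->fd = fixed_slot;           /* index into the fixed file table */
            sqe->flags = IOSQE_FIXED_FILE;  /* fd above names a fixed file */
            /* Keep close-on-exec off for the installed descriptor: */
            sqe->install_fd_flags = IORING_FIXED_FD_NO_CLOEXEC;
            /* On completion, cqe->res is the newly installed regular fd. */
    }
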
+/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
@@ -558,6 +567,9 @@ enum {
/* register a range of fixed file slots for automatic slot allocation */
IORING_REGISTER_FILE_ALLOC_RANGE = 25,
+ /* return status information for a buffer group */
+ IORING_REGISTER_PBUF_STATUS = 26,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -684,6 +696,13 @@ struct io_uring_buf_reg {
__u64 resv[3];
};
+/* argument for IORING_REGISTER_PBUF_STATUS */
+struct io_uring_buf_status {
+ __u32 buf_group; /* input */
+ __u32 head; /* output */
+ __u32 resv[8];
+};
+
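
A minimal sketch of querying the head through io_uring_register(2); the
nr_args == 1 convention is an assumption carried over from other
single-argument register opcodes:

    #include <linux/io_uring.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int pbuf_head(int ring_fd, unsigned int group)
    {
            struct io_uring_buf_status st;

            memset(&st, 0, sizeof(st));
            st.buf_group = group;   /* input: buffer group to query */
            if (syscall(__NR_io_uring_register, ring_fd,
                        IORING_REGISTER_PBUF_STATUS, &st, 1) < 0) {
                    perror("IORING_REGISTER_PBUF_STATUS");
                    return -1;
            }
            return (int)st.head;    /* output: current ring head */
    }
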
/*
* io_uring_restriction->opcode values
*/
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 0b2bc6252e..1dfeaa2e64 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -49,6 +49,7 @@ enum {
IOMMUFD_CMD_GET_HW_INFO,
IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING,
IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP,
+ IOMMUFD_CMD_HWPT_INVALIDATE,
};
/**
@@ -613,4 +614,82 @@ struct iommu_hwpt_get_dirty_bitmap {
#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
+/**
+ * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
+ * Data Type
+ * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
+ */
+enum iommu_hwpt_invalidate_data_type {
+ IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+};
+
+/**
+ * enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d
+ * stage-1 cache invalidation
+ * @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies
+ * to all-levels page structure cache or just
+ * the leaf PTE cache.
+ */
+enum iommu_hwpt_vtd_s1_invalidate_flags {
+ IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0,
+};
+
+/**
+ * struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation
+ * (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1)
+ * @addr: The start address of the range to be invalidated. It needs to
+ * be 4KB aligned.
+ * @npages: Number of contiguous 4K pages to be invalidated.
+ * @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags
+ * @__reserved: Must be 0
+ *
+ * The Intel VT-d specific invalidation data for user-managed stage-1 cache
+ * invalidation in nested translation. Userspace uses this structure to
+ * describe the impacted cache scope after modifying the stage-1 page table.
+ *
+ * All the caches related to the page table can be invalidated by setting
+ * @addr to 0 and @npages to U64_MAX.
+ *
+ * The device TLB will be invalidated automatically if ATS is enabled.
+ */
+struct iommu_hwpt_vtd_s1_invalidate {
+ __aligned_u64 addr;
+ __aligned_u64 npages;
+ __u32 flags;
+ __u32 __reserved;
+};
+
+/**
+ * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
+ * @size: sizeof(struct iommu_hwpt_invalidate)
+ * @hwpt_id: ID of a nested HWPT for cache invalidation
+ * @data_uptr: User pointer to an array of driver-specific cache invalidation
+ * data.
+ * @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data
+ * type of all the entries in the invalidation request array. It
+ * should be a type supported by the hwpt pointed by @hwpt_id.
+ * @entry_len: Length (in bytes) of a request entry in the request array
+ * @entry_num: Input the number of cache invalidation requests in the array.
+ * Output the number of requests successfully handled by the kernel.
+ * @__reserved: Must be 0.
+ *
+ * Invalidate the IOMMU cache for a user-managed page table. Modifications
+ * on a user-managed page table should be followed by this operation to sync
+ * the cache. Each ioctl can support one or more cache invalidation requests
+ * in the array that has a total size of @entry_len * @entry_num.
+ *
+ * An empty invalidation request array (@entry_num == 0) is allowed, in which
+ * case @entry_len and @data_uptr are ignored. This can be used to check
+ * whether the given @data_type is supported by the kernel.
+ */
+struct iommu_hwpt_invalidate {
+ __u32 size;
+ __u32 hwpt_id;
+ __aligned_u64 data_uptr;
+ __u32 data_type;
+ __u32 entry_len;
+ __u32 entry_num;
+ __u32 __reserved;
+};
+#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE)
#endif
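
Putting the pieces together, a hedged sketch of a full stage-1 flush for one
nested HWPT, following the @addr == 0 / @npages == U64_MAX convention
documented above; the iommufd descriptor and @hwpt_id are assumed to come
from earlier /dev/iommu setup (e.g. IOMMU_HWPT_ALLOC):

    #include <linux/iommufd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int invalidate_all(int iommufd, uint32_t hwpt_id)
    {
            struct iommu_hwpt_vtd_s1_invalidate req = {
                    .addr = 0,
                    .npages = UINT64_MAX,   /* whole address space */
            };
            struct iommu_hwpt_invalidate cmd = {
                    .size = sizeof(cmd),
                    .hwpt_id = hwpt_id,
                    .data_uptr = (uintptr_t)&req,
                    .data_type = IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
                    .entry_len = sizeof(req),
                    .entry_num = 1,
            };

            if (ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd) < 0) {
                    perror("IOMMU_HWPT_INVALIDATE");
                    return -1;
            }
            /* cmd.entry_num now holds the number of handled requests. */
            return 0;
    }
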
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 01766dd839..c17bb096ea 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -25,6 +25,7 @@
#define KEXEC_FILE_UNLOAD 0x00000001
#define KEXEC_FILE_ON_CRASH 0x00000002
#define KEXEC_FILE_NO_INITRAMFS 0x00000004
+#define KEXEC_FILE_DEBUG 0x00000008
/* These values match the ELF architecture values.
* Unless there is a good reason, that should continue to be the case.
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 211b86de35..c330853648 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -16,76 +16,6 @@
#define KVM_API_VERSION 12
-/* *** Deprecated interfaces *** */
-
-#define KVM_TRC_SHIFT 16
-
-#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
-#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1))
-
-#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
-#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
-#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
-
-#define KVM_TRC_HEAD_SIZE 12
-#define KVM_TRC_CYCLE_SIZE 8
-#define KVM_TRC_EXTRA_MAX 7
-
-#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
-#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
-#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
-#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
-#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
-#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
-#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
-#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
-#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
-#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
-#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
-#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
-#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
-#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
-#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
-#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
-#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
-#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
-#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
-#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
-#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
-#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
-#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
-#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
-
-struct kvm_user_trace_setup {
- __u32 buf_size;
- __u32 buf_nr;
-};
-
-#define __KVM_DEPRECATED_MAIN_W_0x06 \
- _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
-#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
-#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
-
-#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
-
-struct kvm_breakpoint {
- __u32 enabled;
- __u32 padding;
- __u64 address;
-};
-
-struct kvm_debug_guest {
- __u32 enabled;
- __u32 pad;
- struct kvm_breakpoint breakpoints[4];
- __u32 singlestep;
-};
-
-#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
-
-/* *** End of deprecated interfaces *** */
-
-
/* for KVM_SET_USER_MEMORY_REGION */
struct kvm_userspace_memory_region {
__u32 slot;
@@ -95,6 +25,19 @@ struct kvm_userspace_memory_region {
__u64 userspace_addr; /* start of the userspace allocated memory */
};
+/* for KVM_SET_USER_MEMORY_REGION2 */
+struct kvm_userspace_memory_region2 {
+ __u32 slot;
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size;
+ __u64 userspace_addr;
+ __u64 guest_memfd_offset;
+ __u32 guest_memfd;
+ __u32 pad1;
+ __u64 pad2[14];
+};
+
/*
* The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
* userspace; other bits are reserved for kvm internal use, which are defined
@@ -102,6 +45,7 @@ struct kvm_userspace_memory_region {
*/
#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
#define KVM_MEM_READONLY (1UL << 1)
+#define KVM_MEM_GUEST_MEMFD (1UL << 2)
/* for KVM_IRQ_LINE */
struct kvm_irq_level {
@@ -265,6 +209,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_RISCV_CSR 36
#define KVM_EXIT_NOTIFY 37
#define KVM_EXIT_LOONGARCH_IOCSR 38
+#define KVM_EXIT_MEMORY_FAULT 39
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -518,6 +463,13 @@ struct kvm_run {
#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
__u32 flags;
} notify;
+ /* KVM_EXIT_MEMORY_FAULT */
+ struct {
+#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3)
+ __u64 flags;
+ __u64 gpa;
+ __u64 size;
+ } memory_fault;
/* Fix the size of the union. */
char padding[256];
};
@@ -945,9 +897,6 @@ struct kvm_ppc_resize_hpt {
*/
#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06
-#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
-#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
@@ -1201,6 +1150,11 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
+#define KVM_CAP_USER_MEMORY2 231
+#define KVM_CAP_MEMORY_FAULT_INFO 232
+#define KVM_CAP_MEMORY_ATTRIBUTES 233
+#define KVM_CAP_GUEST_MEMFD 234
+#define KVM_CAP_VM_TYPES 235
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1291,6 +1245,7 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
+#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
struct kvm_xen_hvm_config {
__u32 flags;
@@ -1483,6 +1438,8 @@ struct kvm_vfio_spapr_tce {
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \
+ struct kvm_userspace_memory_region2)
/* enable ucontrol for s390 */
struct kvm_s390_ucas_mapping {
@@ -1507,20 +1464,8 @@ struct kvm_s390_ucas_mapping {
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
-#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
- struct kvm_assigned_pci_dev)
#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
-/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
-#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70
-#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
-#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
- struct kvm_assigned_pci_dev)
-#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \
- struct kvm_assigned_msix_nr)
-#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \
- struct kvm_assigned_msix_entry)
-#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
@@ -1537,9 +1482,6 @@ struct kvm_s390_ucas_mapping {
* KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
-/* Available with KVM_CAP_PCI_2_3 */
-#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \
- struct kvm_assigned_pci_dev)
/* Available with KVM_CAP_SIGNAL_MSI */
#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
@@ -1592,8 +1534,6 @@ struct kvm_s390_ucas_mapping {
#define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs)
#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
-/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
-#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87
#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
@@ -2267,4 +2207,24 @@ struct kvm_s390_zpci_op {
/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
+/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
+#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes)
+
+struct kvm_memory_attributes {
+ __u64 address;
+ __u64 size;
+ __u64 attributes;
+ __u64 flags;
+};
+
+#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
+
+#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
+
+struct kvm_create_guest_memfd {
+ __u64 size;
+ __u64 flags;
+ __u64 reserved[6];
+};
+
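
A hedged sketch tying the guest_memfd pieces together: create the in-kernel
file with KVM_CREATE_GUEST_MEMFD, then hand it to a memslot via
KVM_SET_USER_MEMORY_REGION2. The VM is assumed to have been created with a
type that supports private memory (see KVM_CAP_GUEST_MEMFD and
KVM_CAP_VM_TYPES); error paths beyond the ioctls are elided.

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int add_private_slot(int vm_fd, __u64 gpa, __u64 size, void *hva)
    {
            struct kvm_create_guest_memfd gmem = { .size = size };
            struct kvm_userspace_memory_region2 region;
            int gmem_fd;

            gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
            if (gmem_fd < 0) {
                    perror("KVM_CREATE_GUEST_MEMFD");
                    return -1;
            }

            memset(&region, 0, sizeof(region));
            region.slot = 0;
            region.flags = KVM_MEM_GUEST_MEMFD;
            region.guest_phys_addr = gpa;
            region.memory_size = size;
            region.userspace_addr = (__u64)(unsigned long)hva; /* shared view */
            region.guest_memfd = gmem_fd;                      /* private view */
            region.guest_memfd_offset = 0;

            return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
    }
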
#endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/lsm.h b/include/uapi/linux/lsm.h
new file mode 100644
index 0000000000..f8aef9ade5
--- /dev/null
+++ b/include/uapi/linux/lsm.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Linux Security Modules (LSM) - User space API
+ *
+ * Copyright (C) 2022 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef _UAPI_LINUX_LSM_H
+#define _UAPI_LINUX_LSM_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+
+/**
+ * struct lsm_ctx - LSM context information
+ * @id: the LSM id number, see LSM_ID_XXX
+ * @flags: LSM specific flags
+ * @len: length of the lsm_ctx struct, @ctx and any other data or padding
+ * @ctx_len: the size of @ctx
+ * @ctx: the LSM context value
+ *
+ * The @len field MUST be equal to the size of the lsm_ctx struct
+ * plus any additional padding and/or data placed after @ctx.
+ *
+ * In all cases @ctx_len MUST be equal to the length of @ctx.
+ * If @ctx is a string value it should be nul terminated with
+ * @ctx_len equal to `strlen(@ctx) + 1`. Binary values are
+ * supported.
+ *
+ * The @flags and @ctx fields SHOULD only be interpreted by the
+ * LSM specified by @id; they MUST be set to zero/0 when not used.
+ */
+struct lsm_ctx {
+ __u64 id;
+ __u64 flags;
+ __u64 len;
+ __u64 ctx_len;
+ __u8 ctx[] __counted_by(ctx_len);
+};
+
+/*
+ * ID tokens to identify Linux Security Modules (LSMs)
+ *
+ * These token values are used to uniquely identify specific LSMs
+ * in the kernel as well as in the kernel's LSM userspace API.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ID_UNDEF 0
+#define LSM_ID_CAPABILITY 100
+#define LSM_ID_SELINUX 101
+#define LSM_ID_SMACK 102
+#define LSM_ID_TOMOYO 103
+#define LSM_ID_APPARMOR 104
+#define LSM_ID_YAMA 105
+#define LSM_ID_LOADPIN 106
+#define LSM_ID_SAFESETID 107
+#define LSM_ID_LOCKDOWN 108
+#define LSM_ID_BPF 109
+#define LSM_ID_LANDLOCK 110
+
+/*
+ * LSM_ATTR_XXX definitions identify different LSM attributes
+ * which are used in the kernel's LSM userspace API. Support
+ * for these attributes varies across the different LSMs. None
+ * are required.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ATTR_UNDEF 0
+#define LSM_ATTR_CURRENT 100
+#define LSM_ATTR_EXEC 101
+#define LSM_ATTR_FSCREATE 102
+#define LSM_ATTR_KEYCREATE 103
+#define LSM_ATTR_PREV 104
+#define LSM_ATTR_SOCKCREATE 105
+
+/*
+ * LSM_FLAG_XXX definitions identify special handling instructions
+ * for the API.
+ */
+#define LSM_FLAG_SINGLE 0x0001
+
+#endif /* _UAPI_LINUX_LSM_H */
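
A minimal consumer sketch for this header, walking the lsm_ctx entries
returned by the new lsm_get_self_attr(2) syscall; the syscall number below is
an assumption from the 6.8 generic syscall table, and @len is used as the
entry stride per the rules documented above.

    #include <linux/lsm.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_lsm_get_self_attr
    #define __NR_lsm_get_self_attr 459      /* assumption: 6.8 generic table */
    #endif

    int main(void)
    {
            __u64 buf[512];                 /* 8-byte aligned scratch space */
            struct lsm_ctx *ctx = (struct lsm_ctx *)buf;
            __u32 size = sizeof(buf);
            long count;

            count = syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT,
                            ctx, &size, 0);
            if (count < 0) {
                    perror("lsm_get_self_attr");
                    return 1;
            }
            for (long i = 0; i < count; i++) {
                    printf("lsm %llu: %.*s\n", (unsigned long long)ctx->id,
                           (int)ctx->ctx_len, (const char *)ctx->ctx);
                    /* @len covers the whole entry, so it is also the stride. */
                    ctx = (struct lsm_ctx *)((char *)ctx + ctx->len);
            }
            return 0;
    }
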
diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h
index 171c5cce36..68a0272e99 100644
--- a/include/uapi/linux/mei.h
+++ b/include/uapi/linux/mei.h
@@ -100,14 +100,14 @@ struct mei_connect_client_data_vtag {
* a FW client on a tagged channel. From this point on, every read
* and write will communicate with the associated FW client
* on the tagged channel.
- * Upone close() the communication is terminated.
+ * Upon close() the communication is terminated.
*
* The IOCTL argument is a struct with a union that contains
* the input parameter and the output parameter for this IOCTL.
*
* The input parameter is the UUID of the FW Client and a vtag [0,255].
* The output parameter is the properties of the FW client
- * (FW protocool version and max message size).
+ * (FW protocol version and max message size).
*
* Clients that do not support tagged connection
* will respond with -EOPNOTSUPP.
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index bb242fdcfe..ad5478dbad 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -138,4 +138,74 @@ struct mount_attr {
/* List of all mount_attr versions. */
#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */
+
+/*
+ * Structure for getting mount/superblock/filesystem info with statmount(2).
+ *
+ * The interface is similar to statx(2): individual fields or groups can be
+ * selected with the @mask argument of statmount(). The kernel will set the
+ * @mask field according to the supported fields.
+ *
+ * If string fields are selected, then the caller needs to pass a buffer that
+ * has space after the fixed part of the structure. Nul terminated strings are
+ * copied there and offsets relative to @str are stored in the relevant fields.
+ * If the buffer is too small, then EOVERFLOW is returned. The size actually
+ * used is returned in @size.
+ */
+struct statmount {
+ __u32 size; /* Total size, including strings */
+ __u32 __spare1;
+ __u64 mask; /* What results were written */
+ __u32 sb_dev_major; /* Device ID */
+ __u32 sb_dev_minor;
+ __u64 sb_magic; /* ..._SUPER_MAGIC */
+ __u32 sb_flags; /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+ __u32 fs_type; /* [str] Filesystem type */
+ __u64 mnt_id; /* Unique ID of mount */
+ __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */
+ __u32 mnt_id_old; /* Reused IDs used in proc/.../mountinfo */
+ __u32 mnt_parent_id_old;
+ __u64 mnt_attr; /* MOUNT_ATTR_... */
+ __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+ __u64 mnt_peer_group; /* ID of shared peer group */
+ __u64 mnt_master; /* Mount receives propagation from this ID */
+ __u64 propagate_from; /* Propagation from in current namespace */
+ __u32 mnt_root; /* [str] Root of mount relative to root of fs */
+ __u32 mnt_point; /* [str] Mountpoint relative to current root */
+ __u64 __spare2[50];
+ char str[]; /* Variable size part containing strings */
+};
+
+/*
+ * Structure for passing mount ID and miscellaneous parameters to statmount(2)
+ * and listmount(2).
+ *
+ * For statmount(2) @param represents the request mask.
+ * For listmount(2) @param represents the last listed mount id (or zero).
+ */
+struct mnt_id_req {
+ __u32 size;
+ __u32 spare;
+ __u64 mnt_id;
+ __u64 param;
+};
+
+/* List of all mnt_id_req versions. */
+#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */
+
+/*
+ * @mask bits for statmount(2)
+ */
+#define STATMOUNT_SB_BASIC 0x00000001U /* Want/got sb_... */
+#define STATMOUNT_MNT_BASIC 0x00000002U /* Want/got mnt_... */
+#define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */
+#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
+#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */
+#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */
+
+/*
+ * Special @mnt_id values that can be passed to listmount
+ */
+#define LSMT_ROOT 0xffffffffffffffff /* root mount */
+
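
A short sketch of the request/reply dance described above, fetching one
string field; the __NR_statmount value is an assumption from the 6.8 x86-64
syscall table, and the unique mount ID would normally come from listmount(2)
or statx(2)'s STATX_MNT_ID_UNIQUE.

    #include <linux/mount.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_statmount
    #define __NR_statmount 457      /* assumption: 6.8 x86-64 table */
    #endif

    static void print_fs_type(__u64 unique_mnt_id)
    {
            __u64 buf[512];         /* fixed part plus room for strings */
            struct statmount *sm = (struct statmount *)buf;
            struct mnt_id_req req = {
                    .size = MNT_ID_REQ_SIZE_VER0,
                    .mnt_id = unique_mnt_id,
                    .param = STATMOUNT_FS_TYPE,     /* request mask */
            };

            memset(buf, 0, sizeof(buf));
            if (syscall(__NR_statmount, &req, sm, sizeof(buf), 0) < 0) {
                    perror("statmount");
                    return;
            }
            if (sm->mask & STATMOUNT_FS_TYPE)       /* string was written */
                    printf("fs type: %s\n", sm->str + sm->fs_type);
    }
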
#endif /* _UAPI_LINUX_MOUNT_H */
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index a6451561f3..74cfe49689 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -57,6 +57,7 @@ struct mptcp_info {
__u64 mptcpi_bytes_sent;
__u64 mptcpi_bytes_received;
__u64 mptcpi_bytes_acked;
+ __u8 mptcpi_subflows_total;
};
/* MPTCP Reset reason codes, rfc8684 */
diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
index b5d11aece4..50589e5dd6 100644
--- a/include/uapi/linux/mptcp_pm.h
+++ b/include/uapi/linux/mptcp_pm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
-/* Documentation/netlink/specs/mptcp.yaml */
+/* Documentation/netlink/specs/mptcp_pm.yaml */
/* YNL-GEN uapi header */
#ifndef _UAPI_LINUX_MPTCP_PM_H
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 2943a151d4..93cb411adf 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -44,13 +44,30 @@ enum netdev_xdp_act {
* timestamp via bpf_xdp_metadata_rx_timestamp().
* @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet
* hash via bpf_xdp_metadata_rx_hash().
+ * @NETDEV_XDP_RX_METADATA_VLAN_TAG: Device is capable of exposing receive
+ * packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag().
*/
enum netdev_xdp_rx_metadata {
NETDEV_XDP_RX_METADATA_TIMESTAMP = 1,
NETDEV_XDP_RX_METADATA_HASH = 2,
+ NETDEV_XDP_RX_METADATA_VLAN_TAG = 4,
+};
- /* private: */
- NETDEV_XDP_RX_METADATA_MASK = 3,
+/**
+ * enum netdev_xsk_flags
+ * @NETDEV_XSK_FLAGS_TX_TIMESTAMP: HW timestamping egress packets is supported
+ * by the driver.
+ * @NETDEV_XSK_FLAGS_TX_CHECKSUM: L3 checksum HW offload is supported by the
+ * driver.
+ */
+enum netdev_xsk_flags {
+ NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1,
+ NETDEV_XSK_FLAGS_TX_CHECKSUM = 2,
+};
+
+enum netdev_queue_type {
+ NETDEV_QUEUE_TYPE_RX,
+ NETDEV_QUEUE_TYPE_TX,
};
enum {
@@ -59,21 +76,80 @@ enum {
NETDEV_A_DEV_XDP_FEATURES,
NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
+ NETDEV_A_DEV_XSK_FEATURES,
__NETDEV_A_DEV_MAX,
NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
};
enum {
+ NETDEV_A_PAGE_POOL_ID = 1,
+ NETDEV_A_PAGE_POOL_IFINDEX,
+ NETDEV_A_PAGE_POOL_NAPI_ID,
+ NETDEV_A_PAGE_POOL_INFLIGHT,
+ NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
+ NETDEV_A_PAGE_POOL_DETACH_TIME,
+
+ __NETDEV_A_PAGE_POOL_MAX,
+ NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
+};
+
+enum {
+ NETDEV_A_PAGE_POOL_STATS_INFO = 1,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
+
+ __NETDEV_A_PAGE_POOL_STATS_MAX,
+ NETDEV_A_PAGE_POOL_STATS_MAX = (__NETDEV_A_PAGE_POOL_STATS_MAX - 1)
+};
+
+enum {
+ NETDEV_A_NAPI_IFINDEX = 1,
+ NETDEV_A_NAPI_ID,
+ NETDEV_A_NAPI_IRQ,
+ NETDEV_A_NAPI_PID,
+
+ __NETDEV_A_NAPI_MAX,
+ NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1)
+};
+
+enum {
+ NETDEV_A_QUEUE_ID = 1,
+ NETDEV_A_QUEUE_IFINDEX,
+ NETDEV_A_QUEUE_TYPE,
+ NETDEV_A_QUEUE_NAPI_ID,
+
+ __NETDEV_A_QUEUE_MAX,
+ NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
NETDEV_CMD_DEV_CHANGE_NTF,
+ NETDEV_CMD_PAGE_POOL_GET,
+ NETDEV_CMD_PAGE_POOL_ADD_NTF,
+ NETDEV_CMD_PAGE_POOL_DEL_NTF,
+ NETDEV_CMD_PAGE_POOL_CHANGE_NTF,
+ NETDEV_CMD_PAGE_POOL_STATS_GET,
+ NETDEV_CMD_QUEUE_GET,
+ NETDEV_CMD_NAPI_GET,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
};
#define NETDEV_MCGRP_MGMT "mgmt"
+#define NETDEV_MCGRP_PAGE_POOL "page-pool"
#endif /* _UAPI_LINUX_NETDEV_H */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index dced2c49da..1ccdcae243 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -72,7 +72,7 @@
* For drivers supporting TDLS with external setup (WIPHY_FLAG_SUPPORTS_TDLS
* and WIPHY_FLAG_TDLS_EXTERNAL_SETUP), the station lifetime is as follows:
* - a setup station entry is added, not yet authorized, without any rate
- * or capability information, this just exists to avoid race conditions
+ * or capability information; this just exists to avoid race conditions
* - when the TDLS setup is done, a single NL80211_CMD_SET_STATION is valid
* to add rate and capability information to the station and at the same
* time mark it authorized.
@@ -87,7 +87,7 @@
* DOC: Frame transmission/registration support
*
* Frame transmission and registration support exists to allow userspace
- * management entities such as wpa_supplicant react to management frames
+ * management entities such as wpa_supplicant to react to management frames
* that are not being handled by the kernel. This includes, for example,
* certain classes of action frames that cannot be handled in the kernel
* for various reasons.
@@ -113,7 +113,7 @@
*
* Frame transmission allows userspace to send for example the required
* responses to action frames. It is subject to some sanity checking,
- * but many frames can be transmitted. When a frame was transmitted, its
+ * but many frames can be transmitted. When a frame is transmitted, its
* status is indicated to the sending socket.
*
* For more technical details, see the corresponding command descriptions
@@ -123,7 +123,7 @@
/**
* DOC: Virtual interface / concurrency capabilities
*
- * Some devices are able to operate with virtual MACs, they can have
+ * Some devices are able to operate with virtual MACs; they can have
* more than one virtual interface. The capability handling for this
* is a bit complex though, as there may be a number of restrictions
* on the types of concurrency that are supported.
@@ -135,7 +135,7 @@
* Once concurrency is desired, more attributes must be observed:
* To start with, since some interface types are purely managed in
* software, like the AP-VLAN type in mac80211 for example, there's
- * an additional list of these, they can be added at any time and
+ * an additional list of these; they can be added at any time and
* are only restricted by some semantic restrictions (e.g. AP-VLAN
* cannot be added without a corresponding AP interface). This list
* is exported in the %NL80211_ATTR_SOFTWARE_IFTYPES attribute.
@@ -164,7 +164,7 @@
* Packet coalesce feature helps to reduce number of received interrupts
* to host by buffering these packets in firmware/hardware for some
* predefined time. Received interrupt will be generated when one of the
- * following events occur.
+ * following events occurs.
* a) Expiration of hardware timer whose expiration time is set to maximum
* coalescing delay of matching coalesce rule.
* b) Coalescing buffer in hardware reaches its limit.
@@ -174,7 +174,7 @@
* rule.
* a) Maximum coalescing delay
* b) List of packet patterns which needs to be matched
- * c) Condition for coalescence. pattern 'match' or 'no match'
+ * c) Condition for coalescence: pattern 'match' or 'no match'
* Multiple such rules can be created.
*/
@@ -213,7 +213,7 @@
/**
* DOC: FILS shared key authentication offload
*
- * FILS shared key authentication offload can be advertized by drivers by
+ * FILS shared key authentication offload can be advertised by drivers by
* setting @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. The drivers that support
* FILS shared key authentication offload should be able to construct the
* authentication and association frames for FILS shared key authentication and
@@ -239,7 +239,7 @@
* The PMKSA can be maintained in userspace persistently so that it can be used
* later after reboots or wifi turn off/on also.
*
- * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertized by a FILS
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertised by a FILS
* capable AP supporting PMK caching. It specifies the scope within which the
* PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
* %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based
@@ -290,12 +290,12 @@
* If the configuration needs to be applied for specific peer then the MAC
* address of the peer needs to be passed in %NL80211_ATTR_MAC, otherwise the
* configuration will be applied for all the connected peers in the vif except
- * any peers that have peer specific configuration for the TID by default; if
- * the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer specific values
+ * any peers that have peer-specific configuration for the TID by default; if
+ * the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer-specific values
* will be overwritten.
*
- * All this configuration is valid only for STA's current connection
- * i.e. the configuration will be reset to default when the STA connects back
+ * All this configuration is valid only for STA's current connection,
+ * i.e., the configuration will be reset to default when the STA connects back
* after disconnection/roaming, and this configuration will be cleared when
* the interface goes down.
*/
@@ -521,7 +521,7 @@
* %NL80211_ATTR_SCHED_SCAN_PLANS. If %NL80211_ATTR_SCHED_SCAN_PLANS is
* not specified and only %NL80211_ATTR_SCHED_SCAN_INTERVAL is specified,
* scheduled scan will run in an infinite loop with the specified interval.
- * These attributes are mutually exculsive,
+ * These attributes are mutually exclusive,
* i.e. NL80211_ATTR_SCHED_SCAN_INTERVAL must not be passed if
* NL80211_ATTR_SCHED_SCAN_PLANS is defined.
* If for some reason scheduled scan is aborted by the driver, all scan
@@ -552,7 +552,7 @@
* %NL80211_CMD_STOP_SCHED_SCAN command is received or when the interface
* is brought down while a scheduled scan was running.
*
- * @NL80211_CMD_GET_SURVEY: get survey resuls, e.g. channel occupation
+ * @NL80211_CMD_GET_SURVEY: get survey results, e.g. channel occupation
* or noise level
* @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
* NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
@@ -563,12 +563,13 @@
* using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
* %NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
* authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier
- * advertized by a FILS capable AP identifying the scope of PMKSA in an
+ * advertised by a FILS capable AP identifying the scope of PMKSA in an
* ESS.
* @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
* (for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
* %NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
- * authentication.
+ * authentication. Additionally, in the case of SAE and OWE offloads, the
+ * PMKSA entry can be deleted using %NL80211_ATTR_SSID.
* @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
*
* @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
@@ -607,7 +608,7 @@
* BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify
* the SSID (mainly for association, but is included in authentication
* request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ +
- * %NL80211_ATTR_WIPHY_FREQ_OFFSET is used to specify the frequence of the
+ * %NL80211_ATTR_WIPHY_FREQ_OFFSET is used to specify the frequency of the
* channel in MHz. %NL80211_ATTR_AUTH_TYPE is used to specify the
* authentication type. %NL80211_ATTR_IE is used to define IEs
* (VendorSpecificInfo, but also including RSN IE and FT IEs) to be added
@@ -816,7 +817,7 @@
* reached.
* @NL80211_CMD_SET_CHANNEL: Set the channel (using %NL80211_ATTR_WIPHY_FREQ
* and the attributes determining channel width) the given interface
- * (identifed by %NL80211_ATTR_IFINDEX) shall operate on.
+ * (identified by %NL80211_ATTR_IFINDEX) shall operate on.
* In case multiple channels are supported by the device, the mechanism
* with which it switches channels is implementation-defined.
* When a monitor interface is given, it can only switch channel while
@@ -888,7 +889,7 @@
* inform userspace of the new replay counter.
*
* @NL80211_CMD_PMKSA_CANDIDATE: This is used as an event to inform userspace
- * of PMKSA caching dandidates.
+ * of PMKSA caching candidates.
*
* @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup).
* In addition, this can be used as an event to request userspace to take
@@ -924,7 +925,7 @@
*
* @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface
* by sending a null data frame to it and reporting when the frame is
- * acknowleged. This is used to allow timing out inactive clients. Uses
+ * acknowledged. This is used to allow timing out inactive clients. Uses
* %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a
* direct reply with an %NL80211_ATTR_COOKIE that is later used to match
* up the event with the request. The event includes the same data and
@@ -1135,11 +1136,15 @@
* @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously
* configured PMK for the authenticator address identified by
* %NL80211_ATTR_MAC.
- * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates an 802.1X FT roam was
- * completed successfully. Drivers that support 4 way handshake offload
- * should send this event after indicating 802.1X FT assocation with
- * %NL80211_CMD_ROAM. If the 4 way handshake failed %NL80211_CMD_DISCONNECT
- * should be indicated instead.
+ * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates the port is authorized
+ * and open for regular data traffic. For STA/P2P-client, this event is sent
+ * with the AP MAC address; for AP/P2P-GO, the event carries the STA/P2P-
+ * client MAC address.
+ * Drivers that support 4-way handshake offload should send this event for
+ * STA/P2P-client after a successful 4-way HS or after 802.1X FT following
+ * NL80211_CMD_CONNECT or NL80211_CMD_ROAM. Drivers using AP/P2P-GO 4-way
+ * handshake offload should send this event on successful completion of the
+ * 4-way handshake with the peer (STA/P2P-client).
* @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request
* and RX notification. This command is used both as a request to transmit
* a control port frame and as a notification that a control port frame
@@ -1323,6 +1328,11 @@
* Multi-Link reconfiguration. %NL80211_ATTR_MLO_LINKS is used to provide
* information about the removed STA MLD setup links.
*
+ * @NL80211_CMD_SET_TID_TO_LINK_MAPPING: Set the TID to Link Mapping for a
+ * non-AP MLD station. The %NL80211_ATTR_MLO_TTLM_DLINK and
+ * %NL80211_ATTR_MLO_TTLM_ULINK attributes are used to specify the
+ * TID to Link mapping for downlink/uplink traffic.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1578,6 +1588,8 @@ enum nl80211_commands {
NL80211_CMD_LINKS_REMOVED,
+ NL80211_CMD_SET_TID_TO_LINK_MAPPING,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1835,7 +1847,7 @@ enum nl80211_commands {
* using %CMD_CONTROL_PORT_FRAME. If control port routing over NL80211 is
* to be used then userspace must also use the %NL80211_ATTR_SOCKET_OWNER
* flag. When used with %NL80211_ATTR_CONTROL_PORT_NO_PREAUTH, pre-auth
- * frames are not forwared over the control port.
+ * frames are not forwarded over the control port.
*
* @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver.
* We recommend using nested, driver-specific attributes within this.
@@ -1972,10 +1984,10 @@ enum nl80211_commands {
* bit. Depending on which antennas are selected in the bitmap, 802.11n
* drivers can derive which chainmasks to use (if all antennas belonging to
* a particular chain are disabled this chain should be disabled) and if
- * a chain has diversity antennas wether diversity should be used or not.
+ * a chain has diversity antennas whether diversity should be used or not.
* HT capabilities (STBC, TX Beamforming, Antenna selection) can be
* derived from the available chains after applying the antenna mask.
- * Non-802.11n drivers can derive wether to use diversity or not.
+ * Non-802.11n drivers can derive whether to use diversity or not.
* Drivers may reject configurations or RX/TX mask combinations they cannot
* support by returning -EINVAL.
*
@@ -2545,7 +2557,7 @@ enum nl80211_commands {
* from successful FILS authentication and is used with
* %NL80211_CMD_CONNECT.
*
- * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertized by a FILS AP
+ * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertised by a FILS AP
* identifying the scope of PMKSAs. This is used with
* @NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
*
@@ -2826,6 +2838,19 @@ enum nl80211_commands {
* @NL80211_ATTR_MLO_LINK_DISABLED: Flag attribute indicating that the link is
* disabled.
*
+ * @NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA: Include BSS usage data, i.e.
+ * include BSSes that can only be used in restricted scenarios and/or
+ * cannot be used at all.
+ *
+ * @NL80211_ATTR_MLO_TTLM_DLINK: Binary attribute specifying the downlink TID to
+ * link mapping. The length is 8 * sizeof(u16). For each TID the link
+ * mapping is as defined in section 9.4.2.314 (TID-To-Link Mapping element)
+ * in Draft P802.11be_D4.0.
+ * @NL80211_ATTR_MLO_TTLM_ULINK: Binary attribute specifying the uplink TID to
+ * link mapping. The length is 8 * sizeof(u16). For each TID the link
+ * mapping is as defined in section 9.4.2.314 (TID-To-Link Mapping element)
+ * in Draft P802.11be_D4.0.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3364,6 +3389,11 @@ enum nl80211_attrs {
NL80211_ATTR_MLO_LINK_DISABLED,
+ NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA,
+
+ NL80211_ATTR_MLO_TTLM_DLINK,
+ NL80211_ATTR_MLO_TTLM_ULINK,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -4170,7 +4200,7 @@ enum nl80211_wmm_rule {
* (100 * dBm).
* @NL80211_FREQUENCY_ATTR_DFS_STATE: current state for DFS
* (enum nl80211_dfs_state)
- * @NL80211_FREQUENCY_ATTR_DFS_TIME: time in miliseconds for how long
+ * @NL80211_FREQUENCY_ATTR_DFS_TIME: time in milliseconds for how long
* this channel is in this DFS state.
* @NL80211_FREQUENCY_ATTR_NO_HT40_MINUS: HT40- isn't possible with this
* channel as the control channel
@@ -4226,6 +4256,14 @@ enum nl80211_wmm_rule {
* in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_PSD: Power spectral density (in dBm) that
* is allowed on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_DFS_CONCURRENT: Operation on this channel is
+ * allowed for peer-to-peer or ad hoc communication under the control
+ * of a DFS master which operates on the same channel (FCC-594280 D01
+ * Section B.3). Should be used together with %NL80211_RRF_DFS only.
+ * @NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT: Client connection to VLP AP
+ * not allowed using this channel
+ * @NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT: Client connection to AFC AP
+ * not allowed using this channel
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -4265,6 +4303,9 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_320MHZ,
NL80211_FREQUENCY_ATTR_NO_EHT,
NL80211_FREQUENCY_ATTR_PSD,
+ NL80211_FREQUENCY_ATTR_DFS_CONCURRENT,
+ NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT,
+ NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -4470,6 +4511,12 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed
* @NL80211_RRF_NO_EHT: EHT operation not allowed
* @NL80211_RRF_PSD: Ruleset has power spectral density value
+ * @NL80211_RRF_DFS_CONCURRENT: Operation on this channel is allowed for
 * peer-to-peer or ad hoc communication under the control of a DFS master
 * which operates on the same channel (FCC-594280 D01 Section B.3).
 * Should be used together with %NL80211_RRF_DFS only.
+ * @NL80211_RRF_NO_UHB_VLP_CLIENT: Client connection to VLP AP not allowed
+ * @NL80211_RRF_NO_UHB_AFC_CLIENT: Client connection to AFC AP not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
@@ -4491,6 +4538,9 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_320MHZ = 1<<18,
NL80211_RRF_NO_EHT = 1<<19,
NL80211_RRF_PSD = 1<<20,
+ NL80211_RRF_DFS_CONCURRENT = 1<<21,
+ NL80211_RRF_NO_UHB_VLP_CLIENT = 1<<22,
+ NL80211_RRF_NO_UHB_AFC_CLIENT = 1<<23,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
@@ -5028,6 +5078,33 @@ enum nl80211_bss_scan_width {
};
/**
+ * enum nl80211_bss_use_for - bitmap indicating possible BSS use
+ * @NL80211_BSS_USE_FOR_NORMAL: Use this BSS for normal "connection",
+ * including IBSS/MBSS depending on the type.
+ * @NL80211_BSS_USE_FOR_MLD_LINK: This BSS can be used as a link in an
+ * MLO connection. Note that for an MLO connection, all links including
+ * the assoc link must have this flag set, and the assoc link must
+ * additionally have %NL80211_BSS_USE_FOR_NORMAL set.
+ */
+enum nl80211_bss_use_for {
+ NL80211_BSS_USE_FOR_NORMAL = 1 << 0,
+ NL80211_BSS_USE_FOR_MLD_LINK = 1 << 1,
+};
+
+/**
+ * enum nl80211_bss_cannot_use_reasons - reason(s) connection to a
+ * BSS isn't possible
+ * @NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY: NSTR nonprimary links aren't
+ * supported by the device, and this BSS entry represents one.
+ * @NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH: The STA does not support
+ * the AP power type (SP, VLP, AP) that the AP uses.
+ */
+enum nl80211_bss_cannot_use_reasons {
+ NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 1 << 0,
+ NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 1 << 1,
+};
+
+/**
* enum nl80211_bss - netlink attributes for a BSS
*
* @__NL80211_BSS_INVALID: invalid
@@ -5079,6 +5156,14 @@ enum nl80211_bss_scan_width {
* @NL80211_BSS_FREQUENCY_OFFSET: frequency offset in KHz
* @NL80211_BSS_MLO_LINK_ID: MLO link ID of the BSS (u8).
* @NL80211_BSS_MLD_ADDR: MLD address of this BSS if connected to it.
+ * @NL80211_BSS_USE_FOR: u32 bitmap attribute indicating what the BSS can be
+ * used for, see &enum nl80211_bss_use_for.
+ * @NL80211_BSS_CANNOT_USE_REASONS: Indicates the reason that this BSS cannot
+ * be used for all or some of the possible uses by the device reporting it,
+ * even though its presence was detected.
+ * This is a u64 attribute containing a bitmap of values from
+ * &enum nl80211_bss_cannot_use_reasons; note that the attribute may be
+ * missing if no reasons are specified.
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -5106,6 +5191,8 @@ enum nl80211_bss {
NL80211_BSS_FREQUENCY_OFFSET,
NL80211_BSS_MLO_LINK_ID,
NL80211_BSS_MLD_ADDR,
+ NL80211_BSS_USE_FOR,
+ NL80211_BSS_CANNOT_USE_REASONS,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -5454,7 +5541,7 @@ enum nl80211_tx_rate_setting {
* (%NL80211_TID_CONFIG_ATTR_TIDS, %NL80211_TID_CONFIG_ATTR_OVERRIDE).
* @NL80211_TID_CONFIG_ATTR_PEER_SUPP: same as the previous per-vif one, but
* per peer instead.
- * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribue, if set indicates
+ * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribute, if set indicates
* that the new configuration overrides all previous peer
* configurations, otherwise previous peer specific configurations
* should be left untouched.
@@ -5837,7 +5924,7 @@ enum nl80211_attr_coalesce_rule {
/**
* enum nl80211_coalesce_condition - coalesce rule conditions
- * @NL80211_COALESCE_CONDITION_MATCH: coalaesce Rx packets when patterns
+ * @NL80211_COALESCE_CONDITION_MATCH: coalesce Rx packets when patterns
* in a rule are matched.
* @NL80211_COALESCE_CONDITION_NO_MATCH: coalesce Rx packets when patterns
* in a rule are not matched.
@@ -5936,7 +6023,7 @@ enum nl80211_if_combination_attrs {
* enum nl80211_plink_state - state of a mesh peer link finite state machine
*
* @NL80211_PLINK_LISTEN: initial state, considered the implicit
- * state of non existent mesh peer links
+ * state of non-existent mesh peer links
* @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to
* this mesh peer
* @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received
@@ -6229,7 +6316,7 @@ enum nl80211_feature_flags {
* request to use RRM (see %NL80211_ATTR_USE_RRM) with
* %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set
* the ASSOC_REQ_USE_RRM flag in the association request even if
- * NL80211_FEATURE_QUIET is not advertized.
+ * NL80211_FEATURE_QUIET is not advertised.
* @NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER: This device supports MU-MIMO air
* sniffer which means that it can be configured to hear packets from
* certain groups which can be configured by the
@@ -6241,13 +6328,15 @@ enum nl80211_feature_flags {
* the BSS that the interface that requested the scan is connected to
* (if available).
* @NL80211_EXT_FEATURE_BSS_PARENT_TSF: Per BSS, this driver reports the
- * time the last beacon/probe was received. The time is the TSF of the
- * BSS that the interface that requested the scan is connected to
- * (if available).
+ * time the last beacon/probe was received. For a non-MLO connection, the
+ * time is the TSF of the BSS that the interface that requested the scan is
+ * connected to (if available). For an MLO connection, the time is the TSF
+ * of the BSS corresponding to the link ID specified in the scan request (if
+ * specified).
* @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of
* channel dwell time.
* @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate
- * configuration (AP/mesh), supporting a legacy (non HT/VHT) rate.
+ * configuration (AP/mesh), supporting a legacy (non-HT/VHT) rate.
* @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate
* configuration (AP/mesh) with HT rates.
* @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
@@ -6426,6 +6515,11 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_OWE_OFFLOAD_AP: Driver/Device wants to do OWE DH IE
* handling in AP mode.
*
+ * @NL80211_EXT_FEATURE_DFS_CONCURRENT: The device supports peer-to-peer or
+ * ad hoc operation on DFS channels under the control of a concurrent
+ * DFS master on the same channel as described in FCC-594280 D01
+ * (Section B.3). This, for example, allows P2P GO and P2P clients to
+ * operate on DFS channels as long as there's a concurrent BSS connection.
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6499,6 +6593,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA,
NL80211_EXT_FEATURE_OWE_OFFLOAD,
NL80211_EXT_FEATURE_OWE_OFFLOAD_AP,
+ NL80211_EXT_FEATURE_DFS_CONCURRENT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -6583,7 +6678,7 @@ enum nl80211_timeout_reason {
* request parameters IE in the probe request
* @NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe responses
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE: send probe request frames at
- * rate of at least 5.5M. In case non OCE AP is discovered in the channel,
+ * rate of at least 5.5M. In case non-OCE AP is discovered in the channel,
* only the first probe req in the channel will be sent in high rate.
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: allow probe request
* tx deferral (dot11FILSProbeDelay shall be set to 15ms)
@@ -6619,7 +6714,7 @@ enum nl80211_timeout_reason {
* received on the 2.4/5 GHz channels to actively scan only the 6GHz
* channels on which APs are expected to be found. Note that when not set,
* the scan logic would scan all 6GHz channels, but since transmission of
- * probe requests on non PSC channels is limited, it is highly likely that
+ * probe requests on non-PSC channels is limited, it is highly likely that
* these channels would passively be scanned. Also note that when the flag
* is set, in addition to the colocated APs, PSC channels would also be
* scanned if the user space has asked for it.
@@ -6951,7 +7046,7 @@ enum nl80211_nan_func_term_reason {
* The instance ID for the follow up Service Discovery Frame. This is u8.
* @NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID: relevant if the function's type
* is follow up. This is a u8.
- * The requestor instance ID for the follow up Service Discovery Frame.
+ * The requester instance ID for the follow up Service Discovery Frame.
* @NL80211_NAN_FUNC_FOLLOW_UP_DEST: the MAC address of the recipient of the
* follow up Service Discovery Frame. This is a binary attribute.
* @NL80211_NAN_FUNC_CLOSE_RANGE: is this function limited for devices in a
@@ -7341,7 +7436,7 @@ enum nl80211_peer_measurement_attrs {
* @NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED: flag attribute indicating if
* trigger based ranging measurement is supported
* @NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED: flag attribute indicating
- * if non trigger based ranging measurement is supported
+ * if non-trigger-based ranging measurement is supported
*
* @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number
@@ -7395,7 +7490,7 @@ enum nl80211_peer_measurement_ftm_capa {
* if neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor
* %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based
* ranging will be used.
- * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non trigger based
+ * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non-trigger-based
* ranging measurement (flag)
* This attribute and %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED are
* mutually exclusive.
@@ -7473,7 +7568,7 @@ enum nl80211_peer_measurement_ftm_failure_reasons {
* @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS: number of FTM Request frames
* transmitted (u32, optional)
* @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES: number of FTM Request frames
- * that were acknowleged (u32, optional)
+ * that were acknowledged (u32, optional)
* @NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME: retry time received from the
* busy peer (u32, seconds)
* @NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP: actual number of bursts exponent
diff --git a/include/uapi/linux/nsm.h b/include/uapi/linux/nsm.h
new file mode 100644
index 0000000000..e529f232f6
--- /dev/null
+++ b/include/uapi/linux/nsm.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef __UAPI_LINUX_NSM_H
+#define __UAPI_LINUX_NSM_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define NSM_MAGIC 0x0A
+
+#define NSM_REQUEST_MAX_SIZE 0x1000
+#define NSM_RESPONSE_MAX_SIZE 0x3000
+
+struct nsm_iovec {
+ __u64 addr; /* Virtual address of target buffer */
+ __u64 len; /* Length of target buffer */
+};
+
+/* Raw NSM message. Only available with CAP_SYS_ADMIN. */
+struct nsm_raw {
+ /* Request from user */
+ struct nsm_iovec request;
+ /* Response to user */
+ struct nsm_iovec response;
+};
+#define NSM_IOCTL_RAW _IOWR(NSM_MAGIC, 0x0, struct nsm_raw)
+
+#endif /* __UAPI_LINUX_NSM_H */
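
A hedged usage sketch: one raw request/response round trip through the Nitro
Secure Module driver. The /dev/nsm node name is an assumption about how the
character device is exposed, the payloads are opaque CBOR-encoded NSM
messages (out of scope here), and per the comment above the caller needs
CAP_SYS_ADMIN.

    #include <linux/nsm.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static long nsm_call(const void *req, uint64_t req_len,
                         void *rsp, uint64_t rsp_len)
    {
            struct nsm_raw raw = {
                    .request  = { .addr = (uintptr_t)req, .len = req_len },
                    .response = { .addr = (uintptr_t)rsp, .len = rsp_len },
            };
            int fd = open("/dev/nsm", O_RDWR);      /* assumed device node */
            long ret;

            if (fd < 0)
                    return -1;
            ret = ioctl(fd, NSM_IOCTL_RAW, &raw);
            close(fd);
            /* On success the driver updates raw.response.len (assumption). */
            return ret < 0 ? -1 : (long)raw.response.len;
    }
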
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
index f9c1af8d14..94b46b043b 100644
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -11,7 +11,8 @@
#define __UAPI_LINUX_PCITEST_H
#define PCITEST_BAR _IO('P', 0x1)
-#define PCITEST_LEGACY_IRQ _IO('P', 0x2)
+#define PCITEST_INTX_IRQ _IO('P', 0x2)
+#define PCITEST_LEGACY_IRQ PCITEST_INTX_IRQ
#define PCITEST_MSI _IOW('P', 0x3, int)
#define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
#define PCITEST_READ _IOW('P', 0x5, unsigned long)
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 39c6a250dd..3a64499b0f 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
+ PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -235,6 +237,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -982,6 +986,12 @@ enum perf_event_type {
* { u64 nr;
* { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
* { u64 from, to, flags } lbr[nr];
+ * #
+ * # The format of the counters is decided by the
+ * # "branch_counter_nr" and "branch_counter_width",
+ * # which are defined in the ABI.
+ * #
+ * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
* } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
@@ -1427,6 +1437,9 @@ struct perf_branch_entry {
reserved:31;
};
+/* Size of used info bits in struct perf_branch_entry */
+#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33
+
union perf_sample_weight {
__u64 full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index c7082cc60d..ea277039f8 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -99,7 +99,7 @@ enum {
* versions.
*/
#define TCA_ACT_GACT 5
-#define TCA_ACT_IPT 6
+#define TCA_ACT_IPT 6 /* obsoleted, can be reused */
#define TCA_ACT_PEDIT 7
#define TCA_ACT_MIRRED 8
#define TCA_ACT_NAT 9
@@ -120,7 +120,7 @@ enum tca_id {
TCA_ID_UNSPEC = 0,
TCA_ID_POLICE = 1,
TCA_ID_GACT = TCA_ACT_GACT,
- TCA_ID_IPT = TCA_ACT_IPT,
+ TCA_ID_IPT = TCA_ACT_IPT, /* Obsoleted, can be reused */
TCA_ID_PEDIT = TCA_ACT_PEDIT,
TCA_ID_MIRRED = TCA_ACT_MIRRED,
TCA_ID_NAT = TCA_ACT_NAT,
@@ -280,37 +280,6 @@ struct tc_u32_pcnt {
#define TC_U32_MAXDEPTH 8
-
-/* RSVP filter */
-
-enum {
- TCA_RSVP_UNSPEC,
- TCA_RSVP_CLASSID,
- TCA_RSVP_DST,
- TCA_RSVP_SRC,
- TCA_RSVP_PINFO,
- TCA_RSVP_POLICE,
- TCA_RSVP_ACT,
- __TCA_RSVP_MAX
-};
-
-#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 )
-
-struct tc_rsvp_gpi {
- __u32 key;
- __u32 mask;
- int offset;
-};
-
-struct tc_rsvp_pinfo {
- struct tc_rsvp_gpi dpi;
- struct tc_rsvp_gpi spi;
- __u8 protocol;
- __u8 tunnelid;
- __u8 tunnelhdr;
- __u8 pad;
-};
-
/* ROUTE filter */
enum {
@@ -341,22 +310,6 @@ enum {
#define TCA_FW_MAX (__TCA_FW_MAX - 1)
-/* TC index filter */
-
-enum {
- TCA_TCINDEX_UNSPEC,
- TCA_TCINDEX_HASH,
- TCA_TCINDEX_MASK,
- TCA_TCINDEX_SHIFT,
- TCA_TCINDEX_FALL_THROUGH,
- TCA_TCINDEX_CLASSID,
- TCA_TCINDEX_POLICE,
- TCA_TCINDEX_ACT,
- __TCA_TCINDEX_MAX
-};
-
-#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1)
-
/* Flow filter */
enum {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index f762a10bfb..a3cd0c2dc9 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -477,115 +477,6 @@ enum {
#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
-
-/* CBQ section */
-
-#define TC_CBQ_MAXPRIO 8
-#define TC_CBQ_MAXLEVEL 8
-#define TC_CBQ_DEF_EWMA 5
-
-struct tc_cbq_lssopt {
- unsigned char change;
- unsigned char flags;
-#define TCF_CBQ_LSS_BOUNDED 1
-#define TCF_CBQ_LSS_ISOLATED 2
- unsigned char ewma_log;
- unsigned char level;
-#define TCF_CBQ_LSS_FLAGS 1
-#define TCF_CBQ_LSS_EWMA 2
-#define TCF_CBQ_LSS_MAXIDLE 4
-#define TCF_CBQ_LSS_MINIDLE 8
-#define TCF_CBQ_LSS_OFFTIME 0x10
-#define TCF_CBQ_LSS_AVPKT 0x20
- __u32 maxidle;
- __u32 minidle;
- __u32 offtime;
- __u32 avpkt;
-};
-
-struct tc_cbq_wrropt {
- unsigned char flags;
- unsigned char priority;
- unsigned char cpriority;
- unsigned char __reserved;
- __u32 allot;
- __u32 weight;
-};
-
-struct tc_cbq_ovl {
- unsigned char strategy;
-#define TC_CBQ_OVL_CLASSIC 0
-#define TC_CBQ_OVL_DELAY 1
-#define TC_CBQ_OVL_LOWPRIO 2
-#define TC_CBQ_OVL_DROP 3
-#define TC_CBQ_OVL_RCLASSIC 4
- unsigned char priority2;
- __u16 pad;
- __u32 penalty;
-};
-
-struct tc_cbq_police {
- unsigned char police;
- unsigned char __res1;
- unsigned short __res2;
-};
-
-struct tc_cbq_fopt {
- __u32 split;
- __u32 defmap;
- __u32 defchange;
-};
-
-struct tc_cbq_xstats {
- __u32 borrows;
- __u32 overactions;
- __s32 avgidle;
- __s32 undertime;
-};
-
-enum {
- TCA_CBQ_UNSPEC,
- TCA_CBQ_LSSOPT,
- TCA_CBQ_WRROPT,
- TCA_CBQ_FOPT,
- TCA_CBQ_OVL_STRATEGY,
- TCA_CBQ_RATE,
- TCA_CBQ_RTAB,
- TCA_CBQ_POLICE,
- __TCA_CBQ_MAX,
-};
-
-#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
-
-/* dsmark section */
-
-enum {
- TCA_DSMARK_UNSPEC,
- TCA_DSMARK_INDICES,
- TCA_DSMARK_DEFAULT_INDEX,
- TCA_DSMARK_SET_TC_INDEX,
- TCA_DSMARK_MASK,
- TCA_DSMARK_VALUE,
- __TCA_DSMARK_MAX,
-};
-
-#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
-
-/* ATM section */
-
-enum {
- TCA_ATM_UNSPEC,
- TCA_ATM_FD, /* file/socket descriptor */
- TCA_ATM_PTR, /* pointer to descriptor - later */
- TCA_ATM_HDR, /* LL header */
- TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
- TCA_ATM_ADDR, /* PVC address (for output only) */
- TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
- __TCA_ATM_MAX,
-};
-
-#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
-
/* Network emulator */
enum {
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 6c0aa57773..5a43c23f53 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -2,15 +2,11 @@
/*
md_p.h : physical layout of Linux RAID devices
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_P_H
@@ -237,7 +233,7 @@ struct mdp_superblock_1 {
char set_name[32]; /* set and interpreted by user-space */
__le64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/
- __le32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */
+ __le32 level; /* 0,1,4,5 */
__le32 layout; /* only for raid5 and raid10 currently */
__le64 size; /* used size of component devices, in 512byte sectors */
diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h
index 1053072449..7be89a4906 100644
--- a/include/uapi/linux/raid/md_u.h
+++ b/include/uapi/linux/raid/md_u.h
@@ -2,15 +2,11 @@
/*
md_u.h : user <=> kernel API between Linux raidtools and RAID drivers
Copyright (C) 1998 Ingo Molnar
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _UAPI_MD_U_H
@@ -107,11 +103,6 @@ typedef struct mdu_array_info_s {
} mdu_array_info_t;
-/* non-obvious values for 'level' */
-#define LEVEL_MULTIPATH (-4)
-#define LEVEL_LINEAR (-1)
-#define LEVEL_FAULTY (-5)
-
/* we need a value for 'no level specified' and 0
* means 'raid0', so we need something else. This is
* for internal use only
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h
index ac5d6a3031..4fc22908bc 100644
--- a/include/uapi/linux/resource.h
+++ b/include/uapi/linux/resource.h
@@ -2,7 +2,7 @@
#ifndef _UAPI_LINUX_RESOURCE_H
#define _UAPI_LINUX_RESOURCE_H
-#include <linux/time.h>
+#include <linux/time_types.h>
#include <linux/types.h>
/*
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index 53bc1af67a..de9b473360 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -11,6 +11,7 @@
#ifndef _UAPI_LINUX_SERIAL_H
#define _UAPI_LINUX_SERIAL_H
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/tty_flags.h>
@@ -137,17 +138,20 @@ struct serial_icounter_struct {
* * %SER_RS485_ADDRB - Enable RS485 addressing mode.
* * %SER_RS485_ADDR_RECV - Receive address filter (enables @addr_recv). Requires %SER_RS485_ADDRB.
* * %SER_RS485_ADDR_DEST - Destination address (enables @addr_dest). Requires %SER_RS485_ADDRB.
+ * * %SER_RS485_MODE_RS422 - Enable RS422. Requires %SER_RS485_ENABLED.
*/
struct serial_rs485 {
__u32 flags;
-#define SER_RS485_ENABLED (1 << 0)
-#define SER_RS485_RTS_ON_SEND (1 << 1)
-#define SER_RS485_RTS_AFTER_SEND (1 << 2)
-#define SER_RS485_RX_DURING_TX (1 << 4)
-#define SER_RS485_TERMINATE_BUS (1 << 5)
-#define SER_RS485_ADDRB (1 << 6)
-#define SER_RS485_ADDR_RECV (1 << 7)
-#define SER_RS485_ADDR_DEST (1 << 8)
+#define SER_RS485_ENABLED _BITUL(0)
+#define SER_RS485_RTS_ON_SEND _BITUL(1)
+#define SER_RS485_RTS_AFTER_SEND _BITUL(2)
+/* Placeholder for bit 3: SER_RS485_RTS_BEFORE_SEND, which isn't used anymore */
+#define SER_RS485_RX_DURING_TX _BITUL(4)
+#define SER_RS485_TERMINATE_BUS _BITUL(5)
+#define SER_RS485_ADDRB _BITUL(6)
+#define SER_RS485_ADDR_RECV _BITUL(7)
+#define SER_RS485_ADDR_DEST _BITUL(8)
+#define SER_RS485_MODE_RS422 _BITUL(9)
__u32 delay_rts_before_send;
__u32 delay_rts_after_send;
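
SER_RS485_MODE_RS422 plugs into the existing TIOCGRS485/TIOCSRS485 read-modify-write flow and, per the kernel-doc above, only takes effect together with SER_RS485_ENABLED. A sketch against an already-open serial port fd:

	#include <sys/ioctl.h>
	#include <linux/serial.h>

	/* Sketch: switch a serial port into RS422 mode. */
	static int enable_rs422(int fd)
	{
		struct serial_rs485 rs485;

		if (ioctl(fd, TIOCGRS485, &rs485) < 0)
			return -1;
		rs485.flags |= SER_RS485_ENABLED | SER_RS485_MODE_RS422;
		return ioctl(fd, TIOCSRS485, &rs485);
	}
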
diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
index 837fcd4b0a..b531e3ef01 100644
--- a/include/uapi/linux/smc.h
+++ b/include/uapi/linux/smc.h
@@ -160,6 +160,8 @@ enum {
SMC_NLA_LGR_D_CHID, /* u16 */
SMC_NLA_LGR_D_PAD, /* flag */
SMC_NLA_LGR_D_V2_COMMON, /* nest */
+ SMC_NLA_LGR_D_EXT_GID, /* u64 */
+ SMC_NLA_LGR_D_PEER_EXT_GID, /* u64 */
__SMC_NLA_LGR_D_MAX,
SMC_NLA_LGR_D_MAX = __SMC_NLA_LGR_D_MAX - 1
};
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index 8cb3a6fef5..58eceb7f5d 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -107,6 +107,8 @@ struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
__aligned_u64 my_gid; /* My GID */
__aligned_u64 token; /* Token of DMB */
__aligned_u64 peer_token; /* Token of remote DMBE */
+ __aligned_u64 peer_gid_ext; /* Peer GID (extended part) */
+ __aligned_u64 my_gid_ext; /* My GID (extended part) */
};
#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 7cab2c65d3..2f2ee82d55 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -154,6 +154,7 @@ struct statx {
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
+#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
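
STATX_MNT_ID_UNIQUE reuses the stx_mnt_id field but asks for the 64-bit unique mount ID rather than the reusable one, so callers must check stx_mask to learn which variant the kernel returned. A sketch using the glibc statx(3) wrapper:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/stat.h>

	/* Sketch: request the unique mount ID; older kernels clear the
	 * bit in stx_mask and may fill the legacy ID instead. */
	static void print_unique_mnt_id(const char *path)
	{
		struct statx stx;

		if (statx(AT_FDCWD, path, 0, STATX_MNT_ID_UNIQUE, &stx) == 0 &&
		    (stx.stx_mask & STATX_MNT_ID_UNIQUE))
			printf("unique mount id: %llu\n",
			       (unsigned long long)stx.stx_mnt_id);
	}
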
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index ff0a931833..ff1f38889d 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -76,6 +76,27 @@ struct sync_file_info {
__u64 sync_fence_info;
};
+/**
+ * struct sync_set_deadline - SYNC_IOC_SET_DEADLINE - set a deadline hint on a fence
+ * @deadline_ns: absolute time of the deadline
+ * @pad: must be zero
+ *
+ * Allows userspace to set a deadline on a fence, see &dma_fence_set_deadline
+ *
+ * The timebase for the deadline is CLOCK_MONOTONIC (same as vblank). For
+ * example
+ *
+ * clock_gettime(CLOCK_MONOTONIC, &t);
+ * deadline_ns = (t.tv_sec * 1000000000L) + t.tv_nsec + ns_until_deadline
+ */
+struct sync_set_deadline {
+ __u64 deadline_ns;
+	/* Not strictly needed for alignment, but leaves some room
+	 * for future extension:
+ */
+ __u64 pad;
+};
+
#define SYNC_IOC_MAGIC '>'
/*
@@ -87,5 +108,6 @@ struct sync_file_info {
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
+#define SYNC_IOC_SET_DEADLINE _IOW(SYNC_IOC_MAGIC, 5, struct sync_set_deadline)
#endif /* _UAPI_LINUX_SYNC_H */
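
A sketch of the new ioctl, following the CLOCK_MONOTONIC recipe from the kernel-doc above; the fence fd and the relative delay are caller-supplied assumptions:

	#include <time.h>
	#include <sys/ioctl.h>
	#include <linux/sync_file.h>

	/* Sketch: hint that the fence should signal within ns_from_now. */
	static int set_fence_deadline(int fence_fd, long ns_from_now)
	{
		struct sync_set_deadline args = { 0 };	/* pad must be zero */
		struct timespec t;

		clock_gettime(CLOCK_MONOTONIC, &t);
		args.deadline_ns = (__u64)t.tv_sec * 1000000000ULL +
				   t.tv_nsec + ns_from_now;
		return ioctl(fence_fd, SYNC_IOC_SET_DEADLINE, &args);
	}
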
diff --git a/include/uapi/linux/tc_act/tc_ipt.h b/include/uapi/linux/tc_act/tc_ipt.h
deleted file mode 100644
index c48d7da675..0000000000
--- a/include/uapi/linux/tc_act/tc_ipt.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_TC_IPT_H
-#define __LINUX_TC_IPT_H
-
-#include <linux/pkt_cls.h>
-
-enum {
- TCA_IPT_UNSPEC,
- TCA_IPT_TABLE,
- TCA_IPT_HOOK,
- TCA_IPT_INDEX,
- TCA_IPT_CNT,
- TCA_IPT_TM,
- TCA_IPT_TARG,
- TCA_IPT_PAD,
- __TCA_IPT_MAX
-};
-#define TCA_IPT_MAX (__TCA_IPT_MAX - 1)
-
-#endif
diff --git a/include/uapi/linux/tc_act/tc_mirred.h b/include/uapi/linux/tc_act/tc_mirred.h
index 2500a0005d..c61e76f3c2 100644
--- a/include/uapi/linux/tc_act/tc_mirred.h
+++ b/include/uapi/linux/tc_act/tc_mirred.h
@@ -21,6 +21,7 @@ enum {
TCA_MIRRED_TM,
TCA_MIRRED_PARMS,
TCA_MIRRED_PAD,
+ TCA_MIRRED_BLOCKID,
__TCA_MIRRED_MAX
};
#define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1)
diff --git a/include/uapi/linux/thp7312.h b/include/uapi/linux/thp7312.h
new file mode 100644
index 0000000000..2b629e05da
--- /dev/null
+++ b/include/uapi/linux/thp7312.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * THine THP7312 user space header file.
+ *
+ * Copyright (C) 2021 THine Electronics, Inc.
+ * Copyright (C) 2023 Ideas on Board Oy
+ */
+
+#ifndef __UAPI_THP7312_H_
+#define __UAPI_THP7312_H_
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_THP7312_LOW_LIGHT_COMPENSATION (V4L2_CID_USER_THP7312_BASE + 0x01)
+#define V4L2_CID_THP7312_AUTO_FOCUS_METHOD (V4L2_CID_USER_THP7312_BASE + 0x02)
+#define V4L2_CID_THP7312_NOISE_REDUCTION_AUTO (V4L2_CID_USER_THP7312_BASE + 0x03)
+#define V4L2_CID_THP7312_NOISE_REDUCTION_ABSOLUTE (V4L2_CID_USER_THP7312_BASE + 0x04)
+
+#endif /* __UAPI_THP7312_H_ */
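
The new controls live in the V4L2_CID_USER_THP7312_BASE range (reserved further below in v4l2-controls.h) and are reachable through the standard control ioctls on the sensor's subdevice node. A sketch, where treating low-light compensation as a boolean control is an assumption about the driver:

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>
	#include <linux/thp7312.h>

	/* Sketch: toggle low-light compensation on a THP7312 subdev node. */
	static int thp7312_set_llc(int subdev_fd, int on)
	{
		struct v4l2_control ctrl = {
			.id = V4L2_CID_THP7312_LOW_LIGHT_COMPENSATION,
			.value = on,
		};

		return ioctl(subdev_fd, VIDIOC_S_CTRL, &ctrl);
	}
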
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index d77ee6b653..078098e73f 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -73,8 +73,10 @@ struct usb_os_desc_header {
struct usb_ext_compat_desc {
__u8 bFirstInterfaceNumber;
__u8 Reserved1;
- __u8 CompatibleID[8];
- __u8 SubCompatibleID[8];
+ __struct_group(/* no tag */, IDs, /* no attrs */,
+ __u8 CompatibleID[8];
+ __u8 SubCompatibleID[8];
+ );
__u8 Reserved2[6];
};
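
__struct_group() leaves the wire layout untouched; it only wraps the two ID arrays in a named member (IDs) so that copies spanning both arrays stay inside one bounds-checkable object. A sketch of what that enables:

	#include <string.h>
	#include <linux/usb/functionfs.h>

	/* Sketch: copy CompatibleID and SubCompatibleID (16 bytes total)
	 * through the named group instead of writing past CompatibleID[]. */
	static void copy_ids(struct usb_ext_compat_desc *desc, const void *src)
	{
		memcpy(&desc->IDs, src, sizeof(desc->IDs));
	}
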
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 0dbc810150..2841e4ea8f 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -41,7 +41,8 @@
UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
UFFD_FEATURE_WP_UNPOPULATED | \
UFFD_FEATURE_POISON | \
- UFFD_FEATURE_WP_ASYNC)
+ UFFD_FEATURE_WP_ASYNC | \
+ UFFD_FEATURE_MOVE)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -50,6 +51,7 @@
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
(__u64)1 << _UFFDIO_ZEROPAGE | \
+ (__u64)1 << _UFFDIO_MOVE | \
(__u64)1 << _UFFDIO_WRITEPROTECT | \
(__u64)1 << _UFFDIO_CONTINUE | \
(__u64)1 << _UFFDIO_POISON)
@@ -73,6 +75,7 @@
#define _UFFDIO_WAKE (0x02)
#define _UFFDIO_COPY (0x03)
#define _UFFDIO_ZEROPAGE (0x04)
+#define _UFFDIO_MOVE (0x05)
#define _UFFDIO_WRITEPROTECT (0x06)
#define _UFFDIO_CONTINUE (0x07)
#define _UFFDIO_POISON (0x08)
@@ -92,6 +95,8 @@
struct uffdio_copy)
#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
struct uffdio_zeropage)
+#define UFFDIO_MOVE _IOWR(UFFDIO, _UFFDIO_MOVE, \
+ struct uffdio_move)
#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
struct uffdio_writeprotect)
#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
@@ -222,6 +227,9 @@ struct uffdio_api {
* asynchronous mode is supported in which the write fault is
* automatically resolved and write-protection is un-set.
* It implies UFFD_FEATURE_WP_UNPOPULATED.
+ *
+ * UFFD_FEATURE_MOVE indicates that the kernel supports moving
+ * existing page contents from userspace.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -239,6 +247,7 @@ struct uffdio_api {
#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
#define UFFD_FEATURE_POISON (1<<14)
#define UFFD_FEATURE_WP_ASYNC (1<<15)
+#define UFFD_FEATURE_MOVE (1<<16)
__u64 features;
__u64 ioctls;
@@ -347,6 +356,24 @@ struct uffdio_poison {
__s64 updated;
};
+struct uffdio_move {
+ __u64 dst;
+ __u64 src;
+ __u64 len;
+ /*
+ * Especially if used to atomically remove memory from the
+ * address space, the wake on the dst range is not needed.
+ */
+#define UFFDIO_MOVE_MODE_DONTWAKE ((__u64)1<<0)
+#define UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES ((__u64)1<<1)
+ __u64 mode;
+ /*
+ * "move" is written by the ioctl and must be at the end: the
+ * copy_from_user will not read the last 8 bytes.
+ */
+ __s64 move;
+};
+
/*
* Flags for the userfaultfd(2) system call itself.
*/
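
UFFDIO_MOVE follows the UFFDIO_COPY calling convention: the kernel writes the number of bytes handled, or a negative error, into the trailing move field. A sketch, assuming UFFD_FEATURE_MOVE was negotiated via UFFDIO_API and the destination range is registered:

	#include <sys/ioctl.h>
	#include <linux/userfaultfd.h>

	/* Sketch: move len bytes of existing pages from src to dst. */
	static long long uffd_move(int uffd, unsigned long long dst,
				   unsigned long long src,
				   unsigned long long len)
	{
		struct uffdio_move move = {
			.dst = dst,
			.src = src,
			.len = len,
			.mode = 0,	/* or UFFDIO_MOVE_MODE_DONTWAKE */
		};

		if (ioctl(uffd, UFFDIO_MOVE, &move) < 0)
			return -1;
		return move.move;	/* bytes moved, filled in by the kernel */
	}
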
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 68db66d4aa..99c3f5e99d 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -209,6 +209,12 @@ enum v4l2_colorfx {
*/
#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0)
+/*
+ * The base for THine THP7312 driver controls.
+ * We reserve 32 controls for this driver.
+ */
+#define V4L2_CID_USER_THP7312_BASE (V4L2_CID_USER_BASE + 0x11c0)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index b383c2fe0c..7048c51581 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -116,13 +116,15 @@ struct v4l2_subdev_frame_size_enum {
* @pad: pad number, as reported by the media API
* @interval: frame interval in seconds
* @stream: stream number, defined in subdev routing
+ * @which: interval type (from enum v4l2_subdev_format_whence)
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval {
__u32 pad;
struct v4l2_fract interval;
__u32 stream;
- __u32 reserved[8];
+ __u32 which;
+ __u32 reserved[7];
};
/**
@@ -133,7 +135,7 @@ struct v4l2_subdev_frame_interval {
* @width: frame width in pixels
* @height: frame height in pixels
* @interval: frame interval in seconds
- * @which: format type (from enum v4l2_subdev_format_whence)
+ * @which: interval type (from enum v4l2_subdev_format_whence)
* @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
@@ -239,7 +241,14 @@ struct v4l2_subdev_routing {
* set (which is the default), the 'stream' fields will be forced to 0 by the
* kernel.
*/
- #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
+#define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
+
+/*
+ * The client is aware of the 'which' field in struct v4l2_subdev_frame_interval.
+ * If this is not set (which is the default), the 'which' field is forced to
+ * V4L2_SUBDEV_FORMAT_ACTIVE by the kernel.
+ */
+#define V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH (1ULL << 1)
/**
* struct v4l2_subdev_client_capability - Capabilities of the client accessing
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 7f5fb01022..2b68e6cdf1 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -1219,6 +1219,7 @@ enum vfio_device_mig_state {
VFIO_DEVICE_STATE_RUNNING_P2P = 5,
VFIO_DEVICE_STATE_PRE_COPY = 6,
VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
+ VFIO_DEVICE_STATE_NR,
};
/**
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index c3d4e490ce..68e7ac178c 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -1035,6 +1035,7 @@ struct v4l2_requestbuffers {
#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6)
+#define V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS (1 << 7)
/**
* struct v4l2_plane - plane info for multi-planar buffers
@@ -1816,7 +1817,7 @@ struct v4l2_ext_control {
__s64 __user *p_s64;
struct v4l2_area __user *p_area;
struct v4l2_ctrl_h264_sps __user *p_h264_sps;
- struct v4l2_ctrl_h264_pps *p_h264_pps;
+ struct v4l2_ctrl_h264_pps __user *p_h264_pps;
struct v4l2_ctrl_h264_scaling_matrix __user *p_h264_scaling_matrix;
struct v4l2_ctrl_h264_pred_weights __user *p_h264_pred_weights;
struct v4l2_ctrl_h264_slice_params __user *p_h264_slice_params;
@@ -1837,6 +1838,8 @@ struct v4l2_ext_control {
struct v4l2_ctrl_av1_tile_group_entry __user *p_av1_tile_group_entry;
struct v4l2_ctrl_av1_frame __user *p_av1_frame;
struct v4l2_ctrl_av1_film_grain __user *p_av1_film_grain;
+ struct v4l2_ctrl_hdr10_cll_info __user *p_hdr10_cll_info;
+ struct v4l2_ctrl_hdr10_mastering_display __user *p_hdr10_mastering_display;
void __user *ptr;
};
} __attribute__ ((packed));
@@ -2605,6 +2608,9 @@ struct v4l2_dbg_chip_info {
* @flags: additional buffer management attributes (ignored unless the
* queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability
* and configured for MMAP streaming I/O).
+ * @max_num_buffers: if V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability flag is set
 *		this field indicates the maximum possible number of buffers
+ * for this queue.
* @reserved: future extensions
*/
struct v4l2_create_buffers {
@@ -2614,7 +2620,8 @@ struct v4l2_create_buffers {
struct v4l2_format format;
__u32 capabilities;
__u32 flags;
- __u32 reserved[6];
+ __u32 max_num_buffers;
+ __u32 reserved[5];
};
/*
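
A sketch of probing the new field with a zero-count VIDIOC_CREATE_BUFS call, which allocates nothing but fills capabilities; the capture buffer type and the fallback constant are assumptions for illustration:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Sketch: ask a capture queue for its buffer limit without
	 * allocating anything. */
	static __u32 query_max_buffers(int fd)
	{
		struct v4l2_create_buffers cb;

		memset(&cb, 0, sizeof(cb));
		cb.count = 0;		/* probe only, no allocation */
		cb.memory = V4L2_MEMORY_MMAP;
		cb.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		if (ioctl(fd, VIDIOC_G_FMT, &cb.format) < 0 ||
		    ioctl(fd, VIDIOC_CREATE_BUFS, &cb) < 0)
			return 0;
		if (cb.capabilities & V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS)
			return cb.max_num_buffers;
		return 32;		/* legacy VIDEO_MAX_FRAME limit */
	}
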
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 44f4dd2add..a8208492e8 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -175,6 +175,9 @@ struct virtio_pci_modern_common_cfg {
__le16 queue_notify_data; /* read-write */
__le16 queue_reset; /* read-write */
+
+ __le16 admin_queue_index; /* read-only */
+ __le16 admin_queue_num; /* read-only */
};
/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
@@ -215,7 +218,72 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_Q_USEDHI 52
#define VIRTIO_PCI_COMMON_Q_NDATA 56
#define VIRTIO_PCI_COMMON_Q_RESET 58
+#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60
+#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62
#endif /* VIRTIO_PCI_NO_MODERN */
+/* Admin command status. */
+#define VIRTIO_ADMIN_STATUS_OK 0
+
+/* Admin command opcode. */
+#define VIRTIO_ADMIN_CMD_LIST_QUERY 0x0
+#define VIRTIO_ADMIN_CMD_LIST_USE 0x1
+
+/* Admin command group type. */
+#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1
+
+/* Transitional device admin command. */
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE 0x2
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ 0x3
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE 0x4
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5
+#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6
+
+struct virtio_admin_cmd_hdr {
+ __le16 opcode;
+ /*
+ * 1 - SR-IOV
+ * 2-65535 - reserved
+ */
+ __le16 group_type;
+ /* Unused, reserved for future extensions. */
+ __u8 reserved1[12];
+ __le64 group_member_id;
+};
+
+struct virtio_admin_cmd_status {
+ __le16 status;
+ __le16 status_qualifier;
+ /* Unused, reserved for future extensions. */
+ __u8 reserved2[4];
+};
+
+struct virtio_admin_cmd_legacy_wr_data {
+ __u8 offset; /* Starting offset of the register(s) to write. */
+ __u8 reserved[7];
+ __u8 registers[];
+};
+
+struct virtio_admin_cmd_legacy_rd_data {
+ __u8 offset; /* Starting offset of the register(s) to read. */
+};
+
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END 0
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_DEV 0x1
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM 0x2
+
+#define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4
+
+struct virtio_admin_cmd_notify_info_data {
+ __u8 flags; /* 0 = end of list, 1 = owner device, 2 = member device */
+ __u8 bar; /* BAR of the member or the owner device */
+ __u8 padding[6];
+ __le64 offset; /* Offset within bar. */
+};
+
+struct virtio_admin_cmd_notify_info_result {
+ struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO];
+};
+
#endif
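
A kernel-style sketch of the fixed header for a LIST_USE command addressed to SR-IOV group member (VF) 1; the member id is illustrative, and the command-specific payload plus the virtio_admin_cmd_status reply travel in separate buffers laid out by the virtio spec:

	/* Sketch (kernel side): fixed header for VIRTIO_ADMIN_CMD_LIST_USE. */
	struct virtio_admin_cmd_hdr hdr = {
		.opcode          = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE),
		.group_type      = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV),
		/* reserved1[] stays zeroed */
		.group_member_id = cpu_to_le64(1),
	};
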
diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h
index d676b36203..ede4f35649 100644
--- a/include/uapi/linux/virtio_pmem.h
+++ b/include/uapi/linux/virtio_pmem.h
@@ -14,6 +14,13 @@
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+/* Feature bits */
+/* guest physical address range will be indicated as shared memory region 0 */
+#define VIRTIO_PMEM_F_SHMEM_REGION 0
+
+/* shmid of the shared memory region corresponding to the pmem */
+#define VIRTIO_PMEM_SHMEM_REGION_ID 0
+
struct virtio_pmem_config {
__le64 start;
__le64 size;
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index 6e7c67a0cc..c0c34aca90 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -54,6 +54,8 @@ enum {
BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL,
BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED = 0x04ULL,
BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
+ BNXT_RE_UCNTX_CMASK_POW2_DISABLED = 0x10ULL,
+ BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED = 0x40,
};
enum bnxt_re_wqe_mode {
@@ -62,6 +64,14 @@ enum bnxt_re_wqe_mode {
BNXT_QPLIB_WQE_MODE_INVALID = 0x02,
};
+enum {
+ BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
+};
+
+struct bnxt_re_uctx_req {
+ __aligned_u64 comp_mask;
+};
+
struct bnxt_re_uctx_resp {
__u32 dev_id;
__u32 max_qp;
@@ -92,11 +102,16 @@ struct bnxt_re_cq_req {
__aligned_u64 cq_handle;
};
+enum bnxt_re_cq_mask {
+ BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT = 0x1,
+};
+
struct bnxt_re_cq_resp {
__u32 cqid;
__u32 tail;
__u32 phase;
__u32 rsvd;
+ __aligned_u64 comp_mask;
};
struct bnxt_re_resize_cq_req {
@@ -133,6 +148,7 @@ enum bnxt_re_shpg_offt {
enum bnxt_re_objects {
BNXT_RE_OBJECT_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT),
BNXT_RE_OBJECT_NOTIFY_DRV,
+ BNXT_RE_OBJECT_GET_TOGGLE_MEM,
};
enum bnxt_re_alloc_page_type {
@@ -161,4 +177,29 @@ enum bnxt_re_alloc_page_methods {
enum bnxt_re_notify_drv_methods {
BNXT_RE_METHOD_NOTIFY_DRV = (1U << UVERBS_ID_NS_SHIFT),
};
+
+/* Toggle mem */
+
+enum bnxt_re_get_toggle_mem_type {
+ BNXT_RE_CQ_TOGGLE_MEM = 0,
+ BNXT_RE_SRQ_TOGGLE_MEM,
+};
+
+enum bnxt_re_var_toggle_mem_attrs {
+ BNXT_RE_TOGGLE_MEM_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_TOGGLE_MEM_TYPE,
+ BNXT_RE_TOGGLE_MEM_RES_ID,
+ BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
+ BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
+ BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
+};
+
+enum bnxt_re_toggle_mem_attrs {
+ BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum bnxt_re_toggle_mem_methods {
+ BNXT_RE_METHOD_GET_TOGGLE_MEM = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
+};
#endif /* __BNXT_RE_UVERBS_ABI_H__*/
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index d94c32f288..701e2d567e 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef EFA_ABI_USER_H
#define EFA_ABI_USER_H
#include <linux/types.h>
+#include <rdma/ib_user_ioctl_cmds.h>
/*
* Increment this value if any changes that break userspace ABI
@@ -134,4 +135,22 @@ struct efa_ibv_ex_query_device_resp {
__u32 device_caps;
};
+enum {
+ EFA_QUERY_MR_VALIDITY_RECV_IC_ID = 1 << 0,
+ EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID = 1 << 1,
+ EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID = 1 << 2,
+};
+
+enum efa_query_mr_attrs {
+ EFA_IB_ATTR_QUERY_MR_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
+ EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
+ EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
+ EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
+};
+
+enum efa_mr_methods {
+ EFA_IB_METHOD_MR_QUERY = (1U << UVERBS_ID_NS_SHIFT),
+};
+
#endif /* EFA_ABI_USER_H */
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index ce0f37f834..c996e15108 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -125,4 +125,9 @@ struct hns_roce_ib_alloc_pd_resp {
__u32 pdn;
};
+struct hns_roce_ib_create_ah_resp {
+ __u8 dmac[6];
+ __u8 reserved[2];
+};
+
#endif /* HNS_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index a96b7d2770..d4f6a36dff 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/if_ether.h> /* For ETH_ALEN. */
#include <rdma/ib_user_ioctl_verbs.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
@@ -275,6 +276,7 @@ struct mlx5_ib_query_device_resp {
__u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
struct mlx5_ib_dci_streams_caps dci_streams_caps;
__u16 reserved;
+ struct mlx5_ib_uapi_reg reg_c0;
};
enum mlx5_ib_create_cq_flags {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 7af9e09ea5..3189c7f08d 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -64,6 +64,7 @@ enum mlx5_ib_uapi_dm_type {
MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM,
MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM,
MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM,
+ MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM,
};
enum mlx5_ib_uapi_devx_create_event_channel_flags {
diff --git a/include/uapi/regulator/regulator.h b/include/uapi/regulator/regulator.h
new file mode 100644
index 0000000000..71bf71a22e
--- /dev/null
+++ b/include/uapi/regulator/regulator.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Regulator uapi header
+ *
+ * Author: Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#ifndef _UAPI_REGULATOR_H
+#define _UAPI_REGULATOR_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/*
+ * Regulator notifier events.
+ *
+ * UNDER_VOLTAGE Regulator output is under voltage.
+ * OVER_CURRENT Regulator output current is too high.
+ * REGULATION_OUT Regulator output is out of regulation.
+ * FAIL Regulator output has failed.
+ * OVER_TEMP Regulator over temp.
+ * FORCE_DISABLE Regulator forcibly shut down by software.
+ * VOLTAGE_CHANGE Regulator voltage changed.
+ * Data passed is old voltage cast to (void *).
+ * DISABLE Regulator was disabled.
+ * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
+ * Data passed is "struct pre_voltage_change_data"
+ * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
+ * Data passed is old voltage cast to (void *).
+ * PRE_DISABLE Regulator is about to be disabled
+ * ABORT_DISABLE Regulator disable failed for some reason
+ *
+ * NOTE: These events can be OR'ed together when passed to a handler.
+ */
+
+#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
+#define REGULATOR_EVENT_OVER_CURRENT 0x02
+#define REGULATOR_EVENT_REGULATION_OUT 0x04
+#define REGULATOR_EVENT_FAIL 0x08
+#define REGULATOR_EVENT_OVER_TEMP 0x10
+#define REGULATOR_EVENT_FORCE_DISABLE 0x20
+#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
+#define REGULATOR_EVENT_DISABLE 0x80
+#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
+#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
+#define REGULATOR_EVENT_PRE_DISABLE 0x400
+#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+#define REGULATOR_EVENT_ENABLE 0x1000
+/*
+ * Following notifications should be emitted only if detected condition
+ * is such that the HW is likely to still be working but consumers should
+ * take a recovery action to prevent problems escalating into errors.
+ */
+#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
+#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
+#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
+#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
+#define REGULATOR_EVENT_WARN_MASK 0x1E000
+
+struct reg_genl_event {
+ char reg_name[32];
+ uint64_t event;
+};
+
+/* attributes of reg_genl_family */
+enum {
+ REG_GENL_ATTR_UNSPEC,
+ REG_GENL_ATTR_EVENT, /* reg event info needed by user space */
+ __REG_GENL_ATTR_MAX,
+};
+
+#define REG_GENL_ATTR_MAX (__REG_GENL_ATTR_MAX - 1)
+
+/* commands supported by the reg_genl_family */
+enum {
+ REG_GENL_CMD_UNSPEC,
+ REG_GENL_CMD_EVENT, /* kernel->user notifications for reg events */
+ __REG_GENL_CMD_MAX,
+};
+
+#define REG_GENL_CMD_MAX (__REG_GENL_CMD_MAX - 1)
+
+#define REG_GENL_FAMILY_NAME "reg_event"
+#define REG_GENL_VERSION 0x01
+#define REG_GENL_MCAST_GROUP_NAME "reg_mc_group"
+
+#endif /* _UAPI_REGULATOR_H */
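
Events arrive as REG_GENL_CMD_EVENT messages on the "reg_event" generic netlink family, with the payload carried in a REG_GENL_ATTR_EVENT attribute. The netlink receive path is omitted in the sketch below, and the <regulator/regulator.h> include path assumes the usual UAPI header install location:

	#include <stdio.h>
	#include <regulator/regulator.h>

	/* Sketch: decode one payload taken from a REG_GENL_ATTR_EVENT
	 * attribute; reg_name may not be NUL-terminated, hence %.32s. */
	static void print_reg_event(const struct reg_genl_event *ev)
	{
		printf("%.32s: events 0x%llx%s\n", ev->reg_name,
		       (unsigned long long)ev->event,
		       (ev->event & REGULATOR_EVENT_WARN_MASK) ?
		       " (warning)" : "");
	}
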
diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h
index 907d345f04..c72ce38728 100644
--- a/include/uapi/scsi/scsi_bsg_mpi3mr.h
+++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h
@@ -491,6 +491,8 @@ struct mpi3_nvme_encapsulated_error_reply {
#define MPI3MR_NVME_DATA_FORMAT_PRP 0
#define MPI3MR_NVME_DATA_FORMAT_SGL1 1
#define MPI3MR_NVME_DATA_FORMAT_SGL2 2
+#define MPI3MR_NVMESGL_DATA_SEGMENT 0x00
+#define MPI3MR_NVMESGL_LAST_SEGMENT 0x03
/* MPI3: task management related definitions */
struct mpi3_scsi_task_mgmt_request {
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index f9939da411..628d46a0da 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -142,7 +142,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 15)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 17)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -267,7 +267,10 @@ typedef int __bitwise snd_pcm_format_t;
typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_SUBFORMAT_STD ((__force snd_pcm_subformat_t) 0)
-#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_STD
+#define SNDRV_PCM_SUBFORMAT_MSBITS_MAX ((__force snd_pcm_subformat_t) 1)
+#define SNDRV_PCM_SUBFORMAT_MSBITS_20 ((__force snd_pcm_subformat_t) 2)
+#define SNDRV_PCM_SUBFORMAT_MSBITS_24 ((__force snd_pcm_subformat_t) 3)
+#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_MSBITS_24
#define SNDRV_PCM_INFO_MMAP 0x00000001 /* hardware supports mmap */
#define SNDRV_PCM_INFO_MMAP_VALID 0x00000002 /* period data are valid during transfer */
@@ -413,7 +416,7 @@ struct snd_pcm_hw_params {
unsigned int rmask; /* W: requested masks */
unsigned int cmask; /* R: changed masks */
unsigned int info; /* R: Info flags for returned setup */
- unsigned int msbits; /* R: used most significant bits */
+ unsigned int msbits; /* R: used most significant bits (in sample bit-width) */
unsigned int rate_num; /* R: rate numerator */
unsigned int rate_den; /* R: rate denominator */
snd_pcm_uframes_t fifo_size; /* R: chip FIFO size in frames */
diff --git a/include/uapi/sound/scarlett2.h b/include/uapi/sound/scarlett2.h
new file mode 100644
index 0000000000..91467ab0ed
--- /dev/null
+++ b/include/uapi/sound/scarlett2.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Focusrite Scarlett 2 Protocol Driver for ALSA
+ * (including Scarlett 2nd Gen, 3rd Gen, 4th Gen, Clarett USB, and
+ * Clarett+ series products)
+ *
+ * Copyright (c) 2023 by Geoffrey D. Bennett <g at b4.vu>
+ */
+#ifndef __UAPI_SOUND_SCARLETT2_H
+#define __UAPI_SOUND_SCARLETT2_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define SCARLETT2_HWDEP_MAJOR 1
+#define SCARLETT2_HWDEP_MINOR 0
+#define SCARLETT2_HWDEP_SUBMINOR 0
+
+#define SCARLETT2_HWDEP_VERSION \
+ ((SCARLETT2_HWDEP_MAJOR << 16) | \
+ (SCARLETT2_HWDEP_MINOR << 8) | \
+ SCARLETT2_HWDEP_SUBMINOR)
+
+#define SCARLETT2_HWDEP_VERSION_MAJOR(v) (((v) >> 16) & 0xFF)
+#define SCARLETT2_HWDEP_VERSION_MINOR(v) (((v) >> 8) & 0xFF)
+#define SCARLETT2_HWDEP_VERSION_SUBMINOR(v) ((v) & 0xFF)
+
+/* Get protocol version */
+#define SCARLETT2_IOCTL_PVERSION _IOR('S', 0x60, int)
+
+/* Reboot */
+#define SCARLETT2_IOCTL_REBOOT _IO('S', 0x61)
+
+/* Select flash segment */
+#define SCARLETT2_SEGMENT_ID_SETTINGS 0
+#define SCARLETT2_SEGMENT_ID_FIRMWARE 1
+#define SCARLETT2_SEGMENT_ID_COUNT 2
+
+#define SCARLETT2_IOCTL_SELECT_FLASH_SEGMENT _IOW('S', 0x62, int)
+
+/* Erase selected flash segment */
+#define SCARLETT2_IOCTL_ERASE_FLASH_SEGMENT _IO('S', 0x63)
+
+/* Get selected flash segment erase progress
+ * 1 through to num_blocks, or 255 for complete
+ */
+struct scarlett2_flash_segment_erase_progress {
+ unsigned char progress;
+ unsigned char num_blocks;
+};
+#define SCARLETT2_IOCTL_GET_ERASE_PROGRESS \
+ _IOR('S', 0x64, struct scarlett2_flash_segment_erase_progress)
+
+#endif /* __UAPI_SOUND_SCARLETT2_H */
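
These ioctls are issued on the card's ALSA hwdep node; a sketch that checks protocol compatibility before any flash operation (the device path is an assumption, and real code would also verify the hwdep interface ID):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sound/scarlett2.h>

	/* Sketch: open e.g. /dev/snd/hwC0D0 and require a matching
	 * protocol major before using the flash ioctls. */
	static int scarlett2_open(const char *hwdep_path)
	{
		int fd = open(hwdep_path, O_RDWR);
		int ver;

		if (fd < 0)
			return -1;
		if (ioctl(fd, SCARLETT2_IOCTL_PVERSION, &ver) < 0 ||
		    SCARLETT2_HWDEP_VERSION_MAJOR(ver) != SCARLETT2_HWDEP_MAJOR) {
			close(fd);
			return -1;
		}
		return fd;
	}
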
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 453cab2a12..ee57089346 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -35,6 +35,7 @@
/* buffers */
#define SOF_TKN_BUF_SIZE 100
#define SOF_TKN_BUF_CAPS 101
+#define SOF_TKN_BUF_FLAGS 102
/* DAI */
/* Token retired with ABI 3.2, do not use for new capabilities
@@ -213,4 +214,8 @@
#define SOF_TKN_AMD_ACPI2S_CH 1701
#define SOF_TKN_AMD_ACPI2S_TDM_MODE 1702
+/* MICFIL PDM */
+#define SOF_TKN_IMX_MICFIL_RATE 2000
+#define SOF_TKN_IMX_MICFIL_CH 2001
+
#endif
diff --git a/include/uapi/xen/gntalloc.h b/include/uapi/xen/gntalloc.h
index 48d2790ef9..3109282672 100644
--- a/include/uapi/xen/gntalloc.h
+++ b/include/uapi/xen/gntalloc.h
@@ -31,7 +31,10 @@ struct ioctl_gntalloc_alloc_gref {
__u64 index;
/* The grant references of the newly created grant, one per page */
/* Variable size, depending on count */
- __u32 gref_ids[1];
+ union {
+ __u32 gref_ids[1];
+ __DECLARE_FLEX_ARRAY(__u32, gref_ids_flex);
+ };
};
#define GNTALLOC_FLAG_WRITABLE 1
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index e77ab17868..b6003749bc 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -14,6 +14,7 @@
#include <linux/bitops.h>
#include <linux/types.h>
#include <uapi/scsi/scsi_bsg_ufs.h>
+#include <linux/time64.h>
/*
* Using static_assert() is not allowed in UAPI header files. Hence the check
@@ -551,6 +552,14 @@ struct ufs_vreg_info {
struct ufs_vreg *vdd_hba;
};
+/* UFS device descriptor wPeriodicRTCUpdate bit9 defines RTC time baseline */
+#define UFS_RTC_TIME_BASELINE BIT(9)
+
+enum ufs_rtc_time {
+ UFS_RTC_RELATIVE,
+ UFS_RTC_ABSOLUTE
+};
+
struct ufs_dev_info {
bool f_power_on_wp_en;
/* Keeps information if any of the LU is power on write protected */
@@ -578,6 +587,11 @@ struct ufs_dev_info {
/* UFS EXT_IID Enable */
bool b_ext_iid_en;
+
+ /* UFS RTC */
+ enum ufs_rtc_time rtc_type;
+ time64_t rtc_time_baseline;
+ u32 rtc_update_period;
};
/*
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 7f0b2c5599..8e2bce9a4f 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -16,6 +16,7 @@
#include <linux/blk-crypto-profile.h>
#include <linux/blk-mq.h>
#include <linux/devfreq.h>
+#include <linux/fault-inject.h>
#include <linux/msi.h>
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
@@ -911,6 +912,8 @@ enum ufshcd_mcq_opr {
* @mcq_base: Multi circular queue registers base address
* @uhq: array of supported hardware queues
* @dev_cmd_queue: Queue for issuing device management commands
+ * @mcq_opr: MCQ operation and runtime registers
+ * @ufs_rtc_update_work: Work item for periodic UFS RTC updates
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -1058,6 +1061,10 @@ struct ufs_hba {
struct delayed_work debugfs_ee_work;
u32 debugfs_ee_rate_limit_ms;
#endif
+#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
+ struct fault_attr trigger_eh_attr;
+ struct fault_attr timeout_attr;
+#endif
u32 luns_avail;
unsigned int nr_hw_queues;
unsigned int nr_queues[HCTX_MAX_TYPES];
@@ -1071,6 +1078,8 @@ struct ufs_hba {
struct ufs_hw_queue *uhq;
struct ufs_hw_queue *dev_cmd_queue;
struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];
+
+ struct delayed_work ufs_rtc_update_work;
};
/**
@@ -1231,6 +1240,8 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
ufshcd_writel(hba, tmp, reg);
}
+void ufshcd_enable_irq(struct ufs_hba *hba);
+void ufshcd_disable_irq(struct ufs_hba *hba);
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
@@ -1360,7 +1371,6 @@ static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}
-void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups);
diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
index 256eb3a43f..360e1245fb 100644
--- a/include/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -193,7 +193,7 @@
#define DME_LocalAFC0ReqTimeOutVal 0xD043
/* PA power modes */
-enum {
+enum ufs_pa_pwr_mode {
FAST_MODE = 1,
SLOW_MODE = 2,
FASTAUTO_MODE = 4,
@@ -205,7 +205,7 @@ enum {
#define PWRMODE_RX_OFFSET 4
/* PA TX/RX Frequency Series */
-enum {
+enum ufs_hs_gear_rate {
PA_HS_MODE_A = 1,
PA_HS_MODE_B = 2,
};
diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h
new file mode 100644
index 0000000000..c50d152e7b
--- /dev/null
+++ b/include/vdso/gettime.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VDSO_GETTIME_H
+#define _VDSO_GETTIME_H
+
+#include <linux/types.h>
+
+struct __kernel_timespec;
+struct timezone;
+
+#if !defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+struct old_timespec32;
+int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res);
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts);
+#else
+int __vdso_clock_getres(clockid_t clock, struct __kernel_timespec *res);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+#endif
+
+__kernel_old_time_t __vdso_time(__kernel_old_time_t *t);
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+
+#endif
diff --git a/include/video/sticore.h b/include/video/sticore.h
index 012b5b46ad..823666af18 100644
--- a/include/video/sticore.h
+++ b/include/video/sticore.h
@@ -2,7 +2,7 @@
#ifndef STICORE_H
#define STICORE_H
-struct fb_info;
+struct device;
/* generic STI structures & functions */
@@ -367,8 +367,8 @@ struct sti_struct {
/* PCI data structures (pg. 17ff from sti.pdf) */
u8 rm_entry[16]; /* pci region mapper array == pci config space offset */
- /* pointer to the fb_info where this STI device is used */
- struct fb_info *info;
+ /* pointer to the parent device */
+ struct device *dev;
/* pointer to all internal data */
struct sti_all_data *sti_data;
diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h
index 18417b0178..60e42d3b76 100644
--- a/include/xen/interface/io/displif.h
+++ b/include/xen/interface/io/displif.h
@@ -537,7 +537,7 @@ struct xendispl_dbuf_create_req {
struct xendispl_page_directory {
grant_ref_t gref_dir_next_page;
- grant_ref_t gref[1]; /* Variable length */
+ grant_ref_t gref[];
};
/*
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index ba4c4274b7..4fef1efcdc 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -95,7 +95,7 @@ struct __name##_sring { \
RING_IDX req_prod, req_event; \
RING_IDX rsp_prod, rsp_event; \
uint8_t __pad[48]; \
- union __name##_sring_entry ring[1]; /* variable-length */ \
+ union __name##_sring_entry ring[]; \
}; \
\
/* "Front" end's private variables */ \
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
index 445657cdb1..b818517588 100644
--- a/include/xen/interface/io/sndif.h
+++ b/include/xen/interface/io/sndif.h
@@ -659,7 +659,7 @@ struct xensnd_open_req {
struct xensnd_page_directory {
grant_ref_t gref_dir_next_page;
- grant_ref_t gref[1]; /* Variable length */
+ grant_ref_t gref[];
};
/*