Diffstat (limited to 'include')
75 files changed, 789 insertions, 116 deletions
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h index 81c298488..6b5eec10c 100644 --- a/include/drm/drm_color_mgmt.h +++ b/include/drm/drm_color_mgmt.h @@ -24,6 +24,7 @@ #define __DRM_COLOR_MGMT_H__ #include <linux/ctype.h> +#include <linux/math64.h> #include <drm/drm_property.h> struct drm_crtc; diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h index 553210c02..627efa56e 100644 --- a/include/drm/drm_fixed.h +++ b/include/drm/drm_fixed.h @@ -88,7 +88,7 @@ static inline int drm_fixp2int(s64 a) static inline int drm_fixp2int_ceil(s64 a) { - if (a > 0) + if (a >= 0) return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE); else return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE); diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 31ba85a41..3c0d1495c 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -161,6 +161,7 @@ struct mipi_dsi_device_info { * struct mipi_dsi_device - DSI peripheral device * @host: DSI host for this peripheral * @dev: driver model device node for this peripheral + * @attached: the DSI device has been successfully attached * @name: DSI peripheral chip type * @channel: virtual channel assigned to the peripheral * @format: pixel format for video mode @@ -176,6 +177,7 @@ struct mipi_dsi_device_info { struct mipi_dsi_device { struct mipi_dsi_host *host; struct device dev; + bool attached; char name[DSI_DEV_NAME_SIZE]; unsigned int channel; diff --git a/include/linux/async.h b/include/linux/async.h index 0a17cd27f..d5496a520 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev) return async_schedule_node(func, dev, dev_to_node(dev)); } +bool async_schedule_dev_nocall(async_func_t func, struct device *dev); + /** * async_schedule_dev_domain - A device specific version of async_schedule_domain * @func: function to execute asynchronously diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 98fdf5a31..a47e1aeba 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -345,6 +345,7 @@ struct queue_limits { unsigned int max_zone_append_sectors; unsigned int discard_granularity; unsigned int discard_alignment; + unsigned int zone_write_granularity; unsigned short max_segments; unsigned short max_integrity_segments; @@ -1169,6 +1170,8 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); extern void blk_queue_max_zone_append_sectors(struct request_queue *q, unsigned int max_zone_append_sectors); extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); +void blk_queue_zone_write_granularity(struct request_queue *q, + unsigned int size); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); void blk_queue_update_readahead(struct request_queue *q); @@ -1480,6 +1483,18 @@ static inline int bdev_io_opt(struct block_device *bdev) return queue_io_opt(bdev_get_queue(bdev)); } +static inline unsigned int +queue_zone_write_granularity(const struct request_queue *q) +{ + return q->limits.zone_write_granularity; +} + +static inline unsigned int +bdev_zone_write_granularity(struct block_device *bdev) +{ + return queue_zone_write_granularity(bdev_get_queue(bdev)); +} + static inline int queue_alignment_offset(const struct request_queue *q) { if (q->limits.misaligned) @@ -1883,6 +1898,7 @@ struct block_device_operations { void (*unlock_native_capacity) (struct gendisk *); int (*revalidate_disk) (struct gendisk *); int (*getgeo)(struct 
block_device *, struct hd_geometry *); + int (*set_read_only)(struct block_device *bdev, bool ro); /* this callback is with swap_lock and sometimes page table lock held */ void (*swap_slot_free_notify) (struct block_device *, unsigned long); int (*report_zones)(struct gendisk *, sector_t sector, diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8f4379e93..a75faf437 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -82,7 +82,11 @@ struct bpf_map_ops { /* funcs called by prog_array and perf_event_array map */ void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, int fd); - void (*map_fd_put_ptr)(void *ptr); + /* If need_defer is true, the implementation should guarantee that + * the to-be-put element is still alive before the bpf program, which + * may manipulate it, exists. + */ + void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer); int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); u32 (*map_fd_sys_lookup_elem)(void *ptr); void (*map_seq_show_elem)(struct bpf_map *map, void *key, @@ -171,9 +175,14 @@ struct bpf_map { */ atomic64_t refcnt ____cacheline_aligned; atomic64_t usercnt; - struct work_struct work; + /* rcu is used before freeing and work is only used during freeing */ + union { + struct work_struct work; + struct rcu_head rcu; + }; struct mutex freeze_mutex; atomic64_t writecnt; + bool free_after_mult_rcu_gp; }; static inline bool map_value_has_spin_lock(const struct bpf_map *map) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6d7ab0161..2099226d8 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -74,6 +74,8 @@ extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/device.h b/include/linux/device.h index 6394c4b70..9c9ce573c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -973,6 +973,7 @@ void device_link_del(struct device_link *link); void device_link_remove(void *consumer, struct device *supplier); void device_links_supplier_sync_state_pause(void); void device_links_supplier_sync_state_resume(void); +void device_link_wait_removal(void); extern __printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index dd357a747..4e4cce0ad 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -949,7 +949,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan, static inline bool is_slave_direction(enum dma_transfer_direction direction) { - return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM); + return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) || + (direction == DMA_DEV_TO_DEV); } static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index b060514bf..bbd3aeb2f 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -543,6 +543,31 @@ static inline unsigned long compare_ether_header(const void *a, const void *b) } /** + * eth_skb_pkt_type - Assign packet type if destination address does not match + * @skb: Assigned a packet 
type if address does not match @dev address + * @dev: Network device used to compare packet address against + * + * If the destination MAC address of the packet does not match the network + * device address, assign an appropriate packet type. + */ +static inline void eth_skb_pkt_type(struct sk_buff *skb, + const struct net_device *dev) +{ + const struct ethhdr *eth = eth_hdr(skb); + + if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) { + if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) { + if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) + skb->pkt_type = PACKET_BROADCAST; + else + skb->pkt_type = PACKET_MULTICAST; + } else { + skb->pkt_type = PACKET_OTHERHOST; + } + } +} + +/** * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame * @skb: Buffer to pad * diff --git a/include/linux/filter.h b/include/linux/filter.h index bc6ce4b20..840b2a05c 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -480,24 +480,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \ u64, __ur_3, u64, __ur_4, u64, __ur_5) -#define BPF_CALL_x(x, name, ...) \ +#define BPF_CALL_x(x, attr, name, ...) \ static __always_inline \ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ - u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \ - u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \ + attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \ + attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \ { \ return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\ } \ static __always_inline \ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)) -#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__) -#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__) -#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__) -#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__) -#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__) -#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__) +#define __NOATTR +#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__) +#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__) +#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__) +#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__) +#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__) +#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__) + +#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__) #define bpf_ctx_range(TYPE, MEMBER) \ offsetof(TYPE, MEMBER) ... 
offsetofend(TYPE, MEMBER) - 1 @@ -892,8 +895,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); void sk_reuseport_prog_free(struct bpf_prog *prog); int sk_detach_filter(struct sock *sk); -int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, - unsigned int len); +int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len); bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); diff --git a/include/linux/fs.h b/include/linux/fs.h index 82316863c..6de70634e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -316,6 +316,8 @@ enum rw_hint { /* iocb->ki_waitq is valid */ #define IOCB_WAITQ (1 << 19) #define IOCB_NOIO (1 << 20) +/* kiocb is a read or write operation submitted by fs/aio.c. */ +#define IOCB_AIO_RW (1 << 23) struct kiocb { struct file *ki_filp; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c603237e0..973f4143f 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -623,6 +623,15 @@ static inline bool pm_suspended_storage(void) } #endif /* CONFIG_PM_SLEEP */ +/* + * Check if the gfp flags allow compaction - GFP_NOIO is a really + * tricky context because the migration might require IO. + */ +static inline bool gfp_compaction_allowed(gfp_t gfp_mask) +{ + return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO); +} + #ifdef CONFIG_CONTIG_ALLOC /* The below functions must be run on a range from a single zone. */ extern int alloc_contig_range(unsigned long start, unsigned long end, diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index a88be8bd4..54a3ad7bf 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -197,6 +197,7 @@ enum hrtimer_base_type { * @max_hang_time: Maximum time spent in hrtimer_interrupt * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are * expired + * @online: CPU is online from an hrtimers point of view * @timer_waiters: A hrtimer_cancel() invocation waits for the timer * callback to finish. * @expires_next: absolute time of the next event, is required for remote @@ -219,7 +220,8 @@ struct hrtimer_cpu_base { unsigned int hres_active : 1, in_hrtirq : 1, hang_detected : 1, - softirq_activated : 1; + softirq_activated : 1, + online : 1; #ifdef CONFIG_HIGH_RES_TIMERS unsigned int nr_events; unsigned short nr_retries; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 99b73fc4a..90c66b945 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -140,7 +140,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, unsigned long dst_addr, unsigned long src_addr, struct page **pagep); -int hugetlb_reserve_pages(struct inode *inode, long from, long to, +bool hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index eada4d8d6..b606a203d 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -164,8 +164,28 @@ struct hv_ring_buffer { u8 buffer[]; } __packed; + +/* + * If the requested ring buffer size is at least 8 times the size of the + * header, steal space from the ring buffer for the header. Otherwise, add + * space for the header so that is doesn't take too much of the ring buffer + * space. + * + * The factor of 8 is somewhat arbitrary. 
The goal is to prevent adding a + * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring + * buffer size (such as 128 Kbytes) and so end up making a nearly twice as + * large allocation that will be almost half wasted. As a contrasting example, + * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the + * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring. + * In this latter case, we must add 64 Kbytes for the header and not worry + * about what's wasted. + */ +#define VMBUS_HEADER_ADJ(payload_sz) \ + ((payload_sz) >= 8 * sizeof(struct hv_ring_buffer) ? \ + 0 : sizeof(struct hv_ring_buffer)) + /* Calculate the proper size of a ringbuffer, it must be page-aligned */ -#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \ +#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \ (payload_sz)) struct hv_ring_buffer_info { @@ -764,6 +784,23 @@ enum vmbus_device_type { HV_UNKNOWN, }; +/* + * Provides request ids for VMBus. Encapsulates guest memory + * addresses and stores the next available slot in req_arr + * to generate new ids in constant time. + */ +struct vmbus_requestor { + u64 *req_arr; + unsigned long *req_bitmap; /* is a given slot available? */ + u32 size; + u64 next_request_id; + spinlock_t req_lock; /* provides atomicity */ +}; + +#define VMBUS_NO_RQSTOR U64_MAX +#define VMBUS_RQST_ERROR (U64_MAX - 1) +#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2) + struct vmbus_device { u16 dev_type; guid_t guid; @@ -988,8 +1025,14 @@ struct vmbus_channel { u32 fuzz_testing_interrupt_delay; u32 fuzz_testing_message_delay; + /* request/transaction ids for VMBus */ + struct vmbus_requestor requestor; + u32 rqstor_size; }; +u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr); +u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id); + static inline bool is_hvsock_channel(const struct vmbus_channel *c) { return !!(c->offermsg.offer.chn_flags & diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 64ce8cd1c..4adab8ada 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -121,9 +121,9 @@ extern int ip_mc_source(int add, int omode, struct sock *sk, struct ip_mreq_source *mreqs, int ifindex); extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex); extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, - struct ip_msfilter __user *optval, int __user *optlen); + sockptr_t optval, sockptr_t optlen); extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, - struct sockaddr_storage __user *p); + sockptr_t optval, size_t offset); extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif, int sdif); extern void ip_mc_init_dev(struct in_device *); diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 649a4d7c2..55d09f594 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -6,9 +6,9 @@ #include <linux/xarray.h> #if defined(CONFIG_IO_URING) -struct sock *io_uring_get_socket(struct file *file); void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); +bool io_is_uring_fops(struct file *file); static inline void io_uring_files_cancel(void) { @@ -26,10 +26,6 @@ static inline void io_uring_free(struct task_struct *tsk) __io_uring_free(tsk); } #else -static inline struct sock *io_uring_get_socket(struct file *file) -{ - return NULL; -} static inline void io_uring_task_cancel(void) { } @@ -39,6 +35,10 @@ static inline void 
io_uring_files_cancel(void) static inline void io_uring_free(struct task_struct *tsk) { } +static inline bool io_is_uring_fops(struct file *file) +{ + return false; +} #endif #endif diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 3ed4e8771..a53adf431 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -133,7 +133,7 @@ do { \ # define lockdep_softirq_enter() do { } while (0) # define lockdep_softirq_exit() do { } while (0) # define lockdep_hrtimer_enter(__hrtimer) false -# define lockdep_hrtimer_exit(__context) do { } while (0) +# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0) # define lockdep_posixtimer_enter() do { } while (0) # define lockdep_posixtimer_exit() do { } while (0) # define lockdep_irq_work_enter(__work) do { } while (0) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 2c2586312..3eca9f91b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -321,6 +321,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ } while (0) +#define lockdep_assert_none_held_once() do { \ + WARN_ON_ONCE(debug_locks && current->lockdep_depth); \ + } while (0) + #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) #define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) @@ -394,6 +398,7 @@ static inline void lockdep_unregister_key(struct lock_class_key *key) #define lockdep_assert_held_write(l) do { (void)(l); } while (0) #define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) +#define lockdep_assert_none_held_once() do { } while (0) #define lockdep_recursing(tsk) (0) diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 693ed9c61..07abcd384 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -158,6 +158,8 @@ LSM_HOOK(int, 0, file_alloc_security, struct file *file) LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file) LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd, unsigned long arg) +LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd, + unsigned long arg) LSM_HOOK(int, 0, mmap_addr, unsigned long addr) LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) @@ -291,9 +293,9 @@ LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname) LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname) LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how) LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb) -LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock, - char __user *optval, int __user *optlen, unsigned len) -LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock, +LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock, + sockptr_t optval, sockptr_t optlen, unsigned int len) +LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock, struct sk_buff *skb, u32 *secid) LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority) LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 64cdf4d7b..bbf9c8c7b 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -926,8 +926,8 @@ * SO_GETPEERSEC. 
For tcp sockets this can be meaningful if the * socket is associated with an ipsec SA. * @sock is the local socket. - * @optval userspace memory where the security state is to be copied. - * @optlen userspace int where the module should copy the actual length + * @optval memory where the security state is to be copied. + * @optlen memory where the module should copy the actual length * of the security state. * @len as input is the maximum length to copy to userspace provided * by the caller. diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index d75ef8aa8..28d44061d 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -261,7 +261,10 @@ struct mlx5_wqe_eth_seg { union { struct { __be16 sz; - u8 start[2]; + union { + u8 start[2]; + DECLARE_FLEX_ARRAY(u8, data); + }; } inline_hdr; struct { __be16 type; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b2e4599b8..ffae2b330 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1188,6 +1188,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec) #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK) struct mem_section_usage { + struct rcu_head rcu; #ifdef CONFIG_SPARSEMEM_VMEMMAP DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION); #endif @@ -1353,7 +1354,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) { int idx = subsection_map_index(pfn); - return test_bit(idx, ms->usage->subsection_map); + return test_bit(idx, READ_ONCE(ms->usage)->subsection_map); } #else static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) @@ -1366,17 +1367,24 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) static inline int pfn_valid(unsigned long pfn) { struct mem_section *ms; + int ret; if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; - ms = __nr_to_section(pfn_to_section_nr(pfn)); - if (!valid_section(ms)) + ms = __pfn_to_section(pfn); + rcu_read_lock(); + if (!valid_section(ms)) { + rcu_read_unlock(); return 0; + } /* * Traditionally early sections always returned pfn_valid() for * the entire section-sized span. 
*/ - return early_section(ms) || pfn_section_valid(ms, pfn); + ret = early_section(ms) || pfn_section_valid(ms, pfn); + rcu_read_unlock(); + + return ret; } #endif @@ -1384,7 +1392,7 @@ static inline int pfn_in_present_section(unsigned long pfn) { if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; - return present_section(__nr_to_section(pfn_to_section_nr(pfn))); + return present_section(__pfn_to_section(pfn)); } static inline unsigned long next_present_section_nr(unsigned long section_nr) diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 6cbbfe943..80b8400ab 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -17,7 +17,7 @@ static inline int ip_mroute_opt(int opt) } int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); -int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); +int ip_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t); int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); int ip_mr_init(void); @@ -29,8 +29,8 @@ static inline int ip_mroute_setsockopt(struct sock *sock, int optname, return -ENOPROTOOPT; } -static inline int ip_mroute_getsockopt(struct sock *sock, int optname, - char __user *optval, int __user *optlen) +static inline int ip_mroute_getsockopt(struct sock *sk, int optname, + sockptr_t optval, sockptr_t optlen) { return -ENOPROTOOPT; } diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 62f7e7e25..f27894e50 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -188,6 +188,8 @@ struct ip_set_type_variant { /* Return true if "b" set is the same as "a" * according to the create set parameters */ bool (*same_set)(const struct ip_set *a, const struct ip_set *b); + /* Cancel ongoing garbage collectors before destroying the set*/ + void (*cancel_gc)(struct ip_set *set); /* Region-locking is used */ bool region_lock; }; @@ -239,6 +241,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type); /* A generic IP set */ struct ip_set { + /* For call_cru in destroy */ + struct rcu_head rcu; /* The name of the set */ char name[IPSET_MAXNAMELEN]; /* Lock protecting the set data */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index e39342945..748886458 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -553,6 +553,7 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); +void nfs_commit_begin(struct nfs_mds_commit_info *cinfo); bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); static inline int diff --git a/include/linux/objtool.h b/include/linux/objtool.h index d17fa7f40..0f13875ac 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -141,6 +141,12 @@ struct unwind_hint { .popsection .endm +.macro STACK_FRAME_NON_STANDARD func:req + .pushsection .discard.func_stack_frame_non_standard, "aw" + .long \func - . 
+ .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -158,6 +164,8 @@ struct unwind_hint { .endm .macro ANNOTATE_NOENDBR .endm +.macro STACK_FRAME_NON_STANDARD func:req +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 550e1cdb4..5b24a6fbf 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -306,6 +306,7 @@ struct pcie_link_state; struct pci_vpd; struct pci_sriov; struct pci_p2pdma; +struct rcec_ea; /* The pci_dev structure describes PCI devices */ struct pci_dev { @@ -329,6 +330,10 @@ struct pci_dev { u16 aer_cap; /* AER capability offset */ struct aer_stats *aer_stats; /* AER stats for this device */ #endif +#ifdef CONFIG_PCIEPORTBUS + struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */ +#endif + u32 devcap; /* PCIe Device Capabilities */ u8 pcie_cap; /* PCIe capability offset */ u8 msi_cap; /* MSI capability offset */ u8 msix_cap; /* MSI-X capability offset */ @@ -449,6 +454,7 @@ struct pci_dev { unsigned int link_active_reporting:1;/* Device capable of reporting link active */ unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */ unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ + unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -2191,6 +2197,11 @@ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) return NULL; } +static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) +{ + return dev->error_state == pci_channel_io_perm_failure; +} + void pci_request_acs(void); bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); bool pci_acs_path_enabled(struct pci_dev *start, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1a41147b2..80744a7b5 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3020,6 +3020,7 @@ #define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 #define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2 #define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601 +#define PCI_DEVICE_ID_INTEL_HDA_ARL 0x7728 #define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119 #define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a #define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183 diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h index 71d956935..86ccd90e4 100644 --- a/include/linux/phy/tegra/xusb.h +++ b/include/linux/phy/tegra/xusb.h @@ -23,4 +23,6 @@ int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl, int tegra_phy_xusb_utmi_port_reset(struct phy *phy); int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, unsigned int port); +int tegra_xusb_padctl_get_port_number(struct phy *phy); + #endif /* PHY_TEGRA_XUSB_H */ diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index ef236dbaa..7b72d93c2 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -125,6 +125,22 @@ struct pipe_buf_operations { }; /** + * pipe_has_watch_queue - Check whether the pipe is a watch_queue, + * i.e. it was created with O_NOTIFICATION_PIPE + * @pipe: The pipe to check + * + * Return: true if pipe is a watch queue, false otherwise. 
+ */ +static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe) +{ +#ifdef CONFIG_WATCH_QUEUE + return pipe->watch_queue != NULL; +#else + return false; +#endif +} + +/** * pipe_empty - Return true if the pipe is empty * @head: The pipe ring head pointer * @tail: The pipe ring tail pointer diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 718600e83..ca856e582 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -60,6 +60,8 @@ extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); extern void pm_runtime_release_supplier(struct device_link *link); +extern int devm_pm_runtime_enable(struct device *dev); + /** * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. * @dev: Target device. @@ -254,6 +256,8 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {} static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} +static inline int devm_pm_runtime_enable(struct device *dev) { return 0; } + static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} @@ -535,6 +539,10 @@ static inline void pm_runtime_disable(struct device *dev) * Allow the runtime PM autosuspend mechanism to be used for @dev whenever * requested (or "autosuspend" will be handled as direct runtime-suspend for * it). + * + * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend() + * at driver exit time unless your driver initially enabled pm_runtime + * with devm_pm_runtime_enable() (which handles it for you). */ static inline void pm_runtime_use_autosuspend(struct device *dev) { diff --git a/include/linux/poll.h b/include/linux/poll.h index 1cdc32b1f..7e0fdcf90 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -16,11 +16,7 @@ extern struct ctl_table epoll_table[]; /* for sysctl */ /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating additional memory. */ -#ifdef __clang__ -#define MAX_STACK_ALLOC 768 -#else #define MAX_STACK_ALLOC 832 -#endif #define FRONTEND_STACK_ALLOC 256 #define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC #define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 8716a1706..9db6710e6 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -202,6 +202,18 @@ static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ /** + * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period? + * + * As an accident of implementation, an RCU Tasks Trace grace period also + * acts as an RCU grace period. However, this could change at any time. + * Code relying on this accident must call this function to verify that + * this accident is still happening. + * + * You have been warned! + */ +static inline bool rcu_trace_implies_rcu_gp(void) { return true; } + +/** * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU * * This macro resembles cond_resched(), except that it is defined to @@ -214,6 +226,37 @@ do { \ cond_resched(); \ } while (0) +/** + * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states + * @old_ts: jiffies at start of processing. 
+ * + * This helper is for long-running softirq handlers, such as NAPI threads in + * networking. The caller should initialize the variable passed in as @old_ts + * at the beginning of the softirq handler. When invoked frequently, this macro + * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will + * provide both RCU and RCU-Tasks quiescent states. Note that this macro + * modifies its old_ts argument. + * + * Because regions of code that have disabled softirq act as RCU read-side + * critical sections, this macro should be invoked with softirq (and + * preemption) enabled. + * + * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would + * have more chance to invoke schedule() calls and provide necessary quiescent + * states. As a contrast, calling cond_resched() only won't achieve the same + * effect because cond_resched() does not provide RCU-Tasks quiescent states. + */ +#define rcu_softirq_qs_periodic(old_ts) \ +do { \ + if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \ + time_after(jiffies, (old_ts) + HZ / 10)) { \ + preempt_disable(); \ + rcu_softirq_qs(); \ + preempt_enable(); \ + (old_ts) = jiffies; \ + } \ +} while (0) + /* * Infrastructure to implement the synchronize_() primitives in * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. diff --git a/include/linux/regmap.h b/include/linux/regmap.h index e7834d982..83a7485de 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -289,6 +289,17 @@ typedef void (*regmap_unlock)(void *); * read operation on a bus such as SPI, I2C, etc. Most of the * devices do not need this. * @reg_write: Same as above for writing. + * @reg_update_bits: Optional callback that if filled will be used to perform + * all the update_bits(rmw) operation. Should only be provided + * if the function require special handling with lock and reg + * handling and the operation cannot be represented as a simple + * update_bits operation on a bus such as SPI, I2C, etc. + * @read: Optional callback that if filled will be used to perform all the + * bulk reads from the registers. Data is returned in the buffer used + * to transmit data. + * @write: Same as above for writing. + * @max_raw_read: Max raw read size that can be used on the device. + * @max_raw_write: Max raw write size that can be used on the device. * @fast_io: Register IO is fast. Use a spinlock instead of a mutex * to perform locking. This field is ignored if custom lock/unlock * functions are used (see fields lock/unlock of struct regmap_config). 
@@ -366,6 +377,14 @@ struct regmap_config { int (*reg_read)(void *context, unsigned int reg, unsigned int *val); int (*reg_write)(void *context, unsigned int reg, unsigned int val); + int (*reg_update_bits)(void *context, unsigned int reg, + unsigned int mask, unsigned int val); + /* Bulk read/write */ + int (*read)(void *context, const void *reg_buf, size_t reg_size, + void *val_buf, size_t val_size); + int (*write)(void *context, const void *data, size_t count); + size_t max_raw_read; + size_t max_raw_write; bool fast_io; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 3fa3ba649..2546758f1 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -368,7 +368,9 @@ enum rsc_handling_status { * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a * negative value on error * @load_rsc_table: load resource table from firmware image - * @find_loaded_rsc_table: find the loaded resouce table + * @find_loaded_rsc_table: find the loaded resource table from firmware image + * @get_loaded_rsc_table: get resource table installed in memory + * by external entity * @load: load firmware to memory, where the remote processor * expects to find it * @sanity_check: sanity check the fw image @@ -389,6 +391,8 @@ struct rproc_ops { int offset, int avail); struct resource_table *(*find_loaded_rsc_table)( struct rproc *rproc, const struct firmware *fw); + struct resource_table *(*get_loaded_rsc_table)( + struct rproc *rproc, size_t *size); int (*load)(struct rproc *rproc, const struct firmware *fw); int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index f24575942..879a5c8f9 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -16,7 +16,7 @@ * try_get_task_stack() instead. task_stack_page will return a pointer * that could get freed out from under you. 
*/ -static inline void *task_stack_page(const struct task_struct *task) +static __always_inline void *task_stack_page(const struct task_struct *task) { return task->stack; } diff --git a/include/linux/security.h b/include/linux/security.h index e9b4b5410..5b61aa19f 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -31,6 +31,7 @@ #include <linux/err.h> #include <linux/string.h> #include <linux/mm.h> +#include <linux/sockptr.h> struct linux_binprm; struct cred; @@ -368,6 +369,8 @@ int security_file_permission(struct file *file, int mask); int security_file_alloc(struct file *file); void security_file_free(struct file *file); int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +int security_file_ioctl_compat(struct file *file, unsigned int cmd, + unsigned long arg); int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags); int security_mmap_addr(unsigned long addr); @@ -925,6 +928,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd, return 0; } +static inline int security_file_ioctl_compat(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + return 0; +} + static inline int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags) { @@ -1357,8 +1367,8 @@ int security_socket_getsockopt(struct socket *sock, int level, int optname); int security_socket_setsockopt(struct socket *sock, int level, int optname); int security_socket_shutdown(struct socket *sock, int how); int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb); -int security_socket_getpeersec_stream(struct socket *sock, char __user *optval, - int __user *optlen, unsigned len); +int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval, + sockptr_t optlen, unsigned int len); int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid); int security_sk_alloc(struct sock *sk, int family, gfp_t priority); void security_sk_free(struct sock *sk); @@ -1492,8 +1502,10 @@ static inline int security_sock_rcv_skb(struct sock *sk, return 0; } -static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval, - int __user *optlen, unsigned len) +static inline int security_socket_getpeersec_stream(struct socket *sock, + sockptr_t optval, + sockptr_t optlen, + unsigned int len) { return -ENOPROTOOPT; } diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 6df4c3356..48b9cd8fd 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -254,11 +254,91 @@ struct uart_port { struct attribute_group *attr_group; /* port specific attributes */ const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ struct serial_rs485 rs485; + const struct serial_rs485 *rs485_supported; /* Supported mask for serial_rs485 */ struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */ struct serial_iso7816 iso7816; void *private_data; /* generic platform data pointer */ }; +/** + * uart_port_lock - Lock the UART port + * @up: Pointer to UART port structure + */ +static inline void uart_port_lock(struct uart_port *up) +{ + spin_lock(&up->lock); +} + +/** + * uart_port_lock_irq - Lock the UART port and disable interrupts + * @up: Pointer to UART port structure + */ +static inline void uart_port_lock_irq(struct uart_port *up) +{ + spin_lock_irq(&up->lock); +} + +/** + * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts + * @up: Pointer to UART port 
structure + * @flags: Pointer to interrupt flags storage + */ +static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags) +{ + spin_lock_irqsave(&up->lock, *flags); +} + +/** + * uart_port_trylock - Try to lock the UART port + * @up: Pointer to UART port structure + * + * Returns: True if lock was acquired, false otherwise + */ +static inline bool uart_port_trylock(struct uart_port *up) +{ + return spin_trylock(&up->lock); +} + +/** + * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts + * @up: Pointer to UART port structure + * @flags: Pointer to interrupt flags storage + * + * Returns: True if lock was acquired, false otherwise + */ +static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags) +{ + return spin_trylock_irqsave(&up->lock, *flags); +} + +/** + * uart_port_unlock - Unlock the UART port + * @up: Pointer to UART port structure + */ +static inline void uart_port_unlock(struct uart_port *up) +{ + spin_unlock(&up->lock); +} + +/** + * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts + * @up: Pointer to UART port structure + */ +static inline void uart_port_unlock_irq(struct uart_port *up) +{ + spin_unlock_irq(&up->lock); +} + +/** + * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts + * @up: Pointer to UART port structure + * @flags: The saved interrupt flags for restore + */ +static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags) +{ + spin_unlock_irqrestore(&up->lock, flags); +} + static inline int serial_port_in(struct uart_port *up, int offset) { return up->serial_in(up, offset); diff --git a/include/linux/socket.h b/include/linux/socket.h index c3b35d18b..daf51fef5 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -31,7 +31,10 @@ typedef __kernel_sa_family_t sa_family_t; struct sockaddr { sa_family_t sa_family; /* address family, AF_xxx */ - char sa_data[14]; /* 14 bytes of protocol address */ + union { + char sa_data_min[14]; /* Minimum 14 bytes of protocol address */ + DECLARE_FLEX_ARRAY(char, sa_data); + }; }; struct linger { diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index ea1934142..38862819e 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -64,6 +64,11 @@ static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset, return 0; } +static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size) +{ + return copy_to_sockptr_offset(dst, 0, src, size); +} + static inline void *memdup_sockptr(sockptr_t src, size_t len) { void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index e1d88630f..ab7747549 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -171,6 +171,7 @@ struct spi_device { #define SPI_MODE_1 (0|SPI_CPHA) #define SPI_MODE_2 (SPI_CPOL|0) #define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) +#define SPI_MODE_X_MASK (SPI_CPOL|SPI_CPHA) #define SPI_CS_HIGH 0x04 /* chipselect active high? */ #define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ #define SPI_3WIRE 0x10 /* SI/SO signals shared */ diff --git a/include/linux/stddef.h b/include/linux/stddef.h index 938216f8a..31fdbb784 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h @@ -84,4 +84,17 @@ enum { #define struct_group_tagged(TAG, NAME, MEMBERS...) 
\ __struct_group(TAG, NAME, /* no attrs */, MEMBERS) +/** + * DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union + * + * @TYPE: The type of each flexible array element + * @NAME: The name of the flexible array member + * + * In order to have a flexible array member in a union or alone in a + * struct, it needs to be wrapped in an anonymous struct with at least 1 + * named member, but that member can be empty. + */ +#define DECLARE_FLEX_ARRAY(TYPE, NAME) \ + __DECLARE_FLEX_ARRAY(TYPE, NAME) + #endif diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 256dff36c..0527a4bc9 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -197,7 +197,7 @@ struct rpc_wait_queue { unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ unsigned char priority; /* current priority */ unsigned char nr; /* # tasks remaining for cookie */ - unsigned short qlen; /* total # tasks waiting in queue */ + unsigned int qlen; /* total # tasks waiting in queue */ struct rpc_timer timer_list; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) const char * name; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a058c96cf..17a24e118 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -119,6 +119,7 @@ struct open_how; #define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL)) #define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a #define __SC_CAST(t, a) (__force t) a +#define __SC_TYPE(t, a) t #define __SC_ARGS(t, a) a #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long)) diff --git a/include/linux/timer.h b/include/linux/timer.h index d10bc7e73..a3d04c4f1 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -183,12 +183,20 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires); extern void add_timer(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); +extern int timer_delete_sync(struct timer_list *timer); -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) - extern int del_timer_sync(struct timer_list *timer); -#else -# define del_timer_sync(t) del_timer(t) -#endif +/** + * del_timer_sync - Delete a pending timer and wait for a running callback + * @timer: The timer to be deleted + * + * See timer_delete_sync() for detailed explanation. + * + * Do not use in new code. Use timer_delete_sync() instead. 
+ */ +static inline int del_timer_sync(struct timer_list *timer) +{ + return timer_delete_sync(timer); +} #define del_singleshot_timer_sync(t) del_timer_sync(t) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index f7ed0471d..dbf099315 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -577,7 +577,7 @@ struct trace_event_file { } \ early_initcall(trace_init_perf_perm_##name); -#define PERF_MAX_TRACE_SIZE 2048 +#define PERF_MAX_TRACE_SIZE 8192 #define MAX_FILTER_STR_VAL 256U /* Should handle KSYM_SYMBOL_LEN */ diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index e81856c0b..7c6f81b89 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -116,7 +116,11 @@ static inline void u64_stats_inc(u64_stats_t *p) #endif #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) -#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) +#define u64_stats_init(syncp) \ + do { \ + struct u64_stats_sync *__s = (syncp); \ + seqcount_init(&__s->seq); \ + } while (0) #else static inline void u64_stats_init(struct u64_stats_sync *syncp) { diff --git a/include/linux/udp.h b/include/linux/udp.h index ae58ff3b6..a22088001 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -131,6 +131,24 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk, } } +DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key); +#if IS_ENABLED(CONFIG_IPV6) +DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key); +#endif + +static inline bool udp_encap_needed(void) +{ + if (static_branch_unlikely(&udp_encap_needed_key)) + return true; + +#if IS_ENABLED(CONFIG_IPV6) + if (static_branch_unlikely(&udpv6_encap_needed_key)) + return true; +#endif + + return false; +} + static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) { if (!skb_is_gso(skb)) @@ -142,6 +160,16 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist) return true; + /* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still + * land in a tunnel as the socket check in udp_gro_receive cannot be + * foolproof. 
+ */ + if (udp_encap_needed() && + READ_ONCE(udp_sk(sk)->encap_rcv) && + !(skb_shinfo(skb)->gso_type & + (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))) + return true; + return false; } diff --git a/include/linux/units.h b/include/linux/units.h index 3457179f7..b61e3f6d5 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -20,9 +20,13 @@ #define PICO 1000000000000ULL #define FEMTO 1000000000000000ULL -#define MILLIWATT_PER_WATT 1000L -#define MICROWATT_PER_MILLIWATT 1000L -#define MICROWATT_PER_WATT 1000000L +#define HZ_PER_KHZ 1000UL +#define KHZ_PER_MHZ 1000UL +#define HZ_PER_MHZ 1000000UL + +#define MILLIWATT_PER_WATT 1000UL +#define MICROWATT_PER_MILLIWATT 1000UL +#define MICROWATT_PER_WATT 1000000UL #define ABSOLUTE_ZERO_MILLICELSIUS -273150 diff --git a/include/linux/vfio.h b/include/linux/vfio.h index f479c5d7f..56d18912c 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -221,6 +221,7 @@ struct virqfd { wait_queue_entry_t wait; poll_table pt; struct work_struct shutdown; + struct work_struct flush_inject; struct virqfd **pvirqfd; }; @@ -229,5 +230,6 @@ extern int vfio_virqfd_enable(void *opaque, void (*thread)(void *, void *), void *data, struct virqfd **pvirqfd, int fd); extern void vfio_virqfd_disable(struct virqfd **pvirqfd); +void vfio_virqfd_flush_thread(struct virqfd **pvirqfd); #endif /* VFIO_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 76dad53a4..0fd47f2f3 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -112,6 +112,11 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align, void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller); +extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags); +extern void *vmalloc_array(size_t n, size_t size); +extern void *__vcalloc(size_t n, size_t size, gfp_t flags); +extern void *vcalloc(size_t n, size_t size); + extern void vfree(const void *addr); extern void vfree_atomic(const void *addr); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 4d0c4cf1d..f666d3628 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -437,6 +437,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp) refcount_inc(&ifp->refcnt); } +static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp) +{ + return refcount_inc_not_zero(&ifp->refcnt); +} /* * compute link-local solicited-node multicast address diff --git a/include/net/af_unix.h b/include/net/af_unix.h index f42fdddec..349279c4d 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -47,12 +47,6 @@ struct scm_stat { #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) -#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) -#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) -#define unix_state_lock_nested(s) \ - spin_lock_nested(&unix_sk(s)->lock, \ - SINGLE_DEPTH_NESTING) - /* The AF_UNIX socket */ struct unix_sock { /* WARNING: sk has to be the first member */ @@ -62,7 +56,7 @@ struct unix_sock { struct mutex iolock, bindlock; struct sock *peer; struct list_head link; - atomic_long_t inflight; + unsigned long inflight; spinlock_t lock; unsigned long gc_flags; #define UNIX_GC_CANDIDATE 0 @@ -77,6 +71,23 @@ static inline struct unix_sock *unix_sk(const struct sock *sk) return (struct unix_sock *)sk; } +#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) +#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) +enum unix_socket_lock_class { + U_LOCK_NORMAL, + U_LOCK_SECOND, 
/* for double locking, see unix_state_double_lock(). */ + U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */ + U_LOCK_GC_LISTENER, /* used for listening socket while determining gc + * candidates to close a small race window. + */ +}; + +static inline void unix_state_lock_nested(struct sock *sk, + enum unix_socket_lock_class subclass) +{ + spin_lock_nested(&unix_sk(sk)->lock, subclass); +} + #define peer_wait peer_wq.wait long unix_inq_len(struct sock *sk); diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 355835639..7d2bd562d 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -487,6 +487,15 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk, return skb; } +static inline int bt_copy_from_sockptr(void *dst, size_t dst_size, + sockptr_t src, size_t src_size) +{ + if (dst_size > src_size) + return -EINVAL; + + return copy_from_sockptr(dst, src, dst_size); +} + int bt_to_errno(u16 code); void hci_sock_set_flag(struct sock *sk, int nr); diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 6ed07844e..5290781ab 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -257,6 +257,7 @@ struct ieee802154_llsec_key { struct ieee802154_llsec_key_entry { struct list_head list; + struct rcu_head rcu; struct ieee802154_llsec_key_id id; struct ieee802154_llsec_key *key; diff --git a/include/net/compat.h b/include/net/compat.h index 745db0d60..52bf5f0ee 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -81,13 +81,26 @@ struct compat_group_source_req { } __packed; struct compat_group_filter { - __u32 gf_interface; - struct __kernel_sockaddr_storage gf_group - __aligned(4); - __u32 gf_fmode; - __u32 gf_numsrc; - struct __kernel_sockaddr_storage gf_slist[1] - __aligned(4); + union { + struct { + __u32 gf_interface_aux; + struct __kernel_sockaddr_storage gf_group_aux + __aligned(4); + __u32 gf_fmode_aux; + __u32 gf_numsrc_aux; + struct __kernel_sockaddr_storage gf_slist[1] + __aligned(4); + } __packed; + struct { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group + __aligned(4); + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist_flex[] + __aligned(4); + } __packed; + }; } __packed; #endif /* NET_COMPAT_H */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index ff901aade..f5967805c 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -172,6 +172,7 @@ void inet_csk_init_xmit_timers(struct sock *sk, void (*delack_handler)(struct timer_list *), void (*keepalive_handler)(struct timer_list *)); void inet_csk_clear_xmit_timers(struct sock *sk); +void inet_csk_clear_xmit_timers_sync(struct sock *sk); static inline void inet_csk_schedule_ack(struct sock *sk) { @@ -339,4 +340,12 @@ static inline bool inet_csk_has_ulp(struct sock *sk) return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops; } +static inline void inet_init_csk_locks(struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + + spin_lock_init(&icsk->icsk_accept_queue.rskq_lock); + spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock); +} + #endif /* _INET_CONNECTION_SOCK_H */ diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 58d8e6260..1f016af06 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -333,6 +333,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb) return pskb_network_may_pull(skb, nhlen); } +/* 
Variant of pskb_inet_may_pull(). + */ +static inline bool skb_vlan_inet_prepare(struct sk_buff *skb) +{ + int nhlen = 0, maclen = ETH_HLEN; + __be16 type = skb->protocol; + + /* Essentially this is skb_protocol(skb, true) + * And we get MAC len. + */ + if (eth_type_vlan(type)) + type = __vlan_get_protocol(skb, type, &maclen); + + switch (type) { +#if IS_ENABLED(CONFIG_IPV6) + case htons(ETH_P_IPV6): + nhlen = sizeof(struct ipv6hdr); + break; +#endif + case htons(ETH_P_IP): + nhlen = sizeof(struct iphdr); + break; + } + /* For ETH_P_IPV6/ETH_P_IP we make sure to pull + * a base network header in skb->head. + */ + if (!pskb_may_pull(skb, maclen + nhlen)) + return false; + + skb_set_network_header(skb, maclen); + return true; +} + static inline int ip_encap_hlen(struct ip_tunnel_encap *e) { const struct ip_tunnel_encap_ops *ops; diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 49aa79c7b..581cd37aa 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, */ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) { - if (skb->protocol == htons(ETH_P_802_2)) - memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); + memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); } /** @@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) */ static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da) { - if (skb->protocol == htons(ETH_P_802_2)) - memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); + memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); } /** diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 223765751..2da11d8c0 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -142,9 +142,9 @@ static inline u16 nft_reg_load16(const u32 *sreg) return *(u16 *)sreg; } -static inline void nft_reg_store64(u32 *dreg, u64 val) +static inline void nft_reg_store64(u64 *dreg, u64 val) { - put_unaligned(val, (u64 *)dreg); + put_unaligned(val, dreg); } static inline u64 nft_reg_load64(const u32 *sreg) diff --git a/include/net/sock.h b/include/net/sock.h index 87ee284ea..8bcc96bf2 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1681,6 +1681,13 @@ static inline void sock_owned_by_me(const struct sock *sk) #endif } +static inline void sock_not_owned_by_me(const struct sock *sk) +{ +#ifdef CONFIG_LOCKDEP + WARN_ON_ONCE(lockdep_sock_is_held(sk) && debug_locks); +#endif +} + static inline bool sock_owned_by_user(const struct sock *sk) { sock_owned_by_me(sk); diff --git a/include/net/tcp.h b/include/net/tcp.h index 5c03dc6d0..d6e4b6f7d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2224,7 +2224,7 @@ struct tcp_ulp_ops { /* cleanup ulp */ void (*release)(struct sock *sk); /* diagnostic */ - int (*get_info)(const struct sock *sk, struct sk_buff *skb); + int (*get_info)(struct sock *sk, struct sk_buff *skb); size_t (*get_info_size)(const struct sock *sk); /* clone ulp */ void (*clone)(const struct request_sock *req, struct sock *newsk, diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 5339baadc..39c7a36cd 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -178,16 +178,17 @@ static inline int scsi_is_wlun(u64 lun) /* * Internal return values. 
*/ - -#define NEEDS_RETRY 0x2001 -#define SUCCESS 0x2002 -#define FAILED 0x2003 -#define QUEUED 0x2004 -#define SOFT_ERROR 0x2005 -#define ADD_TO_MLQUEUE 0x2006 -#define TIMEOUT_ERROR 0x2007 -#define SCSI_RETURN_NOT_HANDLED 0x2008 -#define FAST_IO_FAIL 0x2009 +enum scsi_disposition { + NEEDS_RETRY = 0x2001, + SUCCESS = 0x2002, + FAILED = 0x2003, + QUEUED = 0x2004, + SOFT_ERROR = 0x2005, + ADD_TO_MLQUEUE = 0x2006, + TIMEOUT_ERROR = 0x2007, + SCSI_RETURN_NOT_HANDLED = 0x2008, + FAST_IO_FAIL = 0x2009, +}; /* * Midlevel queue return values. diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h index 2852e470a..47ccf2f11 100644 --- a/include/scsi/scsi_dh.h +++ b/include/scsi/scsi_dh.h @@ -52,7 +52,8 @@ struct scsi_device_handler { /* Filled by the hardware handler */ struct module *module; const char *name; - int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); + enum scsi_disposition (*check_sense)(struct scsi_device *, + struct scsi_sense_hdr *); int (*attach)(struct scsi_device *); void (*detach)(struct scsi_device *); int (*activate)(struct scsi_device *, activate_complete, void *); diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h index 6bd5ed695..468094254 100644 --- a/include/scsi/scsi_eh.h +++ b/include/scsi/scsi_eh.h @@ -17,7 +17,7 @@ extern void scsi_report_device_reset(struct Scsi_Host *, int, int); extern int scsi_block_when_processing_errors(struct scsi_device *); extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd, struct scsi_sense_hdr *sshdr); -extern int scsi_check_sense(struct scsi_cmnd *); +extern enum scsi_disposition scsi_check_sense(struct scsi_cmnd *); static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr) { diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index 9f484113c..3cecbfdb0 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -1171,6 +1171,15 @@ int qman_delete_cgr(struct qman_cgr *cgr); void qman_delete_cgr_safe(struct qman_cgr *cgr); /** + * qman_update_cgr_safe - Modifies a congestion group object from any CPU + * @cgr: the 'cgr' object to modify + * @opts: state of the CGR settings + * + * This will select the proper CPU and modify the CGR settings. 
+ */ +int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts); + +/** * qman_query_cgr_congested - Queries CGR's congestion status * @cgr: the 'cgr' object to query * @result: returns 'cgr's congestion status, 1 (true) if congested diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h index 330d32d84..a50df4163 100644 --- a/include/trace/events/qdisc.h +++ b/include/trace/events/qdisc.h @@ -53,14 +53,14 @@ TRACE_EVENT(qdisc_reset, TP_ARGS(q), TP_STRUCT__entry( - __string( dev, qdisc_dev(q) ) - __string( kind, q->ops->id ) - __field( u32, parent ) - __field( u32, handle ) + __string( dev, qdisc_dev(q)->name ) + __string( kind, q->ops->id ) + __field( u32, parent ) + __field( u32, handle ) ), TP_fast_assign( - __assign_str(dev, qdisc_dev(q)); + __assign_str(dev, qdisc_dev(q)->name); __assign_str(kind, q->ops->id); __entry->parent = q->parent; __entry->handle = q->handle; @@ -78,14 +78,14 @@ TRACE_EVENT(qdisc_destroy, TP_ARGS(q), TP_STRUCT__entry( - __string( dev, qdisc_dev(q) ) - __string( kind, q->ops->id ) - __field( u32, parent ) - __field( u32, handle ) + __string( dev, qdisc_dev(q)->name ) + __string( kind, q->ops->id ) + __field( u32, parent ) + __field( u32, handle ) ), TP_fast_assign( - __assign_str(dev, qdisc_dev(q)); + __assign_str(dev, qdisc_dev(q)->name); __assign_str(kind, q->ops->id); __entry->parent = q->parent; __entry->handle = q->handle; diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index e65300d63..cf4431b74 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -576,6 +576,9 @@ struct btrfs_ioctl_clone_range_args { */ #define BTRFS_DEFRAG_RANGE_COMPRESS 1 #define BTRFS_DEFRAG_RANGE_START_IO 2 +#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \ + BTRFS_DEFRAG_RANGE_START_IO) + struct btrfs_ioctl_defrag_range_args { /* start of the defrag operation */ __u64 start; diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 3960bc3da..c4702fff6 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -190,11 +190,22 @@ struct ip_mreq_source { }; struct ip_msfilter { - __be32 imsf_multiaddr; - __be32 imsf_interface; - __u32 imsf_fmode; - __u32 imsf_numsrc; - __be32 imsf_slist[1]; + union { + struct { + __be32 imsf_multiaddr_aux; + __be32 imsf_interface_aux; + __u32 imsf_fmode_aux; + __u32 imsf_numsrc_aux; + __be32 imsf_slist[1]; + }; + struct { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + __be32 imsf_slist_flex[]; + }; + }; }; #define IP_MSFILTER_SIZE(numsrc) \ @@ -213,11 +224,22 @@ struct group_source_req { }; struct group_filter { - __u32 gf_interface; /* interface index */ - struct __kernel_sockaddr_storage gf_group; /* multicast address */ - __u32 gf_fmode; /* filter mode */ - __u32 gf_numsrc; /* number of sources */ - struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */ + union { + struct { + __u32 gf_interface_aux; /* interface index */ + struct __kernel_sockaddr_storage gf_group_aux; /* multicast address */ + __u32 gf_fmode_aux; /* filter mode */ + __u32 gf_numsrc_aux; /* number of sources */ + struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */ + }; + struct { + __u32 gf_interface; /* interface index */ + struct __kernel_sockaddr_storage gf_group; /* multicast address */ + __u32 gf_fmode; /* filter mode */ + __u32 gf_numsrc; /* number of sources */ + struct __kernel_sockaddr_storage gf_slist_flex[]; /* interface index */ + }; + }; }; #define GROUP_FILTER_SIZE(numsrc) 
\ diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 7989d9483..bed20a89c 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -602,6 +602,7 @@ #define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */ #define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */ +#define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */ #define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */ #define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 163b7ec57..f93ffb1b6 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -262,9 +262,11 @@ enum nft_rule_attributes { /** * enum nft_rule_compat_flags - nf_tables rule compat flags * + * @NFT_RULE_COMPAT_F_UNUSED: unused * @NFT_RULE_COMPAT_F_INV: invert the check result */ enum nft_rule_compat_flags { + NFT_RULE_COMPAT_F_UNUSED = (1 << 0), NFT_RULE_COMPAT_F_INV = (1 << 1), NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV, }; diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index c3725b492..46c7bab50 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -28,4 +28,27 @@ struct { MEMBERS } ATTRS; \ struct TAG { MEMBERS } ATTRS NAME; \ } + +#ifdef __cplusplus +/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */ +#define __DECLARE_FLEX_ARRAY(T, member) \ + T member[0] +#else +/** + * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union + * + * @TYPE: The type of each flexible array element + * @NAME: The name of the flexible array member + * + * In order to have a flexible array member in a union or alone in a + * struct, it needs to be wrapped in an anonymous struct with at least 1 + * named member, but that member can be empty. + */ +#define __DECLARE_FLEX_ARRAY(TYPE, NAME) \ + struct { \ + struct { } __empty_ ## NAME; \ + TYPE NAME[]; \ + } +#endif + #endif diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h index 8c704e510..91d4be987 100644 --- a/include/uapi/scsi/fc/fc_els.h +++ b/include/uapi/scsi/fc/fc_els.h @@ -916,7 +916,9 @@ enum fc_els_clid_ic { ELS_CLID_IC_LIP = 8, /* receiving LIP */ }; - +/* + * Link Integrity event types + */ enum fc_fpin_li_event_types { FPIN_LI_UNKNOWN = 0x0, FPIN_LI_LINK_FAILURE = 0x1, @@ -943,6 +945,54 @@ enum fc_fpin_li_event_types { { FPIN_LI_DEVICE_SPEC, "Device Specific" }, \ } +/* + * Delivery event types + */ +enum fc_fpin_deli_event_types { + FPIN_DELI_UNKNOWN = 0x0, + FPIN_DELI_TIMEOUT = 0x1, + FPIN_DELI_UNABLE_TO_ROUTE = 0x2, + FPIN_DELI_DEVICE_SPEC = 0xF, +}; + +/* + * Initializer useful for decoding table. + * Please keep this in sync with the above definitions. + */ +#define FC_FPIN_DELI_EVT_TYPES_INIT { \ + { FPIN_DELI_UNKNOWN, "Unknown" }, \ + { FPIN_DELI_TIMEOUT, "Timeout" }, \ + { FPIN_DELI_UNABLE_TO_ROUTE, "Unable to Route" }, \ + { FPIN_DELI_DEVICE_SPEC, "Device Specific" }, \ +} + +/* + * Congestion event types + */ +enum fc_fpin_congn_event_types { + FPIN_CONGN_CLEAR = 0x0, + FPIN_CONGN_LOST_CREDIT = 0x1, + FPIN_CONGN_CREDIT_STALL = 0x2, + FPIN_CONGN_OVERSUBSCRIPTION = 0x3, + FPIN_CONGN_DEVICE_SPEC = 0xF, +}; + +/* + * Initializer useful for decoding table. + * Please keep this in sync with the above definitions. 
+ */ +#define FC_FPIN_CONGN_EVT_TYPES_INIT { \ + { FPIN_CONGN_CLEAR, "Clear" }, \ + { FPIN_CONGN_LOST_CREDIT, "Lost Credit" }, \ + { FPIN_CONGN_CREDIT_STALL, "Credit Stall" }, \ + { FPIN_CONGN_OVERSUBSCRIPTION, "Oversubscription" }, \ + { FPIN_CONGN_DEVICE_SPEC, "Device Specific" }, \ +} + +enum fc_fpin_congn_severity_types { + FPIN_CONGN_SEVERITY_WARNING = 0xF1, + FPIN_CONGN_SEVERITY_ERROR = 0xF7, +}; /* * Link Integrity Notification Descriptor @@ -975,6 +1025,68 @@ struct fc_fn_li_desc { }; /* + * Delivery Notification Descriptor + */ +struct fc_fn_deli_desc { + __be32 desc_tag; /* Descriptor Tag (0x00020002) */ + __be32 desc_len; /* Length of Descriptor (in bytes). + * Size of descriptor excluding + * desc_tag and desc_len fields. + */ + __be64 detecting_wwpn; /* Port Name that detected event */ + __be64 attached_wwpn; /* Port Name of device attached to + * detecting Port Name + */ + __be32 deli_reason_code;/* see enum fc_fpin_deli_event_types */ +}; + +/* + * Peer Congestion Notification Descriptor + */ +struct fc_fn_peer_congn_desc { + __be32 desc_tag; /* Descriptor Tag (0x00020003) */ + __be32 desc_len; /* Length of Descriptor (in bytes). + * Size of descriptor excluding + * desc_tag and desc_len fields. + */ + __be64 detecting_wwpn; /* Port Name that detected event */ + __be64 attached_wwpn; /* Port Name of device attached to + * detecting Port Name + */ + __be16 event_type; /* see enum fc_fpin_congn_event_types */ + __be16 event_modifier; /* Implementation specific value + * describing the event type + */ + __be32 event_period; /* duration (ms) of the detected + * congestion event + */ + __be32 pname_count; /* number of portname_list elements */ + __be64 pname_list[0]; /* list of N_Port_Names accessible + * through the attached port + */ +}; + +/* + * Congestion Notification Descriptor + */ +struct fc_fn_congn_desc { + __be32 desc_tag; /* Descriptor Tag (0x00020004) */ + __be32 desc_len; /* Length of Descriptor (in bytes). + * Size of descriptor excluding + * desc_tag and desc_len fields. + */ + __be16 event_type; /* see enum fc_fpin_congn_event_types */ + __be16 event_modifier; /* Implementation specific value + * describing the event type + */ + __be32 event_period; /* duration (ms) of the detected + * congestion event + */ + __u8 severity; /* command */ + __u8 resv[3]; /* reserved - must be zero */ +}; + +/* * ELS_FPIN - Fabric Performance Impact Notification */ struct fc_els_fpin { |
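
The bt_copy_from_sockptr() helper added to include/net/bluetooth/bluetooth.h above centralises the "reject short optval buffers" check done by Bluetooth setsockopt paths. A minimal, hypothetical sketch of how a handler could use it; the function name example_set_sndmtu and its signature are illustrative and not part of the patch:

/* Hypothetical sketch, not part of the patch: a Bluetooth setsockopt
 * handler using bt_copy_from_sockptr() to refuse short user buffers
 * before copying a fixed-size value.
 */
#include <net/bluetooth/bluetooth.h>

static int example_set_sndmtu(struct sock *sk, sockptr_t optval,
			      unsigned int optlen)
{
	u16 mtu;
	int err;

	/* -EINVAL if the caller passed fewer than sizeof(mtu) bytes;
	 * otherwise exactly sizeof(mtu) bytes are copied from optval.
	 */
	err = bt_copy_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
	if (err)
		return err;

	/* ... validate and apply mtu under lock_sock(sk) ... */
	return 0;
}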
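
skb_vlan_inet_prepare(), added to include/net/ip_tunnels.h above, is a VLAN-aware variant of pskb_inet_may_pull(): it resolves the real L3 protocol through an optional VLAN tag, pulls the MAC header plus a base IPv4/IPv6 header, and sets the network header offset. A hedged sketch of a tunnel transmit path using it; example_tunnel_xmit is a made-up driver function, not from the patch:

/* Hypothetical sketch, not part of the patch: a tunnel driver xmit path
 * calling skb_vlan_inet_prepare() so a VLAN-tagged inner frame still has
 * a valid, pulled network header before IP fields are dereferenced.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/ip_tunnels.h>

static netdev_tx_t example_tunnel_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	/* Pulls ETH_HLEN (plus the VLAN header if present) and the base
	 * IPv4/IPv6 header into skb->head and sets the network header.
	 */
	if (!skb_vlan_inet_prepare(skb))
		goto drop;

	/* ... ip_hdr(skb) / ipv6_hdr(skb) are now safe to read ... */
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}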
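
With the SCSI midlayer return codes above converted from bare #defines to enum scsi_disposition, callbacks such as the device handler's check_sense() now carry the disposition in their type. An illustrative handler, not an upstream one, showing the intended use:

/* Illustrative only: a device handler check_sense() callback using the
 * new enum scsi_disposition return type (example_check_sense is not an
 * upstream handler).
 */
#include <scsi/scsi.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_dh.h>

static enum scsi_disposition example_check_sense(struct scsi_device *sdev,
						 struct scsi_sense_hdr *sshdr)
{
	/* Transient unit attention: requeue and retry the command. */
	if (sshdr->sense_key == UNIT_ATTENTION)
		return ADD_TO_MLQUEUE;

	/* Anything else: fall back to the midlayer's own handling. */
	return SCSI_RETURN_NOT_HANDLED;
}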
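
qman_update_cgr_safe() complements qman_delete_cgr_safe(): it may be called from any CPU and internally routes the modify command to the CPU the congestion group is bound to. A hypothetical caller that only raises the congestion-state threshold might look like the sketch below; example_set_cgr_threshold and the chosen write-enable mask are illustrative assumptions, not taken from the patch:

/* Hypothetical sketch, not part of the patch: updating a congestion
 * group's congestion-state threshold from any CPU via the new
 * qman_update_cgr_safe() helper.
 */
#include <linux/kernel.h>
#include <soc/fsl/qman.h>

static int example_set_cgr_threshold(struct qman_cgr *cgr, u64 thresh)
{
	struct qm_mcc_initcgr opts = { };

	/* Only the CS threshold field is marked for update. */
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, thresh, 1);

	/* Runs the modify command on the CPU that owns the CGR. */
	return qman_update_cgr_safe(cgr, &opts);
}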
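
Several of the uapi changes above (struct ip_msfilter, struct group_filter, struct compat_group_filter, and the new __DECLARE_FLEX_ARRAY() in stddef.h) follow one pattern: keep the old one-element array for ABI compatibility and overlay a flexible-array view through a union, wrapping the flexible array in an anonymous struct with an empty named member so the compiler accepts it inside the union. A small userspace sketch of that wrapping trick; DECLARE_FLEX_ARRAY and struct msg here are local illustrations, not the kernel's definitions:

/* Userspace sketch of the flexible-array-in-a-union pattern used by the
 * uapi changes above.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DECLARE_FLEX_ARRAY(TYPE, NAME)		\
	struct {				\
		struct { } __empty_ ## NAME;	\
		TYPE NAME[];			\
	}

struct msg {
	uint32_t count;
	union {
		uint32_t legacy_list[1];		/* old fixed-size ABI view */
		DECLARE_FLEX_ARRAY(uint32_t, list);	/* new flexible-array view */
	};
};

int main(void)
{
	struct msg *m = malloc(sizeof(*m) + 4 * sizeof(uint32_t));

	if (!m)
		return 1;

	m->count = 4;
	for (uint32_t i = 0; i < m->count; i++)
		m->list[i] = i;		/* indexing past [0] is well defined here */

	printf("list starts at offset %zu\n",
	       (size_t)((char *)m->list - (char *)m));
	free(m);
	return 0;
}

Built with GCC or Clang, this relies on C11 anonymous members and the zero-size struct extension, the same assumptions the non-C++ branch of __DECLARE_FLEX_ARRAY() makes; the C++ branch falls back to a zero-length array because sizeof an empty struct is 1 there.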