commit dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
tree:      c754d0390db060af0213ff994f0ac310e4cfd6e9  /include
parent:    Adding debian version 6.6.15-2. (diff)
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include')
579 files changed, 14342 insertions, 4216 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 0b7eab0ef7..1216d72c65 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -12,11 +12,9 @@ #include <linux/device.h> #include <linux/property.h> -/* TBD: Make dynamic */ -#define ACPI_MAX_HANDLES 10 struct acpi_handle_list { u32 count; - acpi_handle handles[ACPI_MAX_HANDLES]; + acpi_handle* handles; }; /* acpi_utils.h */ @@ -32,6 +30,11 @@ acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, struct acpi_handle_list *list); +bool acpi_handle_list_equal(struct acpi_handle_list *list1, + struct acpi_handle_list *list2); +void acpi_handle_list_replace(struct acpi_handle_list *dst, + struct acpi_handle_list *src); +void acpi_handle_list_free(struct acpi_handle_list *list); acpi_status acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code, struct acpi_buffer *status_buf); @@ -517,7 +520,7 @@ int acpi_bus_attach_private_data(acpi_handle, void *); void acpi_bus_detach_private_data(acpi_handle); int acpi_dev_install_notify_handler(struct acpi_device *adev, u32 handler_type, - acpi_notify_handler handler); + acpi_notify_handler handler, void *context); void acpi_dev_remove_notify_handler(struct acpi_device *adev, u32 handler_type, acpi_notify_handler handler); @@ -761,6 +764,7 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev) adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set); } +bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2); bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2); int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer); diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 8d5572ad48..a33375e055 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -465,6 +465,9 @@ struct acpi_cdat_sslbe { u16 reserved; }; +#define ACPI_CDAT_SSLBIS_US_PORT 0x0100 +#define ACPI_CDAT_SSLBIS_ANY_PORT 0xffff + /******************************************************************************* * * CEDT - CXL Early Discovery Table diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h index 73e806fe7c..9b373d172a 100644 --- a/include/acpi/pcc.h +++ b/include/acpi/pcc.h @@ -18,7 +18,20 @@ struct pcc_mbox_chan { u16 min_turnaround_time; }; +/* Generic Communications Channel Shared Memory Region */ +#define PCC_SIGNATURE 0x50434300 +/* Generic Communications Channel Command Field */ +#define PCC_CMD_GENERATE_DB_INTR BIT(15) +/* Generic Communications Channel Status Field */ +#define PCC_STATUS_CMD_COMPLETE BIT(0) +#define PCC_STATUS_SCI_DOORBELL BIT(1) +#define PCC_STATUS_ERROR BIT(2) +#define PCC_STATUS_PLATFORM_NOTIFY BIT(3) +/* Initiator Responder Communications Channel Flags */ +#define PCC_CMD_COMPLETION_NOTIFY BIT(0) + #define MAX_PCC_SUBSPACES 256 + #ifdef CONFIG_PCC extern struct pcc_mbox_chan * pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id); diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 941be574bb..def242528b 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -2,7 +2,7 @@ # # asm headers that all architectures except um should have # (This file is not included when SRCARCH=um since UML borrows several -# asm headers from the host architecutre.) +# asm headers from the host architecture.) 
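With the fixed ACPI_MAX_HANDLES array gone, struct acpi_handle_list carries a dynamically allocated handles array, so callers of acpi_evaluate_reference() become responsible for releasing it through the new acpi_handle_list_free(). A minimal sketch of that lifecycle, assuming a caller that evaluates _DEP (the function name and the choice of object are illustrative, not part of this patch):

#include <linux/acpi.h>

/* Illustrative consumer of the now-dynamic handle list. */
static void example_walk_references(acpi_handle handle)
{
	struct acpi_handle_list deps = {};
	acpi_status status;
	u32 i;

	/* The evaluator allocates deps.handles on success... */
	status = acpi_evaluate_reference(handle, "_DEP", NULL, &deps);
	if (ACPI_FAILURE(status))
		return;

	for (i = 0; i < deps.count; i++)
		acpi_handle_debug(deps.handles[i], "referenced device\n");

	/* ...so the caller must free it when done. */
	acpi_handle_list_free(&deps);
}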
mandatory-y += atomic.h mandatory-y += archrandom.h diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h index eb64bd4f11..542d3727ee 100644 --- a/include/asm-generic/bitops/instrumented-lock.h +++ b/include/asm-generic/bitops/instrumented-lock.h @@ -58,27 +58,25 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) return arch_test_and_set_bit_lock(nr, addr); } -#if defined(arch_clear_bit_unlock_is_negative_byte) /** - * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom - * byte is negative, for unlock. - * @nr: the bit to clear - * @addr: the address to start counting from + * xor_unlock_is_negative_byte - XOR a single byte in memory and test if + * it is negative, for unlock. + * @mask: Change the bits which are set in this mask. + * @addr: The address of the word containing the byte to change. * + * Changes some of bits 0-6 in the word pointed to by @addr. * This operation is atomic and provides release barrier semantics. + * Used to optimise some folio operations which are commonly paired + * with an unlock or end of writeback. Bit 7 is used as PG_waiters to + * indicate whether anybody is waiting for the unlock. * - * This is a bit of a one-trick-pony for the filemap code, which clears - * PG_locked and tests PG_waiters, + * Return: Whether the top bit of the byte is set. */ -static inline bool -clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) +static inline bool xor_unlock_is_negative_byte(unsigned long mask, + volatile unsigned long *addr) { kcsan_release(); - instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); - return arch_clear_bit_unlock_is_negative_byte(nr, addr); + instrument_atomic_write(addr, sizeof(long)); + return arch_xor_unlock_is_negative_byte(mask, addr); } -/* Let everybody know we have it. */ -#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte -#endif - #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */ diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 40913516e6..14d4ec8c51 100644 --- a/include/asm-generic/bitops/lock.h +++ b/include/asm-generic/bitops/lock.h @@ -66,27 +66,15 @@ arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p) raw_atomic_long_set_release((atomic_long_t *)p, old); } -/** - * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom - * byte is negative, for unlock. 
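The single-purpose clear_bit_unlock_is_negative_byte() becomes the more general xor_unlock_is_negative_byte(), which takes a mask of bits to flip rather than a bit number. Roughly, an unlock site converts as sketched below; the lock/waiter bit names are placeholders, not the patch's actual filemap conversion:

#include <linux/bitops.h>

/* Old form: clear_bit_unlock_is_negative_byte(lock_bit, word);
 * New form: pass a mask of (currently set) bits 0-6 to flip atomically
 * with release semantics; the return value still reports bit 7, which
 * the page cache uses as its waiters flag. */
static bool example_unlock_and_check_waiters(volatile unsigned long *word,
					     unsigned int lock_bit)
{
	return xor_unlock_is_negative_byte(BIT(lock_bit), word);
}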
- * @nr: the bit to clear - * @addr: the address to start counting from - * - * This is a bit of a one-trick-pony for the filemap code, which clears - * PG_locked and tests PG_waiters, - */ -#ifndef arch_clear_bit_unlock_is_negative_byte -static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr, - volatile unsigned long *p) +#ifndef arch_xor_unlock_is_negative_byte +static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, + volatile unsigned long *p) { long old; - unsigned long mask = BIT_MASK(nr); - p += BIT_WORD(nr); - old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); + old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p); return !!(old & BIT(7)); } -#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte #endif #include <asm-generic/bitops/instrumented-lock.h> diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index 84ec53ccc4..7ee8a179d1 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -91,6 +91,12 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end) } #endif +#ifndef flush_cache_vmap_early +static inline void flush_cache_vmap_early(unsigned long start, unsigned long end) +{ +} +#endif + #ifndef flush_cache_vunmap static inline void flush_cache_vunmap(unsigned long start, unsigned long end) { diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h index bb7ee9c70e..6ccabb400a 100644 --- a/include/asm-generic/fb.h +++ b/include/asm-generic/fb.h @@ -12,14 +12,14 @@ #include <linux/pgtable.h> struct fb_info; -struct file; -#ifndef fb_pgprotect -#define fb_pgprotect fb_pgprotect -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) +#ifndef pgprot_framebuffer +#define pgprot_framebuffer pgprot_framebuffer +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) { - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + return pgprot_writecombine(prot); } #endif diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h index 1a3ad6d298..c32e0cf23c 100644 --- a/include/asm-generic/numa.h +++ b/include/asm-generic/numa.h @@ -35,6 +35,7 @@ int __init numa_add_memblk(int nodeid, u64 start, u64 end); void __init numa_set_distance(int from, int to, int distance); void __init numa_free_distance(void); void __init early_map_cpu_to_node(unsigned int cpu, int nid); +int __init early_cpu_to_node(int cpu); void numa_store_cpu_info(unsigned int cpu); void numa_add_cpu(unsigned int cpu); void numa_remove_cpu(unsigned int cpu); @@ -46,6 +47,7 @@ static inline void numa_add_cpu(unsigned int cpu) { } static inline void numa_remove_cpu(unsigned int cpu) { } static inline void arch_numa_init(void) { } static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } +static inline int early_cpu_to_node(int cpu) { return 0; } #endif /* CONFIG_NUMA */ diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index c75d4a7538..879e5f8aa5 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -169,6 +169,8 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) ptdesc = pagetable_alloc(gfp, 0); if (!ptdesc) return NULL; + + pagetable_pud_ctor(ptdesc); return ptdesc_address(ptdesc); } @@ -190,8 +192,11 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) static inline void 
__pud_free(struct mm_struct *mm, pud_t *pud) { + struct ptdesc *ptdesc = virt_to_ptdesc(pud); + BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); - pagetable_free(virt_to_ptdesc(pud)); + pagetable_pud_dtor(ptdesc); + pagetable_free(ptdesc); } #ifndef __HAVE_ARCH_PUD_FREE diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index fdfebcb050..90803a826b 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -68,11 +68,18 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) smp_store_release(ptr, (u16)val + 1); } +static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + u32 val = lock.counter; + + return ((val >> 16) == (val & 0xffff)); +} + static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) { - u32 val = atomic_read(lock); + arch_spinlock_t val = READ_ONCE(*lock); - return ((val >> 16) != (val & 0xffff)); + return !arch_spin_value_unlocked(val); } static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) @@ -82,11 +89,6 @@ static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) return (s16)((val >> 16) - (val & 0xffff)) > 1; } -static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - return !arch_spin_is_locked(&lock); -} - #include <asm/qrwlock.h> #endif /* __ASM_GENERIC_SPINLOCK_H */ diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h index 699650f819..a84c64e5f1 100644 --- a/include/asm-generic/unaligned.h +++ b/include/asm-generic/unaligned.h @@ -104,9 +104,9 @@ static inline u32 get_unaligned_le24(const void *p) static inline void __put_unaligned_be24(const u32 val, u8 *p) { - *p++ = val >> 16; - *p++ = val >> 8; - *p++ = val; + *p++ = (val >> 16) & 0xff; + *p++ = (val >> 8) & 0xff; + *p++ = val & 0xff; } static inline void put_unaligned_be24(const u32 val, void *p) @@ -116,9 +116,9 @@ static inline void put_unaligned_be24(const u32 val, void *p) static inline void __put_unaligned_le24(const u32 val, u8 *p) { - *p++ = val; - *p++ = val >> 8; - *p++ = val >> 16; + *p++ = val & 0xff; + *p++ = (val >> 8) & 0xff; + *p++ = (val >> 16) & 0xff; } static inline void put_unaligned_le24(const u32 val, void *p) @@ -128,12 +128,12 @@ static inline void put_unaligned_le24(const u32 val, void *p) static inline void __put_unaligned_be48(const u64 val, u8 *p) { - *p++ = val >> 40; - *p++ = val >> 32; - *p++ = val >> 24; - *p++ = val >> 16; - *p++ = val >> 8; - *p++ = val; + *p++ = (val >> 40) & 0xff; + *p++ = (val >> 32) & 0xff; + *p++ = (val >> 24) & 0xff; + *p++ = (val >> 16) & 0xff; + *p++ = (val >> 8) & 0xff; + *p++ = val & 0xff; } static inline void put_unaligned_be48(const u64 val, void *p) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 67d8dd2f1b..bae0fe4d49 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -356,7 +356,6 @@ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ MEM_KEEP(init.data*) \ - MEM_KEEP(exit.data*) \ *(.data.unlikely) \ __start_once = .; \ *(.data.once) \ @@ -521,7 +520,6 @@ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ *(.ref.rodata) \ MEM_KEEP(init.rodata) \ - MEM_KEEP(exit.rodata) \ } \ \ /* Built-in module parameters. 
*/ \ @@ -574,7 +572,6 @@ *(.ref.text) \ *(.text.asan.* .text.tsan.*) \ MEM_KEEP(init.text*) \ - MEM_KEEP(exit.text*) \ /* sched.text is aling to function alignment to secure we have same @@ -714,13 +711,10 @@ *(.exit.data .exit.data.*) \ *(.fini_array .fini_array.*) \ *(.dtors .dtors.*) \ - MEM_DISCARD(exit.data*) \ - MEM_DISCARD(exit.rodata*) #define EXIT_TEXT \ *(.exit.text) \ *(.text.exit) \ - MEM_DISCARD(exit.text) #define EXIT_CALL \ *(.exitcall.exit) diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 35e45b854a..51382befbe 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -217,6 +217,18 @@ static inline void crypto_free_aead(struct crypto_aead *tfm) crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); } +/** + * crypto_has_aead() - Search for the availability of an aead. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * aead + * @type: specifies the type of the aead + * @mask: specifies the mask for the aead + * + * Return: true when the aead is known to the kernel crypto API; false + * otherwise + */ +int crypto_has_aead(const char *alg_name, u32 type, u32 mask); + static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm) { return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 670508f1dc..31c111bebb 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -382,7 +382,7 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req) * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() * @src: source buffer * @slen: source length - * @dst: destinatino obuffer + * @dst: destination obuffer * @dlen: destination length * * Return: zero on success; error code in case of error @@ -400,7 +400,7 @@ int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm, * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() * @src: source buffer * @slen: source length - * @dst: destinatino obuffer + * @dst: destination obuffer * @dlen: destination length * * Return: Output length on success; error code in case of error diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index ca86f4c6ba..7a4a71af65 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -195,11 +195,6 @@ static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm, return PTR_ALIGN(crypto_tfm_ctx(tfm), align); } -static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) -{ - return crypto_tfm_ctx_align(tfm, crypto_tfm_alg_alignmask(tfm) + 1); -} - static inline unsigned int crypto_dma_align(void) { return CRYPTO_DMA_ALIGN; diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 2835069c59..545dbefe3e 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -78,7 +78,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, bool retry_support, int (*cbk_do_batch)(struct crypto_engine *engine), bool rt, int qlen); -int crypto_engine_exit(struct crypto_engine *engine); +void crypto_engine_exit(struct crypto_engine *engine); int crypto_engine_register_aead(struct aead_engine_alg *alg); void crypto_engine_unregister_aead(struct aead_engine_alg *alg); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index f7c2a22cd7..c7bdbece27 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -250,16 +250,7 @@ struct shash_alg { #undef HASH_ALG_COMMON_STAT struct crypto_ahash { - int (*init)(struct ahash_request *req); - int (*update)(struct 
ahash_request *req); - int (*final)(struct ahash_request *req); - int (*finup)(struct ahash_request *req); - int (*digest)(struct ahash_request *req); - int (*export)(struct ahash_request *req, void *out); - int (*import)(struct ahash_request *req, const void *in); - int (*setkey)(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen); - + bool using_shash; /* Underlying algorithm is shash, not ahash */ unsigned int statesize; unsigned int reqsize; struct crypto_tfm base; @@ -342,12 +333,6 @@ static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm) return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); } -static inline unsigned int crypto_ahash_alignmask( - struct crypto_ahash *tfm) -{ - return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); -} - /** * crypto_ahash_blocksize() - obtain block size for cipher * @tfm: cipher handle @@ -519,10 +504,7 @@ int crypto_ahash_digest(struct ahash_request *req); * * Return: 0 if the export was successful; < 0 if an error occurred */ -static inline int crypto_ahash_export(struct ahash_request *req, void *out) -{ - return crypto_ahash_reqtfm(req)->export(req, out); -} +int crypto_ahash_export(struct ahash_request *req, void *out); /** * crypto_ahash_import() - import message digest state @@ -535,15 +517,7 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out) * * Return: 0 if the import was successful; < 0 if an error occurred */ -static inline int crypto_ahash_import(struct ahash_request *req, const void *in) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->import(req, in); -} +int crypto_ahash_import(struct ahash_request *req, const void *in); /** * crypto_ahash_init() - (re)initialize message digest handle @@ -556,36 +530,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) * * Return: see crypto_ahash_final() */ -static inline int crypto_ahash_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->init(req); -} - -static inline struct crypto_istat_hash *hash_get_stat( - struct hash_alg_common *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif -} - -static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err) -{ - if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) - return err; - - if (err && err != -EINPROGRESS && err != -EBUSY) - atomic64_inc(&hash_get_stat(alg)->err_cnt); - - return err; -} +int crypto_ahash_init(struct ahash_request *req); /** * crypto_ahash_update() - add data to message digest for processing @@ -598,16 +543,7 @@ static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err) * * Return: see crypto_ahash_final() */ -static inline int crypto_ahash_update(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_alg_common *alg = crypto_hash_alg_common(tfm); - - if (IS_ENABLED(CONFIG_CRYPTO_STATS)) - atomic64_add(req->nbytes, &hash_get_stat(alg)->hash_tlen); - - return crypto_hash_errstat(alg, tfm->update(req)); -} +int crypto_ahash_update(struct ahash_request *req); /** * DOC: Asynchronous Hash Request Handle @@ -798,12 +734,6 @@ static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm) return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); } -static inline unsigned int crypto_shash_alignmask( - struct 
crypto_shash *tfm) -{ - return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); -} - /** * crypto_shash_blocksize() - obtain block size for cipher * @tfm: cipher handle @@ -952,10 +882,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, * Context: Any context. * Return: 0 if the export creation was successful; < 0 if an error occurred */ -static inline int crypto_shash_export(struct shash_desc *desc, void *out) -{ - return crypto_shash_alg(desc->tfm)->export(desc, out); -} +int crypto_shash_export(struct shash_desc *desc, void *out); /** * crypto_shash_import() - import operational state @@ -969,15 +896,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) * Context: Any context. * Return: 0 if the import was successful; < 0 if an error occurred */ -static inline int crypto_shash_import(struct shash_desc *desc, const void *in) -{ - struct crypto_shash *tfm = desc->tfm; - - if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return crypto_shash_alg(tfm)->import(desc, in); -} +int crypto_shash_import(struct shash_desc *desc, const void *in); /** * crypto_shash_init() - (re)initialize message digest diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h index dd4f067850..d6927739f8 100644 --- a/include/crypto/hash_info.h +++ b/include/crypto/hash_info.h @@ -10,6 +10,7 @@ #include <crypto/sha1.h> #include <crypto/sha2.h> +#include <crypto/sha3.h> #include <crypto/md5.h> #include <crypto/streebog.h> diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index cf65676e45..59c707e4de 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -18,15 +18,13 @@ struct crypto_hash_walk { char *data; unsigned int offset; - unsigned int alignmask; + unsigned int flags; struct page *pg; unsigned int entrylen; unsigned int total; struct scatterlist *sg; - - unsigned int flags; }; struct ahash_instance { @@ -269,11 +267,6 @@ static inline struct crypto_shash *crypto_spawn_shash( return crypto_spawn_tfm2(&spawn->base); } -static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm) -{ - return crypto_tfm_ctx_aligned(&tfm->base); -} - static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) { return container_of(tfm, struct crypto_shash, base); diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index fb3d9e899f..7ae42afdcf 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -36,10 +36,25 @@ struct skcipher_instance { }; }; +struct lskcipher_instance { + void (*free)(struct lskcipher_instance *inst); + union { + struct { + char head[offsetof(struct lskcipher_alg, co.base)]; + struct crypto_instance base; + } s; + struct lskcipher_alg alg; + }; +}; + struct crypto_skcipher_spawn { struct crypto_spawn base; }; +struct crypto_lskcipher_spawn { + struct crypto_spawn base; +}; + struct skcipher_walk { union { struct { @@ -80,6 +95,12 @@ static inline struct crypto_instance *skcipher_crypto_instance( return &inst->s.base; } +static inline struct crypto_instance *lskcipher_crypto_instance( + struct lskcipher_instance *inst) +{ + return &inst->s.base; +} + static inline struct skcipher_instance *skcipher_alg_instance( struct crypto_skcipher *skcipher) { @@ -87,11 +108,23 @@ static inline struct skcipher_instance *skcipher_alg_instance( struct skcipher_instance, alg); } +static inline struct lskcipher_instance *lskcipher_alg_instance( + struct crypto_lskcipher *lskcipher) +{ + 
return container_of(crypto_lskcipher_alg(lskcipher), + struct lskcipher_instance, alg); +} + static inline void *skcipher_instance_ctx(struct skcipher_instance *inst) { return crypto_instance_ctx(skcipher_crypto_instance(inst)); } +static inline void *lskcipher_instance_ctx(struct lskcipher_instance *inst) +{ + return crypto_instance_ctx(lskcipher_crypto_instance(inst)); +} + static inline void skcipher_request_complete(struct skcipher_request *req, int err) { crypto_request_complete(&req->base, err); @@ -101,21 +134,36 @@ int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); +int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) { crypto_drop_spawn(&spawn->base); } -static inline struct skcipher_alg *crypto_skcipher_spawn_alg( - struct crypto_skcipher_spawn *spawn) +static inline void crypto_drop_lskcipher(struct crypto_lskcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct lskcipher_alg *crypto_lskcipher_spawn_alg( + struct crypto_lskcipher_spawn *spawn) { - return container_of(spawn->base.alg, struct skcipher_alg, base); + return container_of(spawn->base.alg, struct lskcipher_alg, co.base); } -static inline struct skcipher_alg *crypto_spawn_skcipher_alg( +static inline struct skcipher_alg_common *crypto_spawn_skcipher_alg_common( struct crypto_skcipher_spawn *spawn) { - return crypto_skcipher_spawn_alg(spawn); + return container_of(spawn->base.alg, struct skcipher_alg_common, base); +} + +static inline struct lskcipher_alg *crypto_spawn_lskcipher_alg( + struct crypto_lskcipher_spawn *spawn) +{ + return crypto_lskcipher_spawn_alg(spawn); } static inline struct crypto_skcipher *crypto_spawn_skcipher( @@ -124,6 +172,12 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher( return crypto_spawn_tfm2(&spawn->base); } +static inline struct crypto_lskcipher *crypto_spawn_lskcipher( + struct crypto_lskcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + static inline void crypto_skcipher_set_reqsize( struct crypto_skcipher *skcipher, unsigned int reqsize) { @@ -144,6 +198,13 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); int skcipher_register_instance(struct crypto_template *tmpl, struct skcipher_instance *inst); +int crypto_register_lskcipher(struct lskcipher_alg *alg); +void crypto_unregister_lskcipher(struct lskcipher_alg *alg); +int crypto_register_lskciphers(struct lskcipher_alg *algs, int count); +void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count); +int lskcipher_register_instance(struct crypto_template *tmpl, + struct lskcipher_instance *inst); + int skcipher_walk_done(struct skcipher_walk *walk, int err); int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, @@ -166,6 +227,11 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) return crypto_tfm_ctx(&tfm->base); } +static inline void *crypto_lskcipher_ctx(struct crypto_lskcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm) { return crypto_tfm_ctx_dma(&tfm->base); @@ -191,41 +257,6 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req) return req->base.flags; } -static inline unsigned int crypto_skcipher_alg_min_keysize( - struct skcipher_alg *alg) -{ - 
return alg->min_keysize; -} - -static inline unsigned int crypto_skcipher_alg_max_keysize( - struct skcipher_alg *alg) -{ - return alg->max_keysize; -} - -static inline unsigned int crypto_skcipher_alg_walksize( - struct skcipher_alg *alg) -{ - return alg->walksize; -} - -/** - * crypto_skcipher_walksize() - obtain walk size - * @tfm: cipher handle - * - * In some cases, algorithms can only perform optimally when operating on - * multiple blocks in parallel. This is reflected by the walksize, which - * must be a multiple of the chunksize (or equal if the concern does not - * apply) - * - * Return: walk size in bytes - */ -static inline unsigned int crypto_skcipher_walksize( - struct crypto_skcipher *tfm) -{ - return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); -} - /* Helpers for simple block cipher modes of operation */ struct skcipher_ctx_simple { struct crypto_cipher *cipher; /* underlying block cipher */ @@ -249,5 +280,24 @@ static inline struct crypto_alg *skcipher_ialg_simple( return crypto_spawn_cipher_alg(spawn); } +static inline struct crypto_lskcipher *lskcipher_cipher_simple( + struct crypto_lskcipher *tfm) +{ + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + + return *ctx; +} + +struct lskcipher_instance *lskcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb); + +static inline struct lskcipher_alg *lskcipher_ialg_simple( + struct lskcipher_instance *inst) +{ + struct crypto_lskcipher_spawn *spawn = lskcipher_instance_ctx(inst); + + return crypto_lskcipher_spawn_alg(spawn); +} + #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ diff --git a/include/crypto/sig.h b/include/crypto/sig.h index 641b4714c4..d25186bb2b 100644 --- a/include/crypto/sig.h +++ b/include/crypto/sig.h @@ -79,7 +79,7 @@ int crypto_sig_maxsize(struct crypto_sig *tfm); * @tfm: signature tfm handle allocated with crypto_alloc_sig() * @src: source buffer * @slen: source length - * @dst: destinatino obuffer + * @dst: destination obuffer * @dlen: destination length * * Return: zero on success; error code in case of error diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 080d1ba361..ea18af4834 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -49,6 +49,10 @@ struct crypto_sync_skcipher { struct crypto_skcipher base; }; +struct crypto_lskcipher { + struct crypto_tfm base; +}; + /* * struct crypto_istat_cipher - statistics for cipher algorithm * @encrypt_cnt: number of encrypt requests @@ -65,6 +69,43 @@ struct crypto_istat_cipher { atomic64_t err_cnt; }; +#ifdef CONFIG_CRYPTO_STATS +#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat; +#else +#define SKCIPHER_ALG_COMMON_STAT +#endif + +/* + * struct skcipher_alg_common - common properties of skcipher_alg + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @ivsize: IV size applicable for transformation. 
The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * @stat: Statistics for cipher algorithm + * @base: Definition of a generic crypto algorithm. + */ +#define SKCIPHER_ALG_COMMON { \ + unsigned int min_keysize; \ + unsigned int max_keysize; \ + unsigned int ivsize; \ + unsigned int chunksize; \ + \ + SKCIPHER_ALG_COMMON_STAT \ + \ + struct crypto_alg base; \ +} +struct skcipher_alg_common SKCIPHER_ALG_COMMON; + /** * struct skcipher_alg - symmetric key cipher definition * @min_keysize: Minimum key size supported by the transformation. This is the @@ -120,6 +161,7 @@ struct crypto_istat_cipher { * in parallel. Should be a multiple of chunksize. * @stat: Statistics for cipher algorithm * @base: Definition of a generic crypto algorithm. + * @co: see struct skcipher_alg_common * * All fields except @ivsize are mandatory and must be filled. */ @@ -131,17 +173,55 @@ struct skcipher_alg { int (*init)(struct crypto_skcipher *tfm); void (*exit)(struct crypto_skcipher *tfm); - unsigned int min_keysize; - unsigned int max_keysize; - unsigned int ivsize; - unsigned int chunksize; unsigned int walksize; -#ifdef CONFIG_CRYPTO_STATS - struct crypto_istat_cipher stat; -#endif + union { + struct SKCIPHER_ALG_COMMON; + struct skcipher_alg_common co; + }; +}; - struct crypto_alg base; +/** + * struct lskcipher_alg - linear symmetric key cipher definition + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a number of bytes. This function is used to encrypt + * the supplied data. This function shall not modify + * the transformation context, as this function may be called + * in parallel with the same transformation object. Data + * may be left over if length is not a multiple of blocks + * and there is more to come (final == false). The number of + * left-over bytes should be returned in case of success. + * @decrypt: Decrypt a number of bytes. This is a reverse counterpart to + * @encrypt and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. 
+ * @co: see struct skcipher_alg_common + */ +struct lskcipher_alg { + int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final); + int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final); + int (*init)(struct crypto_lskcipher *tfm); + void (*exit)(struct crypto_lskcipher *tfm); + + struct skcipher_alg_common co; }; #define MAX_SYNC_SKCIPHER_REQSIZE 384 @@ -213,12 +293,36 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name, u32 type, u32 mask); + +/** + * crypto_alloc_lskcipher() - allocate linear symmetric key cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * lskcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an lskcipher. The returned struct + * crypto_lskcipher is the cipher handle that is required for any subsequent + * API invocation for that lskcipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name, + u32 type, u32 mask); + static inline struct crypto_tfm *crypto_skcipher_tfm( struct crypto_skcipher *tfm) { return &tfm->base; } +static inline struct crypto_tfm *crypto_lskcipher_tfm( + struct crypto_lskcipher *tfm) +{ + return &tfm->base; +} + /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed @@ -236,6 +340,17 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm) } /** + * crypto_free_lskcipher() - zeroize and free cipher handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_lskcipher(struct crypto_lskcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_lskcipher_tfm(tfm)); +} + +/** * crypto_has_skcipher() - Search for the availability of an skcipher. 
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the * skcipher @@ -253,6 +368,19 @@ static inline const char *crypto_skcipher_driver_name( return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); } +static inline const char *crypto_lskcipher_driver_name( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_lskcipher_tfm(tfm)); +} + +static inline struct skcipher_alg_common *crypto_skcipher_alg_common( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg_common, base); +} + static inline struct skcipher_alg *crypto_skcipher_alg( struct crypto_skcipher *tfm) { @@ -260,9 +388,11 @@ static inline struct skcipher_alg *crypto_skcipher_alg( struct skcipher_alg, base); } -static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) +static inline struct lskcipher_alg *crypto_lskcipher_alg( + struct crypto_lskcipher *tfm) { - return alg->ivsize; + return container_of(crypto_lskcipher_tfm(tfm)->__crt_alg, + struct lskcipher_alg, co.base); } /** @@ -276,7 +406,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) */ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->ivsize; + return crypto_skcipher_alg_common(tfm)->ivsize; } static inline unsigned int crypto_sync_skcipher_ivsize( @@ -286,6 +416,21 @@ static inline unsigned int crypto_sync_skcipher_ivsize( } /** + * crypto_lskcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the lskcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ +static inline unsigned int crypto_lskcipher_ivsize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.ivsize; +} + +/** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle * @@ -301,10 +446,20 @@ static inline unsigned int crypto_skcipher_blocksize( return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); } -static inline unsigned int crypto_skcipher_alg_chunksize( - struct skcipher_alg *alg) +/** + * crypto_lskcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the lskcipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_lskcipher_blocksize( + struct crypto_lskcipher *tfm) { - return alg->chunksize; + return crypto_tfm_alg_blocksize(crypto_lskcipher_tfm(tfm)); } /** @@ -321,7 +476,24 @@ static inline unsigned int crypto_skcipher_alg_chunksize( static inline unsigned int crypto_skcipher_chunksize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); + return crypto_skcipher_alg_common(tfm)->chunksize; +} + +/** + * crypto_lskcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_lskcipher_chunksize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.chunksize; } static inline unsigned int crypto_sync_skcipher_blocksize( @@ -336,6 +508,12 @@ static inline unsigned int crypto_skcipher_alignmask( return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)); } +static inline unsigned int crypto_lskcipher_alignmask( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_lskcipher_tfm(tfm)); +} + static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm) { return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm)); @@ -371,6 +549,23 @@ static inline void crypto_sync_skcipher_clear_flags( crypto_skcipher_clear_flags(&tfm->base, flags); } +static inline u32 crypto_lskcipher_get_flags(struct crypto_lskcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_lskcipher_tfm(tfm)); +} + +static inline void crypto_lskcipher_set_flags(struct crypto_lskcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_lskcipher_tfm(tfm), flags); +} + +static inline void crypto_lskcipher_clear_flags(struct crypto_lskcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_lskcipher_tfm(tfm), flags); +} + /** * crypto_skcipher_setkey() - set key for cipher * @tfm: cipher handle @@ -396,16 +591,47 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, return crypto_skcipher_setkey(&tfm->base, key, keylen); } +/** + * crypto_lskcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the lskcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen); + static inline unsigned int crypto_skcipher_min_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->min_keysize; + return crypto_skcipher_alg_common(tfm)->min_keysize; } static inline unsigned int crypto_skcipher_max_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->max_keysize; + return crypto_skcipher_alg_common(tfm)->max_keysize; +} + +static inline unsigned int crypto_lskcipher_min_keysize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.min_keysize; +} + +static inline unsigned int crypto_lskcipher_max_keysize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.max_keysize; } /** @@ -458,6 +684,42 @@ int crypto_skcipher_encrypt(struct skcipher_request *req); int crypto_skcipher_decrypt(struct skcipher_request *req); /** + * crypto_lskcipher_encrypt() - encrypt plaintext + * @tfm: lskcipher handle + * @src: source buffer + * @dst: destination buffer + * @len: number of bytes to process + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_lskcipher_ivsize + * + * Encrypt plaintext data using the lskcipher handle. 
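Taken together, the lskcipher entry points compose into a much shorter call sequence than the request-based skcipher API: allocate a handle, set the key, and pass linear buffers directly. A hedged sketch of one-shot in-place encryption (the "cbc(aes)" name and the omitted handling of a positive leftover-byte return are illustrative):

#include <crypto/skcipher.h>
#include <linux/err.h>

static int example_encrypt_in_place(const u8 *key, unsigned int keylen,
				    u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_lskcipher *tfm;
	int err;

	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_lskcipher_setkey(tfm, key, keylen);
	if (!err)
		/* No request object or scatterlist: linear src/dst. */
		err = crypto_lskcipher_encrypt(tfm, buf, buf, len, iv);

	crypto_free_lskcipher(tfm);
	return err;
}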
+ * + * Return: >=0 if the cipher operation was successful, if positive + * then this many bytes have been left unprocessed; + * < 0 if an error occurred + */ +int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv); + +/** + * crypto_lskcipher_decrypt() - decrypt ciphertext + * @tfm: lskcipher handle + * @src: source buffer + * @dst: destination buffer + * @len: number of bytes to process + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_lskcipher_ivsize + * + * Decrypt ciphertext data using the lskcipher handle. + * + * Return: >=0 if the cipher operation was successful, if positive + * then this many bytes have been left unprocessed; + * < 0 if an error occurred + */ +int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv); + +/** * DOC: Symmetric Key Cipher Request Handle * * The skcipher_request data structure contains all pointers to data diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 90b69270f2..724c45e3e9 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -68,4 +68,9 @@ enum amd_asic_type { extern const char *amdgpu_asic_name[]; +struct amdgpu_asic_type_quirk { + unsigned short device; /* PCI device ID */ + u8 revision; /* revision ID */ + unsigned short type; /* real ASIC type */ +}; #endif /*__AMD_ASIC_TYPE_H__ */ diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h index 5286a53a18..65d5e68065 100644 --- a/include/drm/bridge/dw_mipi_dsi.h +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -11,6 +11,10 @@ #include <linux/types.h> +#include <drm/drm_atomic.h> +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> #include <drm/drm_modes.h> struct drm_display_mode; @@ -55,6 +59,17 @@ struct dw_mipi_dsi_plat_data { unsigned long mode_flags, u32 lanes, u32 format); + bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + u32 *(*get_input_bus_fmts)(void *priv_data, + struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + const struct dw_mipi_dsi_phy_ops *phy_ops; const struct dw_mipi_dsi_host_ops *host_ops; @@ -68,5 +83,6 @@ void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi); int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder); void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi); void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave); +struct drm_bridge *dw_mipi_dsi_get_bridge(struct dw_mipi_dsi *dsi); #endif /* __DW_MIPI_DSI__ */ diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h index 6fc9bb2979..e0c1050512 100644 --- a/include/drm/bridge/samsung-dsim.h +++ b/include/drm/bridge/samsung-dsim.h @@ -61,6 +61,8 @@ struct samsung_dsim_driver_data { unsigned int num_bits_resol; unsigned int pll_p_offset; const unsigned int *reg_values; + unsigned int pll_fin_min; + unsigned int pll_fin_max; u16 m_min; u16 m_max; }; @@ -88,6 +90,7 @@ struct samsung_dsim { void __iomem *reg_base; struct phy *phy; struct clk **clks; + struct clk *pll_clk; struct regulator_bulk_data supplies[2]; int irq; struct gpio_desc *te_gpio; @@ -116,7 +119,7 @@ struct samsung_dsim { }; extern int samsung_dsim_probe(struct platform_device *pdev); -extern int samsung_dsim_remove(struct 
platform_device *pdev); +extern void samsung_dsim_remove(struct platform_device *pdev); extern const struct dev_pm_ops samsung_dsim_pm_ops; #endif /* __SAMSUNG_DSIM__ */ diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index 86f24a7592..3d74b2cec7 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -272,8 +272,8 @@ struct drm_dp_aux_msg { }; struct cec_adapter; -struct edid; struct drm_connector; +struct drm_edid; /** * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX @@ -507,18 +507,18 @@ bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], u8 type); bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], @@ -530,7 +530,7 @@ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid, + const struct drm_edid *drm_edid, struct drm_dp_aux *aux); enum drm_mode_subconnector drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], @@ -699,6 +699,7 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux); void drm_dp_cec_register_connector(struct drm_dp_aux *aux, struct drm_connector *connector); void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux); +void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address); void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid); void drm_dp_cec_unset_edid(struct drm_dp_aux *aux); #else @@ -716,6 +717,11 @@ static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux) { } +static inline void drm_dp_cec_attach(struct drm_dp_aux *aux, + u16 source_physical_address) +{ +} + static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) { diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 8eeb6730ac..655862b3d2 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -46,6 +46,13 @@ struct drm_dp_mst_topology_ref_history { }; #endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */ +enum drm_dp_mst_payload_allocation { + DRM_DP_MST_PAYLOAD_ALLOCATION_NONE, + DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL, + DRM_DP_MST_PAYLOAD_ALLOCATION_DFP, + DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE, +}; + struct drm_dp_mst_branch; /** @@ -537,7 +544,7 @@ struct drm_dp_mst_atomic_payload { * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the * previous MST states payload start slots have been copied over to the new state. 
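drm_dp_cec_attach() lets a driver hook up CEC-over-AUX from a source physical address instead of a raw struct edid; combined with the source_physical_address field this series adds to drm_display_info (see the drm_connector.h hunk below), a detect path can drop its EDID plumbing. A sketch of the converted call site, with illustrative connector/aux names rather than code from this patch:

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_connector.h>

static void example_connector_attach_cec(struct drm_connector *connector,
					 struct drm_dp_aux *aux)
{
	/* Previously: drm_dp_cec_set_edid(aux, edid);
	 * The address defaults to CEC_PHYS_ADDR_INVALID (0xffff) when no
	 * HDMI Vendor-Specific Data Block was parsed. */
	drm_dp_cec_attach(aux, connector->display_info.source_physical_address);
}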
Note * that a new start slot won't be assigned/removed from this payload until - * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called. + * drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called. * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to * get committed to hardware by calling drm_crtc_commit_wait() on each of the * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps. @@ -564,6 +571,9 @@ struct drm_dp_mst_atomic_payload { /** @dsc_enabled: Whether or not this payload has DSC enabled */ bool dsc_enabled : 1; + /** @payload_allocation_status: The allocation status of this payload */ + enum drm_dp_mst_payload_allocation payload_allocation_status; + /** @next: The list node for this payload */ struct list_head next; }; @@ -842,10 +852,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr, struct drm_atomic_state *state, struct drm_dp_mst_atomic_payload *payload); -void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_topology_state *mst_state, - const struct drm_dp_mst_atomic_payload *old_payload, - struct drm_dp_mst_atomic_payload *new_payload); +void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload); +void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + const struct drm_dp_mst_atomic_payload *old_payload, + struct drm_dp_mst_atomic_payload *new_payload); int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h index d4955062c7..f4d3784b1d 100644 --- a/include/drm/drm_accel.h +++ b/include/drm/drm_accel.h @@ -58,7 +58,8 @@ int accel_minor_alloc(void); void accel_minor_replace(struct drm_minor *minor, int index); void accel_set_device_instance_params(struct device *kdev, int index); int accel_open(struct inode *inode, struct file *filp); -void accel_debugfs_init(struct drm_minor *minor, int minor_id); +void accel_debugfs_init(struct drm_device *dev); +void accel_debugfs_register(struct drm_device *dev); #else @@ -89,7 +90,11 @@ static inline void accel_set_device_instance_params(struct device *kdev, int ind { } -static inline void accel_debugfs_init(struct drm_minor *minor, int minor_id) +static inline void accel_debugfs_init(struct drm_device *dev) +{ +} + +static inline void accel_debugfs_register(struct drm_device *dev) { } diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 9a022caacf..cf8e1220a4 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -1126,7 +1126,7 @@ struct drm_bridge_state { struct drm_bus_cfg input_bus_cfg; /** - * @output_bus_cfg: input bus configuration + * @output_bus_cfg: output bus configuration */ struct drm_bus_cfg output_bus_cfg; }; diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 103ff57fc8..9ef461aa9b 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -32,6 +32,8 @@ #include <drm/drm_mode_object.h> #include <drm/drm_modes.h> +struct device_node; + struct drm_bridge; struct drm_bridge_timings; struct drm_connector; @@ -716,10 +718,8 @@ struct drm_bridge { struct drm_encoder *encoder; /** @chain_node: used to form a bridge chain */ struct list_head chain_node; -#ifdef CONFIG_OF /** @of_node: device node pointer to the bridge */ struct 
device_node *of_node; -#endif /** @list: to keep track of all added bridges */ struct list_head list; /** @@ -950,6 +950,6 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, } #endif -void drm_bridge_debugfs_init(struct drm_minor *minor); +void drm_bridge_debugfs_init(struct drm_device *dev); #endif diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h index 572077ff8a..a5b39fc010 100644 --- a/include/drm/drm_buddy.h +++ b/include/drm/drm_buddy.h @@ -22,8 +22,9 @@ start__ >= max__ || size__ > max__ - start__; \ }) -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0) -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1) +#define DRM_BUDDY_RANGE_ALLOCATION BIT(0) +#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1) +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) struct drm_buddy_block { #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p); void drm_buddy_block_print(struct drm_buddy *mm, struct drm_buddy_block *block, struct drm_printer *p); - #endif diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index c0a14b40c0..d47458ecda 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -195,6 +195,6 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); drm_for_each_connector_iter(connector, iter) \ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) -void drm_client_debugfs_init(struct drm_minor *minor); +void drm_client_debugfs_init(struct drm_device *dev); #endif diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h index 81c298488b..6b5eec10c3 100644 --- a/include/drm/drm_color_mgmt.h +++ b/include/drm/drm_color_mgmt.h @@ -24,6 +24,7 @@ #define __DRM_COLOR_MGMT_H__ #include <linux/ctype.h> +#include <linux/math64.h> #include <drm/drm_property.h> struct drm_crtc; diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index d300fde6c1..fe88d7fc6b 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -498,6 +498,8 @@ enum drm_privacy_screen_status { * ITU-R BT.601 colorimetry format * The DP spec does not say whether this is the 525 or the 625 * line version. + * @DRM_MODE_COLORIMETRY_COUNT: + * Not a valid value; merely used four counting */ enum drm_colorspace { /* For Default case, driver will set the colorspace */ @@ -522,7 +524,6 @@ enum drm_colorspace { DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED = 13, DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT = 14, DRM_MODE_COLORIMETRY_BT601_YCC = 15, - /* not a valid value; merely used for counting */ DRM_MODE_COLORIMETRY_COUNT }; @@ -816,6 +817,14 @@ struct drm_display_info { * @quirks: EDID based quirks. Internal to EDID parsing. */ u32 quirks; + + /** + * @source_physical_address: Source Physical Address from HDMI + * Vendor-Specific Data Block, for CEC usage. + * + * Defaults to CEC_PHYS_ADDR_INVALID (0xffff). + */ + u16 source_physical_address; }; int drm_display_info_set_bus_formats(struct drm_display_info *info, @@ -1327,7 +1336,8 @@ struct drm_connector_funcs { * This will get called when a hotplug-event for a drm-connector * has been received from a source outside the display driver / device. 
*/ - void (*oob_hotplug_event)(struct drm_connector *connector); + void (*oob_hotplug_event)(struct drm_connector *connector, + enum drm_connector_status status); /** * @debugfs_init: @@ -1971,7 +1981,8 @@ drm_connector_is_unregistered(struct drm_connector *connector) DRM_CONNECTOR_UNREGISTERED; } -void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode); +void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode, + enum drm_connector_status status); const char *drm_get_connector_type_name(unsigned int connector_type); const char *drm_get_connector_status_name(enum drm_connector_status status); const char *drm_get_subpixel_order_name(enum subpixel_order order); diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h index cb2c1956a2..cf06cee434 100644 --- a/include/drm/drm_debugfs.h +++ b/include/drm/drm_debugfs.h @@ -35,7 +35,7 @@ #include <linux/types.h> #include <linux/seq_file.h> -#include <drm/drm_gpuva_mgr.h> +#include <drm/drm_gpuvm.h> /** * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space @@ -142,8 +142,8 @@ struct drm_debugfs_entry { void drm_debugfs_create_files(const struct drm_info_list *files, int count, struct dentry *root, struct drm_minor *minor); -int drm_debugfs_remove_files(const struct drm_info_list *files, - int count, struct drm_minor *minor); +int drm_debugfs_remove_files(const struct drm_info_list *files, int count, + struct dentry *root, struct drm_minor *minor); void drm_debugfs_add_file(struct drm_device *dev, const char *name, int (*show)(struct seq_file*, void*), void *data); @@ -152,7 +152,7 @@ void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count); int drm_debugfs_gpuva_info(struct seq_file *m, - struct drm_gpuva_manager *mgr); + struct drm_gpuvm *gpuvm); #else static inline void drm_debugfs_create_files(const struct drm_info_list *files, int count, struct dentry *root, @@ -160,7 +160,8 @@ static inline void drm_debugfs_create_files(const struct drm_info_list *files, {} static inline int drm_debugfs_remove_files(const struct drm_info_list *files, - int count, struct drm_minor *minor) + int count, struct dentry *root, + struct drm_minor *minor) { return 0; } @@ -176,7 +177,7 @@ static inline void drm_debugfs_add_files(struct drm_device *dev, {} static inline int drm_debugfs_gpuva_info(struct seq_file *m, - struct drm_gpuva_manager *mgr) + struct drm_gpuvm *gpuvm) { return 0; } diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 7cf4afae2e..c490977ee2 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -312,19 +312,11 @@ struct drm_device { struct drm_fb_helper *fb_helper; /** - * @debugfs_mutex: + * @debugfs_root: * - * Protects &debugfs_list access. + * Root directory for debugfs files. */ - struct mutex debugfs_mutex; - - /** - * @debugfs_list: - * - * List of debugfs files to be created by the DRM device. The files - * must be added during drm_dev_register(). - */ - struct list_head debugfs_list; + struct dentry *debugfs_root; /* Everything below here is for legacy driver, never use! 
*/ /* private: */ diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 0a72b13781..ea36aa79dc 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -590,4 +590,12 @@ static inline bool drm_firmware_drivers_only(void) return video_firmware_drivers_only(); } +#if defined(CONFIG_DEBUG_FS) +void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root); +#else +static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root) +{ +} +#endif + #endif diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 48e93f909e..882d263870 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -612,6 +612,7 @@ const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector, int drm_edid_connector_update(struct drm_connector *connector, const struct drm_edid *edid); int drm_edid_connector_add_modes(struct drm_connector *connector); +bool drm_edid_is_digital(const struct drm_edid *drm_edid); const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid, int ext_id, int *ext_index); diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index c8c2a63b9e..8f35dcea82 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -79,10 +79,8 @@ struct drm_minor { struct device *kdev; /* Linux device */ struct drm_device *dev; + struct dentry *debugfs_symlink; struct dentry *debugfs_root; - - struct list_head debugfs_list; - struct mutex debugfs_lock; /* Protects debugfs_list. */ }; /** diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 532ae78ca7..ccf91daa43 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -22,6 +22,7 @@ #ifndef __DRM_FOURCC_H__ #define __DRM_FOURCC_H__ +#include <linux/math.h> #include <linux/types.h> #include <uapi/drm/drm_fourcc.h> @@ -279,7 +280,7 @@ int drm_format_info_plane_width(const struct drm_format_info *info, int width, if (plane == 0) return width; - return width / info->hsub; + return DIV_ROUND_UP(width, info->hsub); } /** @@ -301,7 +302,7 @@ int drm_format_info_plane_height(const struct drm_format_info *info, int height, if (plane == 0) return height; - return height / info->vsub; + return DIV_ROUND_UP(height, info->vsub); } const struct drm_format_info *__drm_format_info(u32 format); diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 0dcc07b686..80ece7b6dd 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -292,11 +292,6 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p, &fb->head != (&(dev)->mode_config.fb_list); \ fb = list_next_entry(fb, head)) -int drm_framebuffer_plane_width(int width, - const struct drm_framebuffer *fb, int plane); -int drm_framebuffer_plane_height(int height, - const struct drm_framebuffer *fb, int plane); - /** * struct drm_afbc_framebuffer - a special afbc frame buffer object * diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index bc9f6aa2f3..16364487fd 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -209,6 +209,15 @@ struct drm_gem_object_funcs { enum drm_gem_object_status (*status)(struct drm_gem_object *obj); /** + * @rss: + * + * Return resident size of the object in physical memory. + * + * Called by drm_show_memory_stats(). + */ + size_t (*rss)(struct drm_gem_object *obj); + + /** * @vm_ops: * * Virtual memory operations used with mmap. 
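The drm_fourcc.h hunk above switches the per-plane size helpers from truncating to round-up division, so odd-sized buffers keep their final subsampled sample. For NV12 (hsub = vsub = 2), a 1919x1079 buffer now reports a 960x540 chroma plane rather than 959x539. A small sketch under that assumption (the function name is illustrative):

#include <linux/printk.h>
#include <drm/drm_fourcc.h>

static void nv12_chroma_extent_example(void)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	/* Plane 1 is the interleaved CbCr plane; hsub = vsub = 2 for NV12. */
	pr_info("chroma plane: %dx%d\n",
		drm_format_info_plane_width(info, 1919, 1),   /* 960, was 959 */
		drm_format_info_plane_height(info, 1079, 1)); /* 540, was 539 */
}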
diff --git a/include/drm/drm_gpuva_mgr.h b/include/drm/drm_gpuvm.h index ed8d50200c..c7a0594bda 100644 --- a/include/drm/drm_gpuva_mgr.h +++ b/include/drm/drm_gpuvm.h @@ -1,7 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ -#ifndef __DRM_GPUVA_MGR_H__ -#define __DRM_GPUVA_MGR_H__ +#ifndef __DRM_GPUVM_H__ +#define __DRM_GPUVM_H__ /* * Copyright (c) 2022 Red Hat. @@ -31,8 +31,8 @@ #include <drm/drm_gem.h> -struct drm_gpuva_manager; -struct drm_gpuva_fn_ops; +struct drm_gpuvm; +struct drm_gpuvm_ops; /** * enum drm_gpuva_flags - flags for struct drm_gpuva @@ -62,15 +62,15 @@ enum drm_gpuva_flags { * struct drm_gpuva - structure to track a GPU VA mapping * * This structure represents a GPU VA mapping and is associated with a - * &drm_gpuva_manager. + * &drm_gpuvm. * * Typically, this structure is embedded in bigger driver structures. */ struct drm_gpuva { /** - * @mgr: the &drm_gpuva_manager this object is associated with + * @vm: the &drm_gpuvm this object is associated with */ - struct drm_gpuva_manager *mgr; + struct drm_gpuvm *vm; /** * @flags: the &drm_gpuva_flags for this mapping @@ -137,20 +137,18 @@ struct drm_gpuva { } rb; }; -int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va); +int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va); void drm_gpuva_remove(struct drm_gpuva *va); void drm_gpuva_link(struct drm_gpuva *va); void drm_gpuva_unlink(struct drm_gpuva *va); -struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr, +struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm, u64 addr, u64 range); -struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr, +struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm, u64 addr, u64 range); -struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start); -struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end); - -bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range); +struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start); +struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end); static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range, struct drm_gem_object *obj, u64 offset) @@ -186,7 +184,7 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va) } /** - * struct drm_gpuva_manager - DRM GPU VA Manager + * struct drm_gpuvm - DRM GPU VA Manager * * The DRM GPU VA Manager keeps track of a GPU's virtual address space by using * &maple_tree structures. Typically, this structure is embedded in bigger @@ -197,7 +195,7 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va) * * There should be one manager instance per GPU virtual address space. 
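/*
 * A minimal sketch of embedding the renamed tracker in a driver VM and
 * initializing it with the drm_gpuvm_init() signature from this patch,
 * assuming one node is reserved for kernel-internal mappings; "my_vm",
 * "my_gpuvm_ops" and the address-space bounds are illustrative.
 */

#include <drm/drm_gpuvm.h>

struct my_vm {
	struct drm_gpuvm base;		/* embedded GPU VA space tracker */
	/* driver-private state would follow */
};

static const struct drm_gpuvm_ops my_gpuvm_ops;	/* split/merge callbacks */

static void my_vm_setup(struct my_vm *vm)
{
	/* Track a 48-bit VA space, reserving the first 4 KiB page. */
	drm_gpuvm_init(&vm->base, "my-vm",
		       0, 1ULL << 48,	/* start_offset, range */
		       0, 0x1000,	/* reserve_offset, reserve_range */
		       &my_gpuvm_ops);
}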
*/ -struct drm_gpuva_manager { +struct drm_gpuvm { /** * @name: the name of the DRM GPU VA space */ @@ -237,100 +235,101 @@ struct drm_gpuva_manager { struct drm_gpuva kernel_alloc_node; /** - * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers + * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers */ - const struct drm_gpuva_fn_ops *ops; + const struct drm_gpuvm_ops *ops; }; -void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr, - const char *name, - u64 start_offset, u64 range, - u64 reserve_offset, u64 reserve_range, - const struct drm_gpuva_fn_ops *ops); -void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr); +void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, + u64 start_offset, u64 range, + u64 reserve_offset, u64 reserve_range, + const struct drm_gpuvm_ops *ops); +void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm); + +bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range); static inline struct drm_gpuva * __drm_gpuva_next(struct drm_gpuva *va) { - if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list)) + if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list)) return list_next_entry(va, rb.entry); return NULL; } /** - * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas + * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas * @va__: &drm_gpuva structure to assign to in each iteration step - * @mgr__: &drm_gpuva_manager to walk over + * @gpuvm__: &drm_gpuvm to walk over * @start__: starting offset, the first gpuva will overlap this * @end__: ending offset, the last gpuva will start before this (but may * overlap) * - * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie + * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie * between @start__ and @end__. It is implemented similarly to list_for_each(), - * but is using the &drm_gpuva_manager's internal interval tree to accelerate + * but is using the &drm_gpuvm's internal interval tree to accelerate * the search for the starting &drm_gpuva, and hence isn't safe against removal * of elements. It assumes that @end__ is within (or is the upper limit of) the - * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's + * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's * @kernel_alloc_node. */ -#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \ - for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \ +#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \ + for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \ va__ && (va__->va.addr < (end__)); \ va__ = __drm_gpuva_next(va__)) /** - * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of + * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of * &drm_gpuvas * @va__: &drm_gpuva to assign to in each iteration step * @next__: another &drm_gpuva to use as temporary storage - * @mgr__: &drm_gpuva_manager to walk over + * @gpuvm__: &drm_gpuvm to walk over * @start__: starting offset, the first gpuva will overlap this * @end__: ending offset, the last gpuva will start before this (but may * overlap) * - * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie + * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie * between @start__ and @end__. 
It is implemented similarly to - * list_for_each_safe(), but is using the &drm_gpuva_manager's internal interval + * list_for_each_safe(), but is using the &drm_gpuvm's internal interval * tree to accelerate the search for the starting &drm_gpuva, and hence is safe * against removal of elements. It assumes that @end__ is within (or is the - * upper limit of) the &drm_gpuva_manager. This iterator does not skip over the - * &drm_gpuva_manager's @kernel_alloc_node. + * upper limit of) the &drm_gpuvm. This iterator does not skip over the + * &drm_gpuvm's @kernel_alloc_node. */ -#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \ - for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \ +#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \ + for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \ next__ = __drm_gpuva_next(va__); \ va__ && (va__->va.addr < (end__)); \ va__ = next__, next__ = __drm_gpuva_next(va__)) /** - * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas + * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas * @va__: &drm_gpuva to assign to in each iteration step - * @mgr__: &drm_gpuva_manager to walk over + * @gpuvm__: &drm_gpuvm to walk over * * This iterator walks over all &drm_gpuva structures associated with the given - * &drm_gpuva_manager. + * &drm_gpuvm. */ -#define drm_gpuva_for_each_va(va__, mgr__) \ - list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry) +#define drm_gpuvm_for_each_va(va__, gpuvm__) \ + list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry) /** - * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas + * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas * @va__: &drm_gpuva to assign to in each iteration step * @next__: another &drm_gpuva to use as temporary storage - * @mgr__: &drm_gpuva_manager to walk over + * @gpuvm__: &drm_gpuvm to walk over * * This iterator walks over all &drm_gpuva structures associated with the given - * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and + * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and * hence safe against the removal of elements. */ -#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \ - list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry) +#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \ + list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry) /** * enum drm_gpuva_op_type - GPU VA operation type * - * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager. + * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm. */ enum drm_gpuva_op_type { /** @@ -413,7 +412,7 @@ struct drm_gpuva_op_unmap { * * Optionally, if &keep is set, drivers may keep the actual page table * mappings for this &drm_gpuva, adding the missing page table entries - * only and update the &drm_gpuva_manager accordingly. + * only and update the &drm_gpuvm accordingly. 
*/ bool keep; }; @@ -584,22 +583,22 @@ struct drm_gpuva_ops { #define drm_gpuva_next_op(op) list_next_entry(op, entry) struct drm_gpuva_ops * -drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr, +drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, u64 addr, u64 range, struct drm_gem_object *obj, u64 offset); struct drm_gpuva_ops * -drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr, +drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm, u64 addr, u64 range); struct drm_gpuva_ops * -drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr, +drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm, u64 addr, u64 range); struct drm_gpuva_ops * -drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr, +drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj); -void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr, +void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm, struct drm_gpuva_ops *ops); static inline void drm_gpuva_init_from_op(struct drm_gpuva *va, @@ -610,15 +609,15 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va, } /** - * struct drm_gpuva_fn_ops - callbacks for split/merge steps + * struct drm_gpuvm_ops - callbacks for split/merge steps * - * This structure defines the callbacks used by &drm_gpuva_sm_map and - * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap + * This structure defines the callbacks used by &drm_gpuvm_sm_map and + * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap * operations to drivers. */ -struct drm_gpuva_fn_ops { +struct drm_gpuvm_ops { /** - * @op_alloc: called when the &drm_gpuva_manager allocates + * @op_alloc: called when the &drm_gpuvm allocates * a struct drm_gpuva_op * * Some drivers may want to embed struct drm_gpuva_op into driver @@ -630,7 +629,7 @@ struct drm_gpuva_fn_ops { struct drm_gpuva_op *(*op_alloc)(void); /** - * @op_free: called when the &drm_gpuva_manager frees a + * @op_free: called when the &drm_gpuvm frees a * struct drm_gpuva_op * * Some drivers may want to embed struct drm_gpuva_op into driver @@ -642,19 +641,19 @@ struct drm_gpuva_fn_ops { void (*op_free)(struct drm_gpuva_op *op); /** - * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the + * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the * mapping once all previous steps were completed * * The &priv pointer matches the one the driver passed to - * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively. + * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively. * - * Can be NULL if &drm_gpuva_sm_map is used. + * Can be NULL if &drm_gpuvm_sm_map is used. */ int (*sm_step_map)(struct drm_gpuva_op *op, void *priv); /** - * @sm_step_remap: called from &drm_gpuva_sm_map and - * &drm_gpuva_sm_unmap to split up an existent mapping + * @sm_step_remap: called from &drm_gpuvm_sm_map and + * &drm_gpuvm_sm_unmap to split up an existent mapping * * This callback is called when existent mapping needs to be split up. * This is the case when either a newly requested mapping overlaps or @@ -662,38 +661,38 @@ struct drm_gpuva_fn_ops { * mapping is requested. * * The &priv pointer matches the one the driver passed to - * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively. + * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively. * - * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is + * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is * used. 
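/*
 * A hedged sketch filling in the my_gpuvm_ops placeholder from the earlier
 * sketch, assuming the driver programs page tables in each split/merge step;
 * every "my_"-prefixed name is illustrative, not part of this patch.
 */

#include <drm/drm_gpuvm.h>

static int my_step_map(struct drm_gpuva_op *op, void *priv)
{
	/* Program PTEs for op->map.va.addr .. addr + op->map.va.range. */
	return 0;
}

static int my_step_remap(struct drm_gpuva_op *op, void *priv)
{
	/* op->remap carries the surviving prev/next parts of a split mapping. */
	return 0;
}

static int my_step_unmap(struct drm_gpuva_op *op, void *priv)
{
	/* When op->unmap.keep is set, page tables may be left in place. */
	return 0;
}

static const struct drm_gpuvm_ops my_gpuvm_ops = {
	.sm_step_map	= my_step_map,
	.sm_step_remap	= my_step_remap,
	.sm_step_unmap	= my_step_unmap,
};

/* Resolve one map request into the split/merge steps above. */
static int my_vm_bind(struct drm_gpuvm *gpuvm, void *priv, u64 addr, u64 range,
		      struct drm_gem_object *obj, u64 offset)
{
	return drm_gpuvm_sm_map(gpuvm, priv, addr, range, obj, offset);
}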
*/ int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv); /** - * @sm_step_unmap: called from &drm_gpuva_sm_map and - * &drm_gpuva_sm_unmap to unmap an existent mapping + * @sm_step_unmap: called from &drm_gpuvm_sm_map and + * &drm_gpuvm_sm_unmap to unmap an existent mapping * * This callback is called when existent mapping needs to be unmapped. * This is the case when either a newly requested mapping encloses an * existent mapping or an unmap of an existent mapping is requested. * * The &priv pointer matches the one the driver passed to - * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively. + * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively. * - * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is + * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is * used. */ int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv); }; -int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv, +int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, u64 addr, u64 range, struct drm_gem_object *obj, u64 offset); -int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv, +int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv, u64 addr, u64 range); -void drm_gpuva_map(struct drm_gpuva_manager *mgr, +void drm_gpuva_map(struct drm_gpuvm *gpuvm, struct drm_gpuva *va, struct drm_gpuva_op_map *op); @@ -703,4 +702,4 @@ void drm_gpuva_remap(struct drm_gpuva *prev, void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op); -#endif /* __DRM_GPUVA_MGR_H__ */ +#endif /* __DRM_GPUVM_H__ */ diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index c9df040798..c0aec0d4d6 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -168,6 +168,7 @@ struct mipi_dsi_device_info { * struct mipi_dsi_device - DSI peripheral device * @host: DSI host for this peripheral * @dev: driver model device node for this peripheral + * @attached: the DSI device has been successfully attached * @name: DSI peripheral chip type * @channel: virtual channel assigned to the peripheral * @format: pixel format for video mode @@ -184,6 +185,7 @@ struct mipi_dsi_device_info { struct mipi_dsi_device { struct mipi_dsi_host *host; struct device dev; + bool attached; char name[DSI_DEV_NAME_SIZE]; unsigned int channel; diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index ac65f0626c..d2fb81e341 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -471,7 +471,9 @@ struct drm_sched_backend_ops { * @hw_submission_limit: the max size of the hardware queue. * @timeout: the time after which a job is removed from the scheduler. * @name: name of the ring for which this scheduler is being used. - * @sched_rq: priority wise array of run queues. + * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT, + * as there's usually one run-queue per priority, but could be less. + * @sched_rq: An allocated array of run-queues of size @num_rqs; * @wake_up_worker: the wait queue on which the scheduler sleeps until a job * is ready to be scheduled. 
* @job_scheduled: once @drm_sched_entity_do_release is called the scheduler @@ -500,7 +502,8 @@ struct drm_gpu_scheduler { uint32_t hw_submission_limit; long timeout; const char *name; - struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT]; + u32 num_rqs; + struct drm_sched_rq **sched_rq; wait_queue_head_t wake_up_worker; wait_queue_head_t job_scheduled; atomic_t hw_rq_count; @@ -520,7 +523,7 @@ struct drm_gpu_scheduler { int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, - uint32_t hw_submission, unsigned hang_limit, + u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit, long timeout, struct workqueue_struct *timeout_wq, atomic_t *score, const char *name, struct device *dev); diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index e1e10dfbb6..1c9ea6ab3e 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -689,14 +689,18 @@ #define INTEL_RPLU_IDS(info) \ INTEL_VGA_DEVICE(0xA721, info), \ INTEL_VGA_DEVICE(0xA7A1, info), \ - INTEL_VGA_DEVICE(0xA7A9, info) + INTEL_VGA_DEVICE(0xA7A9, info), \ + INTEL_VGA_DEVICE(0xA7AC, info), \ + INTEL_VGA_DEVICE(0xA7AD, info) /* RPL-P */ #define INTEL_RPLP_IDS(info) \ INTEL_RPLU_IDS(info), \ INTEL_VGA_DEVICE(0xA720, info), \ INTEL_VGA_DEVICE(0xA7A0, info), \ - INTEL_VGA_DEVICE(0xA7A8, info) + INTEL_VGA_DEVICE(0xA7A8, info), \ + INTEL_VGA_DEVICE(0xA7AA, info), \ + INTEL_VGA_DEVICE(0xA7AB, info) /* DG2 */ #define INTEL_DG2_G10_IDS(info) \ @@ -714,7 +718,11 @@ INTEL_VGA_DEVICE(0x56A5, info), \ INTEL_VGA_DEVICE(0x56A6, info), \ INTEL_VGA_DEVICE(0x56B0, info), \ - INTEL_VGA_DEVICE(0x56B1, info) + INTEL_VGA_DEVICE(0x56B1, info), \ + INTEL_VGA_DEVICE(0x56BA, info), \ + INTEL_VGA_DEVICE(0x56BB, info), \ + INTEL_VGA_DEVICE(0x56BC, info), \ + INTEL_VGA_DEVICE(0x56BD, info) #define INTEL_DG2_G12_IDS(info) \ INTEL_VGA_DEVICE(0x5696, info), \ @@ -738,18 +746,14 @@ #define INTEL_ATS_M_IDS(info) \ INTEL_ATS_M150_IDS(info), \ INTEL_ATS_M75_IDS(info) + /* MTL */ -#define INTEL_MTL_M_IDS(info) \ +#define INTEL_MTL_IDS(info) \ INTEL_VGA_DEVICE(0x7D40, info), \ - INTEL_VGA_DEVICE(0x7D60, info) - -#define INTEL_MTL_P_IDS(info) \ INTEL_VGA_DEVICE(0x7D45, info), \ INTEL_VGA_DEVICE(0x7D55, info), \ + INTEL_VGA_DEVICE(0x7D60, info), \ + INTEL_VGA_DEVICE(0x7D67, info), \ INTEL_VGA_DEVICE(0x7DD5, info) -#define INTEL_MTL_IDS(info) \ - INTEL_MTL_M_IDS(info), \ - INTEL_MTL_P_IDS(info) - #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h index a702b6ec17..7d96985f2d 100644 --- a/include/drm/i915_pxp_tee_interface.h +++ b/include/drm/i915_pxp_tee_interface.h @@ -22,8 +22,10 @@ struct i915_pxp_component_ops { */ struct module *owner; - int (*send)(struct device *dev, const void *message, size_t size); - int (*recv)(struct device *dev, void *buffer, size_t size); + int (*send)(struct device *dev, const void *message, size_t size, + unsigned long timeout_ms); + int (*recv)(struct device *dev, void *buffer, size_t size, + unsigned long timeout_ms); ssize_t (*gsc_command)(struct device *dev, u8 client_id, u32 fence_id, struct scatterlist *sg_in, size_t total_in_len, struct scatterlist *sg_out); diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h index be12e1dd1f..f7248348a4 100644 --- a/include/dt-bindings/arm/qcom,ids.h +++ b/include/dt-bindings/arm/qcom,ids.h @@ -193,6 +193,7 @@ #define QCOM_ID_SDA439 363 #define QCOM_ID_SDA429 364 #define QCOM_ID_SM7150 365 +#define QCOM_ID_SM7150P 366 #define 
QCOM_ID_IPQ8070 375 #define QCOM_ID_IPQ8071 376 #define QCOM_ID_QM215 386 @@ -203,6 +204,9 @@ #define QCOM_ID_SM6125 394 #define QCOM_ID_IPQ8070A 395 #define QCOM_ID_IPQ8071A 396 +#define QCOM_ID_IPQ8172 397 +#define QCOM_ID_IPQ8173 398 +#define QCOM_ID_IPQ8174 399 #define QCOM_ID_IPQ6018 402 #define QCOM_ID_IPQ6028 403 #define QCOM_ID_SDM429W 416 @@ -233,6 +237,7 @@ #define QCOM_ID_SM8450_3 482 #define QCOM_ID_SC7280 487 #define QCOM_ID_SC7180P 495 +#define QCOM_ID_QCM6490 497 #define QCOM_ID_IPQ5000 503 #define QCOM_ID_IPQ0509 504 #define QCOM_ID_IPQ0518 505 diff --git a/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h new file mode 100644 index 0000000000..861a331963 --- /dev/null +++ b/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */ +/* + * Copyright (c) 2022-2023 Amlogic, Inc. All rights reserved. + * Author: Yu Tu <yu.tu@amlogic.com> + */ + +#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H +#define _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H + +#define CLKID_RTC_32K_CLKIN 0 +#define CLKID_RTC_32K_DIV 1 +#define CLKID_RTC_32K_SEL 2 +#define CLKID_RTC_32K_XATL 3 +#define CLKID_RTC 4 +#define CLKID_SYS_CLK_B_SEL 5 +#define CLKID_SYS_CLK_B_DIV 6 +#define CLKID_SYS_CLK_B 7 +#define CLKID_SYS_CLK_A_SEL 8 +#define CLKID_SYS_CLK_A_DIV 9 +#define CLKID_SYS_CLK_A 10 +#define CLKID_SYS 11 +#define CLKID_CECA_32K_CLKIN 12 +#define CLKID_CECA_32K_DIV 13 +#define CLKID_CECA_32K_SEL_PRE 14 +#define CLKID_CECA_32K_SEL 15 +#define CLKID_CECA_32K_CLKOUT 16 +#define CLKID_CECB_32K_CLKIN 17 +#define CLKID_CECB_32K_DIV 18 +#define CLKID_CECB_32K_SEL_PRE 19 +#define CLKID_CECB_32K_SEL 20 +#define CLKID_CECB_32K_CLKOUT 21 +#define CLKID_SC_CLK_SEL 22 +#define CLKID_SC_CLK_DIV 23 +#define CLKID_SC 24 +#define CLKID_12_24M 25 +#define CLKID_12M_CLK_DIV 26 +#define CLKID_12_24M_CLK_SEL 27 +#define CLKID_VID_PLL_DIV 28 +#define CLKID_VID_PLL_SEL 29 +#define CLKID_VID_PLL 30 +#define CLKID_VCLK_SEL 31 +#define CLKID_VCLK2_SEL 32 +#define CLKID_VCLK_INPUT 33 +#define CLKID_VCLK2_INPUT 34 +#define CLKID_VCLK_DIV 35 +#define CLKID_VCLK2_DIV 36 +#define CLKID_VCLK 37 +#define CLKID_VCLK2 38 +#define CLKID_VCLK_DIV1 39 +#define CLKID_VCLK_DIV2_EN 40 +#define CLKID_VCLK_DIV4_EN 41 +#define CLKID_VCLK_DIV6_EN 42 +#define CLKID_VCLK_DIV12_EN 43 +#define CLKID_VCLK2_DIV1 44 +#define CLKID_VCLK2_DIV2_EN 45 +#define CLKID_VCLK2_DIV4_EN 46 +#define CLKID_VCLK2_DIV6_EN 47 +#define CLKID_VCLK2_DIV12_EN 48 +#define CLKID_VCLK_DIV2 49 +#define CLKID_VCLK_DIV4 50 +#define CLKID_VCLK_DIV6 51 +#define CLKID_VCLK_DIV12 52 +#define CLKID_VCLK2_DIV2 53 +#define CLKID_VCLK2_DIV4 54 +#define CLKID_VCLK2_DIV6 55 +#define CLKID_VCLK2_DIV12 56 +#define CLKID_CTS_ENCI_SEL 57 +#define CLKID_CTS_ENCP_SEL 58 +#define CLKID_CTS_VDAC_SEL 59 +#define CLKID_HDMI_TX_SEL 60 +#define CLKID_CTS_ENCI 61 +#define CLKID_CTS_ENCP 62 +#define CLKID_CTS_VDAC 63 +#define CLKID_HDMI_TX 64 +#define CLKID_HDMI_SEL 65 +#define CLKID_HDMI_DIV 66 +#define CLKID_HDMI 67 +#define CLKID_TS_CLK_DIV 68 +#define CLKID_TS 69 +#define CLKID_MALI_0_SEL 70 +#define CLKID_MALI_0_DIV 71 +#define CLKID_MALI_0 72 +#define CLKID_MALI_1_SEL 73 +#define CLKID_MALI_1_DIV 74 +#define CLKID_MALI_1 75 +#define CLKID_MALI_SEL 76 +#define CLKID_VDEC_P0_SEL 77 +#define CLKID_VDEC_P0_DIV 78 +#define CLKID_VDEC_P0 79 +#define CLKID_VDEC_P1_SEL 80 +#define CLKID_VDEC_P1_DIV 81 +#define CLKID_VDEC_P1 82 +#define CLKID_VDEC_SEL 83 
+#define CLKID_HEVCF_P0_SEL 84 +#define CLKID_HEVCF_P0_DIV 85 +#define CLKID_HEVCF_P0 86 +#define CLKID_HEVCF_P1_SEL 87 +#define CLKID_HEVCF_P1_DIV 88 +#define CLKID_HEVCF_P1 89 +#define CLKID_HEVCF_SEL 90 +#define CLKID_VPU_0_SEL 91 +#define CLKID_VPU_0_DIV 92 +#define CLKID_VPU_0 93 +#define CLKID_VPU_1_SEL 94 +#define CLKID_VPU_1_DIV 95 +#define CLKID_VPU_1 96 +#define CLKID_VPU 97 +#define CLKID_VPU_CLKB_TMP_SEL 98 +#define CLKID_VPU_CLKB_TMP_DIV 99 +#define CLKID_VPU_CLKB_TMP 100 +#define CLKID_VPU_CLKB_DIV 101 +#define CLKID_VPU_CLKB 102 +#define CLKID_VPU_CLKC_P0_SEL 103 +#define CLKID_VPU_CLKC_P0_DIV 104 +#define CLKID_VPU_CLKC_P0 105 +#define CLKID_VPU_CLKC_P1_SEL 106 +#define CLKID_VPU_CLKC_P1_DIV 107 +#define CLKID_VPU_CLKC_P1 108 +#define CLKID_VPU_CLKC_SEL 109 +#define CLKID_VAPB_0_SEL 110 +#define CLKID_VAPB_0_DIV 111 +#define CLKID_VAPB_0 112 +#define CLKID_VAPB_1_SEL 113 +#define CLKID_VAPB_1_DIV 114 +#define CLKID_VAPB_1 115 +#define CLKID_VAPB 116 +#define CLKID_GE2D 117 +#define CLKID_VDIN_MEAS_SEL 118 +#define CLKID_VDIN_MEAS_DIV 119 +#define CLKID_VDIN_MEAS 120 +#define CLKID_SD_EMMC_C_CLK_SEL 121 +#define CLKID_SD_EMMC_C_CLK_DIV 122 +#define CLKID_SD_EMMC_C 123 +#define CLKID_SD_EMMC_A_CLK_SEL 124 +#define CLKID_SD_EMMC_A_CLK_DIV 125 +#define CLKID_SD_EMMC_A 126 +#define CLKID_SD_EMMC_B_CLK_SEL 127 +#define CLKID_SD_EMMC_B_CLK_DIV 128 +#define CLKID_SD_EMMC_B 129 +#define CLKID_SPICC0_SEL 130 +#define CLKID_SPICC0_DIV 131 +#define CLKID_SPICC0_EN 132 +#define CLKID_PWM_A_SEL 133 +#define CLKID_PWM_A_DIV 134 +#define CLKID_PWM_A 135 +#define CLKID_PWM_B_SEL 136 +#define CLKID_PWM_B_DIV 137 +#define CLKID_PWM_B 138 +#define CLKID_PWM_C_SEL 139 +#define CLKID_PWM_C_DIV 140 +#define CLKID_PWM_C 141 +#define CLKID_PWM_D_SEL 142 +#define CLKID_PWM_D_DIV 143 +#define CLKID_PWM_D 144 +#define CLKID_PWM_E_SEL 145 +#define CLKID_PWM_E_DIV 146 +#define CLKID_PWM_E 147 +#define CLKID_PWM_F_SEL 148 +#define CLKID_PWM_F_DIV 149 +#define CLKID_PWM_F 150 +#define CLKID_PWM_G_SEL 151 +#define CLKID_PWM_G_DIV 152 +#define CLKID_PWM_G 153 +#define CLKID_PWM_H_SEL 154 +#define CLKID_PWM_H_DIV 155 +#define CLKID_PWM_H 156 +#define CLKID_PWM_I_SEL 157 +#define CLKID_PWM_I_DIV 158 +#define CLKID_PWM_I 159 +#define CLKID_PWM_J_SEL 160 +#define CLKID_PWM_J_DIV 161 +#define CLKID_PWM_J 162 +#define CLKID_SARADC_SEL 163 +#define CLKID_SARADC_DIV 164 +#define CLKID_SARADC 165 +#define CLKID_GEN_SEL 166 +#define CLKID_GEN_DIV 167 +#define CLKID_GEN 168 +#define CLKID_DDR 169 +#define CLKID_DOS 170 +#define CLKID_ETHPHY 171 +#define CLKID_MALI 172 +#define CLKID_AOCPU 173 +#define CLKID_AUCPU 174 +#define CLKID_CEC 175 +#define CLKID_SDEMMC_A 176 +#define CLKID_SDEMMC_B 177 +#define CLKID_NAND 178 +#define CLKID_SMARTCARD 179 +#define CLKID_ACODEC 180 +#define CLKID_SPIFC 181 +#define CLKID_MSR 182 +#define CLKID_IR_CTRL 183 +#define CLKID_AUDIO 184 +#define CLKID_ETH 185 +#define CLKID_UART_A 186 +#define CLKID_UART_B 187 +#define CLKID_UART_C 188 +#define CLKID_UART_D 189 +#define CLKID_UART_E 190 +#define CLKID_AIFIFO 191 +#define CLKID_TS_DDR 192 +#define CLKID_TS_PLL 193 +#define CLKID_G2D 194 +#define CLKID_SPICC0 195 +#define CLKID_SPICC1 196 +#define CLKID_USB 197 +#define CLKID_I2C_M_A 198 +#define CLKID_I2C_M_B 199 +#define CLKID_I2C_M_C 200 +#define CLKID_I2C_M_D 201 +#define CLKID_I2C_M_E 202 +#define CLKID_HDMITX_APB 203 +#define CLKID_I2C_S_A 204 +#define CLKID_USB1_TO_DDR 205 +#define CLKID_HDCP22 206 +#define CLKID_MMC_APB 207 +#define CLKID_RSA 208 +#define CLKID_CPU_DEBUG 209 
+#define CLKID_VPU_INTR 210 +#define CLKID_DEMOD 211 +#define CLKID_SAR_ADC 212 +#define CLKID_GIC 213 +#define CLKID_PWM_AB 214 +#define CLKID_PWM_CD 215 +#define CLKID_PWM_EF 216 +#define CLKID_PWM_GH 217 +#define CLKID_PWM_IJ 218 +#define CLKID_HDCP22_ESMCLK_SEL 219 +#define CLKID_HDCP22_ESMCLK_DIV 220 +#define CLKID_HDCP22_ESMCLK 221 +#define CLKID_HDCP22_SKPCLK_SEL 222 +#define CLKID_HDCP22_SKPCLK_DIV 223 +#define CLKID_HDCP22_SKPCLK 224 + +#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H */ diff --git a/include/dt-bindings/clock/amlogic,s4-pll-clkc.h b/include/dt-bindings/clock/amlogic,s4-pll-clkc.h new file mode 100644 index 0000000000..af9f110f8b --- /dev/null +++ b/include/dt-bindings/clock/amlogic,s4-pll-clkc.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */ +/* + * Copyright (c) 2022-2023 Amlogic, Inc. All rights reserved. + * Author: Yu Tu <yu.tu@amlogic.com> + */ + +#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H +#define _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H + +#define CLKID_FIXED_PLL_DCO 0 +#define CLKID_FIXED_PLL 1 +#define CLKID_FCLK_DIV2_DIV 2 +#define CLKID_FCLK_DIV2 3 +#define CLKID_FCLK_DIV3_DIV 4 +#define CLKID_FCLK_DIV3 5 +#define CLKID_FCLK_DIV4_DIV 6 +#define CLKID_FCLK_DIV4 7 +#define CLKID_FCLK_DIV5_DIV 8 +#define CLKID_FCLK_DIV5 9 +#define CLKID_FCLK_DIV7_DIV 10 +#define CLKID_FCLK_DIV7 11 +#define CLKID_FCLK_DIV2P5_DIV 12 +#define CLKID_FCLK_DIV2P5 13 +#define CLKID_GP0_PLL_DCO 14 +#define CLKID_GP0_PLL 15 +#define CLKID_HIFI_PLL_DCO 16 +#define CLKID_HIFI_PLL 17 +#define CLKID_HDMI_PLL_DCO 18 +#define CLKID_HDMI_PLL_OD 19 +#define CLKID_HDMI_PLL 20 +#define CLKID_MPLL_50M_DIV 21 +#define CLKID_MPLL_50M 22 +#define CLKID_MPLL_PREDIV 23 +#define CLKID_MPLL0_DIV 24 +#define CLKID_MPLL0 25 +#define CLKID_MPLL1_DIV 26 +#define CLKID_MPLL1 27 +#define CLKID_MPLL2_DIV 28 +#define CLKID_MPLL2 29 +#define CLKID_MPLL3_DIV 30 +#define CLKID_MPLL3 31 + +#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H */ diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h index 11cb0a4fe9..7da4243984 100644 --- a/include/dt-bindings/clock/imx8mp-clock.h +++ b/include/dt-bindings/clock/imx8mp-clock.h @@ -376,7 +376,6 @@ #define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36 #define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37 #define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38 -#define IMX8MP_CLK_AUDIOMIX_PDM_ROOT 39 #define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40 #define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41 #define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42 diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h index a62cb0629a..743ee60632 100644 --- a/include/dt-bindings/clock/qcom,mmcc-msm8974.h +++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h @@ -121,7 +121,6 @@ #define MMSS_MMSSNOC_BTO_AHB_CLK 112 #define MMSS_MMSSNOC_AXI_CLK 113 #define MMSS_S0_AXI_CLK 114 -#define OCMEMCX_AHB_CLK 115 #define OCMEMCX_OCMEMNOC_CLK 116 #define OXILI_OCMEMGX_CLK 117 #define OCMEMNOC_CLK 118 diff --git a/include/dt-bindings/clock/qcom,sm4450-gcc.h b/include/dt-bindings/clock/qcom,sm4450-gcc.h new file mode 100644 index 0000000000..c18e47a86f --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm4450-gcc.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H + +/* GCC clocks */ +#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 0 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 1 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3 +#define GCC_BOOT_ROM_AHB_CLK 4 +#define GCC_CAMERA_AHB_CLK 5 +#define GCC_CAMERA_HF_AXI_CLK 6 +#define GCC_CAMERA_SF_AXI_CLK 7 +#define GCC_CAMERA_SLEEP_CLK 8 +#define GCC_CAMERA_XO_CLK 9 +#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 10 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 11 +#define GCC_DDRSS_GPU_AXI_CLK 12 +#define GCC_DDRSS_PCIE_SF_TBU_CLK 13 +#define GCC_DISP_AHB_CLK 14 +#define GCC_DISP_HF_AXI_CLK 15 +#define GCC_DISP_XO_CLK 16 +#define GCC_EUSB3_0_CLKREF_EN 17 +#define GCC_GP1_CLK 18 +#define GCC_GP1_CLK_SRC 19 +#define GCC_GP2_CLK 20 +#define GCC_GP2_CLK_SRC 21 +#define GCC_GP3_CLK 22 +#define GCC_GP3_CLK_SRC 23 +#define GCC_GPLL0 24 +#define GCC_GPLL0_OUT_EVEN 25 +#define GCC_GPLL0_OUT_ODD 26 +#define GCC_GPLL1 27 +#define GCC_GPLL3 28 +#define GCC_GPLL4 29 +#define GCC_GPLL9 30 +#define GCC_GPLL10 31 +#define GCC_GPU_CFG_AHB_CLK 32 +#define GCC_GPU_GPLL0_CLK_SRC 33 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 34 +#define GCC_GPU_MEMNOC_GFX_CLK 35 +#define GCC_GPU_SNOC_DVM_GFX_CLK 36 +#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_CLK 37 +#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_CLK 38 +#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_CLK 39 +#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_CLK 40 +#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_HF0_CLK 41 +#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_HF1_CLK 42 +#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_SF0_CLK 43 +#define GCC_HLOS1_VOTE_MMU_TCU_CLK 44 +#define GCC_PCIE_0_AUX_CLK 45 +#define GCC_PCIE_0_AUX_CLK_SRC 46 +#define GCC_PCIE_0_CFG_AHB_CLK 47 +#define GCC_PCIE_0_CLKREF_EN 48 +#define GCC_PCIE_0_MSTR_AXI_CLK 49 +#define GCC_PCIE_0_PHY_RCHNG_CLK 50 +#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 51 +#define GCC_PCIE_0_PIPE_CLK 52 +#define GCC_PCIE_0_PIPE_CLK_SRC 53 +#define GCC_PCIE_0_PIPE_DIV2_CLK 54 +#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 55 +#define GCC_PCIE_0_SLV_AXI_CLK 56 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57 +#define GCC_PDM2_CLK 58 +#define GCC_PDM2_CLK_SRC 59 +#define GCC_PDM_AHB_CLK 60 +#define GCC_PDM_XO4_CLK 61 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 62 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 63 +#define GCC_QMIP_DISP_AHB_CLK 64 +#define GCC_QMIP_GPU_AHB_CLK 65 +#define GCC_QMIP_PCIE_AHB_CLK 66 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 67 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 68 +#define GCC_QUPV3_WRAP0_CORE_CLK 69 +#define GCC_QUPV3_WRAP0_S0_CLK 70 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71 +#define GCC_QUPV3_WRAP0_S1_CLK 72 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73 +#define GCC_QUPV3_WRAP0_S2_CLK 74 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75 +#define GCC_QUPV3_WRAP0_S3_CLK 76 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77 +#define GCC_QUPV3_WRAP0_S4_CLK 78 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 80 +#define GCC_QUPV3_WRAP1_CORE_CLK 81 +#define GCC_QUPV3_WRAP1_S0_CLK 82 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 83 +#define GCC_QUPV3_WRAP1_S1_CLK 84 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 85 +#define GCC_QUPV3_WRAP1_S2_CLK 86 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 87 +#define GCC_QUPV3_WRAP1_S3_CLK 88 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 89 +#define GCC_QUPV3_WRAP1_S4_CLK 90 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 91 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 92 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 93 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 94 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 95 +#define GCC_SDCC1_AHB_CLK 96 +#define 
GCC_SDCC1_APPS_CLK 97 +#define GCC_SDCC1_APPS_CLK_SRC 98 +#define GCC_SDCC1_ICE_CORE_CLK 99 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 100 +#define GCC_SDCC2_AHB_CLK 101 +#define GCC_SDCC2_APPS_CLK 102 +#define GCC_SDCC2_APPS_CLK_SRC 103 +#define GCC_UFS_0_CLKREF_EN 104 +#define GCC_UFS_PAD_CLKREF_EN 105 +#define GCC_UFS_PHY_AHB_CLK 106 +#define GCC_UFS_PHY_AXI_CLK 107 +#define GCC_UFS_PHY_AXI_CLK_SRC 108 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 109 +#define GCC_UFS_PHY_ICE_CORE_CLK 110 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 111 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 112 +#define GCC_UFS_PHY_PHY_AUX_CLK 113 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 114 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 115 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 116 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 117 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 118 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 119 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 120 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 121 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 122 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 123 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 124 +#define GCC_USB30_PRIM_MASTER_CLK 125 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 126 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 127 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 128 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 129 +#define GCC_USB30_PRIM_SLEEP_CLK 130 +#define GCC_USB3_0_CLKREF_EN 131 +#define GCC_USB3_PRIM_PHY_AUX_CLK 132 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 133 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 134 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 135 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 136 +#define GCC_VCODEC0_AXI_CLK 137 +#define GCC_VENUS_CTL_AXI_CLK 138 +#define GCC_VIDEO_AHB_CLK 139 +#define GCC_VIDEO_THROTTLE_CORE_CLK 140 +#define GCC_VIDEO_VCODEC0_SYS_CLK 141 +#define GCC_VIDEO_VENUS_CLK_SRC 142 +#define GCC_VIDEO_VENUS_CTL_CLK 143 +#define GCC_VIDEO_XO_CLK 144 + +/* GCC power domains */ +#define GCC_PCIE_0_GDSC 0 +#define GCC_UFS_PHY_GDSC 1 +#define GCC_USB30_PRIM_GDSC 2 +#define GCC_VCODEC0_GDSC 3 +#define GCC_VENUS_GDSC 4 + +/* GCC resets */ +#define GCC_CAMERA_BCR 0 +#define GCC_DISPLAY_BCR 1 +#define GCC_GPU_BCR 2 +#define GCC_PCIE_0_BCR 3 +#define GCC_PCIE_0_LINK_DOWN_BCR 4 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5 +#define GCC_PCIE_0_PHY_BCR 6 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7 +#define GCC_PCIE_PHY_BCR 8 +#define GCC_PCIE_PHY_CFG_AHB_BCR 9 +#define GCC_PCIE_PHY_COM_BCR 10 +#define GCC_PDM_BCR 11 +#define GCC_QUPV3_WRAPPER_0_BCR 12 +#define GCC_QUPV3_WRAPPER_1_BCR 13 +#define GCC_QUSB2PHY_PRIM_BCR 14 +#define GCC_QUSB2PHY_SEC_BCR 15 +#define GCC_SDCC1_BCR 16 +#define GCC_SDCC2_BCR 17 +#define GCC_UFS_PHY_BCR 18 +#define GCC_USB30_PRIM_BCR 19 +#define GCC_USB3_DP_PHY_PRIM_BCR 20 +#define GCC_USB3_DP_PHY_SEC_BCR 21 +#define GCC_USB3_PHY_PRIM_BCR 22 +#define GCC_USB3_PHY_SEC_BCR 23 +#define GCC_USB3PHY_PHY_PRIM_BCR 24 +#define GCC_USB3PHY_PHY_SEC_BCR 25 +#define GCC_VCODEC0_BCR 26 +#define GCC_VENUS_BCR 27 +#define GCC_VIDEO_BCR 28 +#define GCC_VIDEO_VENUS_BCR 29 +#define GCC_VENUS_CTL_AXI_CLK_ARES 30 +#define GCC_VIDEO_VENUS_CTL_CLK_ARES 31 + +#endif diff --git a/include/dt-bindings/clock/qcom,sm8550-camcc.h b/include/dt-bindings/clock/qcom,sm8550-camcc.h new file mode 100644 index 0000000000..a2a256691c --- /dev/null +++ b/include/dt-bindings/clock/qcom,sm8550-camcc.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8550_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8550_H + +/* CAM_CC clocks */ +#define CAM_CC_BPS_AHB_CLK 0 +#define CAM_CC_BPS_CLK 1 +#define CAM_CC_BPS_CLK_SRC 2 +#define CAM_CC_BPS_FAST_AHB_CLK 3 +#define CAM_CC_CAMNOC_AXI_CLK 4 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 5 +#define CAM_CC_CAMNOC_DCD_XO_CLK 6 +#define CAM_CC_CAMNOC_XO_CLK 7 +#define CAM_CC_CCI_0_CLK 8 +#define CAM_CC_CCI_0_CLK_SRC 9 +#define CAM_CC_CCI_1_CLK 10 +#define CAM_CC_CCI_1_CLK_SRC 11 +#define CAM_CC_CCI_2_CLK 12 +#define CAM_CC_CCI_2_CLK_SRC 13 +#define CAM_CC_CORE_AHB_CLK 14 +#define CAM_CC_CPAS_AHB_CLK 15 +#define CAM_CC_CPAS_BPS_CLK 16 +#define CAM_CC_CPAS_CRE_CLK 17 +#define CAM_CC_CPAS_FAST_AHB_CLK 18 +#define CAM_CC_CPAS_IFE_0_CLK 19 +#define CAM_CC_CPAS_IFE_1_CLK 20 +#define CAM_CC_CPAS_IFE_2_CLK 21 +#define CAM_CC_CPAS_IFE_LITE_CLK 22 +#define CAM_CC_CPAS_IPE_NPS_CLK 23 +#define CAM_CC_CPAS_SBI_CLK 24 +#define CAM_CC_CPAS_SFE_0_CLK 25 +#define CAM_CC_CPAS_SFE_1_CLK 26 +#define CAM_CC_CPHY_RX_CLK_SRC 27 +#define CAM_CC_CRE_AHB_CLK 28 +#define CAM_CC_CRE_CLK 29 +#define CAM_CC_CRE_CLK_SRC 30 +#define CAM_CC_CSI0PHYTIMER_CLK 31 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 32 +#define CAM_CC_CSI1PHYTIMER_CLK 33 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 34 +#define CAM_CC_CSI2PHYTIMER_CLK 35 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 36 +#define CAM_CC_CSI3PHYTIMER_CLK 37 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 38 +#define CAM_CC_CSI4PHYTIMER_CLK 39 +#define CAM_CC_CSI4PHYTIMER_CLK_SRC 40 +#define CAM_CC_CSI5PHYTIMER_CLK 41 +#define CAM_CC_CSI5PHYTIMER_CLK_SRC 42 +#define CAM_CC_CSI6PHYTIMER_CLK 43 +#define CAM_CC_CSI6PHYTIMER_CLK_SRC 44 +#define CAM_CC_CSI7PHYTIMER_CLK 45 +#define CAM_CC_CSI7PHYTIMER_CLK_SRC 46 +#define CAM_CC_CSID_CLK 47 +#define CAM_CC_CSID_CLK_SRC 48 +#define CAM_CC_CSID_CSIPHY_RX_CLK 49 +#define CAM_CC_CSIPHY0_CLK 50 +#define CAM_CC_CSIPHY1_CLK 51 +#define CAM_CC_CSIPHY2_CLK 52 +#define CAM_CC_CSIPHY3_CLK 53 +#define CAM_CC_CSIPHY4_CLK 54 +#define CAM_CC_CSIPHY5_CLK 55 +#define CAM_CC_CSIPHY6_CLK 56 +#define CAM_CC_CSIPHY7_CLK 57 +#define CAM_CC_DRV_AHB_CLK 58 +#define CAM_CC_DRV_XO_CLK 59 +#define CAM_CC_FAST_AHB_CLK_SRC 60 +#define CAM_CC_GDSC_CLK 61 +#define CAM_CC_ICP_AHB_CLK 62 +#define CAM_CC_ICP_CLK 63 +#define CAM_CC_ICP_CLK_SRC 64 +#define CAM_CC_IFE_0_CLK 65 +#define CAM_CC_IFE_0_CLK_SRC 66 +#define CAM_CC_IFE_0_DSP_CLK 67 +#define CAM_CC_IFE_0_DSP_CLK_SRC 68 +#define CAM_CC_IFE_0_FAST_AHB_CLK 69 +#define CAM_CC_IFE_1_CLK 70 +#define CAM_CC_IFE_1_CLK_SRC 71 +#define CAM_CC_IFE_1_DSP_CLK 72 +#define CAM_CC_IFE_1_DSP_CLK_SRC 73 +#define CAM_CC_IFE_1_FAST_AHB_CLK 74 +#define CAM_CC_IFE_2_CLK 75 +#define CAM_CC_IFE_2_CLK_SRC 76 +#define CAM_CC_IFE_2_DSP_CLK 77 +#define CAM_CC_IFE_2_DSP_CLK_SRC 78 +#define CAM_CC_IFE_2_FAST_AHB_CLK 79 +#define CAM_CC_IFE_LITE_AHB_CLK 80 +#define CAM_CC_IFE_LITE_CLK 81 +#define CAM_CC_IFE_LITE_CLK_SRC 82 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 83 +#define CAM_CC_IFE_LITE_CSID_CLK 84 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 85 +#define CAM_CC_IPE_NPS_AHB_CLK 86 +#define CAM_CC_IPE_NPS_CLK 87 +#define CAM_CC_IPE_NPS_CLK_SRC 88 +#define CAM_CC_IPE_NPS_FAST_AHB_CLK 89 +#define CAM_CC_IPE_PPS_CLK 90 +#define CAM_CC_IPE_PPS_FAST_AHB_CLK 91 +#define CAM_CC_JPEG_1_CLK 92 +#define CAM_CC_JPEG_CLK 93 +#define CAM_CC_JPEG_CLK_SRC 94 +#define CAM_CC_MCLK0_CLK 95 +#define CAM_CC_MCLK0_CLK_SRC 96 +#define CAM_CC_MCLK1_CLK 97 +#define CAM_CC_MCLK1_CLK_SRC 98 +#define CAM_CC_MCLK2_CLK 99 +#define CAM_CC_MCLK2_CLK_SRC 100 +#define CAM_CC_MCLK3_CLK 101 
+#define CAM_CC_MCLK3_CLK_SRC 102 +#define CAM_CC_MCLK4_CLK 103 +#define CAM_CC_MCLK4_CLK_SRC 104 +#define CAM_CC_MCLK5_CLK 105 +#define CAM_CC_MCLK5_CLK_SRC 106 +#define CAM_CC_MCLK6_CLK 107 +#define CAM_CC_MCLK6_CLK_SRC 108 +#define CAM_CC_MCLK7_CLK 109 +#define CAM_CC_MCLK7_CLK_SRC 110 +#define CAM_CC_PLL0 111 +#define CAM_CC_PLL0_OUT_EVEN 112 +#define CAM_CC_PLL0_OUT_ODD 113 +#define CAM_CC_PLL1 114 +#define CAM_CC_PLL1_OUT_EVEN 115 +#define CAM_CC_PLL2 116 +#define CAM_CC_PLL3 117 +#define CAM_CC_PLL3_OUT_EVEN 118 +#define CAM_CC_PLL4 119 +#define CAM_CC_PLL4_OUT_EVEN 120 +#define CAM_CC_PLL5 121 +#define CAM_CC_PLL5_OUT_EVEN 122 +#define CAM_CC_PLL6 123 +#define CAM_CC_PLL6_OUT_EVEN 124 +#define CAM_CC_PLL7 125 +#define CAM_CC_PLL7_OUT_EVEN 126 +#define CAM_CC_PLL8 127 +#define CAM_CC_PLL8_OUT_EVEN 128 +#define CAM_CC_PLL9 129 +#define CAM_CC_PLL9_OUT_EVEN 130 +#define CAM_CC_PLL10 131 +#define CAM_CC_PLL10_OUT_EVEN 132 +#define CAM_CC_PLL11 133 +#define CAM_CC_PLL11_OUT_EVEN 134 +#define CAM_CC_PLL12 135 +#define CAM_CC_PLL12_OUT_EVEN 136 +#define CAM_CC_QDSS_DEBUG_CLK 137 +#define CAM_CC_QDSS_DEBUG_CLK_SRC 138 +#define CAM_CC_QDSS_DEBUG_XO_CLK 139 +#define CAM_CC_SBI_CLK 140 +#define CAM_CC_SBI_FAST_AHB_CLK 141 +#define CAM_CC_SFE_0_CLK 142 +#define CAM_CC_SFE_0_CLK_SRC 143 +#define CAM_CC_SFE_0_FAST_AHB_CLK 144 +#define CAM_CC_SFE_1_CLK 145 +#define CAM_CC_SFE_1_CLK_SRC 146 +#define CAM_CC_SFE_1_FAST_AHB_CLK 147 +#define CAM_CC_SLEEP_CLK 148 +#define CAM_CC_SLEEP_CLK_SRC 149 +#define CAM_CC_SLOW_AHB_CLK_SRC 150 +#define CAM_CC_XO_CLK_SRC 151 + +/* CAM_CC power domains */ +#define CAM_CC_BPS_GDSC 0 +#define CAM_CC_IFE_0_GDSC 1 +#define CAM_CC_IFE_1_GDSC 2 +#define CAM_CC_IFE_2_GDSC 3 +#define CAM_CC_IPE_0_GDSC 4 +#define CAM_CC_SBI_GDSC 5 +#define CAM_CC_SFE_0_GDSC 6 +#define CAM_CC_SFE_1_GDSC 7 +#define CAM_CC_TITAN_TOP_GDSC 8 + +/* CAM_CC resets */ +#define CAM_CC_BPS_BCR 0 +#define CAM_CC_DRV_BCR 1 +#define CAM_CC_ICP_BCR 2 +#define CAM_CC_IFE_0_BCR 3 +#define CAM_CC_IFE_1_BCR 4 +#define CAM_CC_IFE_2_BCR 5 +#define CAM_CC_IPE_0_BCR 6 +#define CAM_CC_QDSS_DEBUG_BCR 7 +#define CAM_CC_SBI_BCR 8 +#define CAM_CC_SFE_0_BCR 9 +#define CAM_CC_SFE_1_BCR 10 + +#endif diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h index c557b78dc5..e24ee840cf 100644 --- a/include/dt-bindings/clock/qcom,videocc-sm8150.h +++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h @@ -16,10 +16,6 @@ /* VIDEO_CC Resets */ #define VIDEO_CC_MVSC_CORE_CLK_BCR 0 -#define VIDEO_CC_INTERFACE_BCR 1 -#define VIDEO_CC_MVS0_BCR 2 -#define VIDEO_CC_MVS1_BCR 3 -#define VIDEO_CC_MVSC_BCR 4 /* VIDEO_CC GDSCRs */ #define VENUS_GDSC 0 diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h new file mode 100644 index 0000000000..410725b778 --- /dev/null +++ b/include/dt-bindings/clock/r9a08g045-cpg.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2023 Renesas Electronics Corp. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ +#define __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* R9A08G045 CPG Core Clocks */ +#define R9A08G045_CLK_I 0 +#define R9A08G045_CLK_I2 1 +#define R9A08G045_CLK_I3 2 +#define R9A08G045_CLK_S0 3 +#define R9A08G045_CLK_SPI0 4 +#define R9A08G045_CLK_SPI1 5 +#define R9A08G045_CLK_SD0 6 +#define R9A08G045_CLK_SD1 7 +#define R9A08G045_CLK_SD2 8 +#define R9A08G045_CLK_M0 9 +#define R9A08G045_CLK_HP 10 +#define R9A08G045_CLK_TSU 11 +#define R9A08G045_CLK_ZT 12 +#define R9A08G045_CLK_P0 13 +#define R9A08G045_CLK_P1 14 +#define R9A08G045_CLK_P2 15 +#define R9A08G045_CLK_P3 16 +#define R9A08G045_CLK_P4 17 +#define R9A08G045_CLK_P5 18 +#define R9A08G045_CLK_AT 19 +#define R9A08G045_CLK_OC0 20 +#define R9A08G045_CLK_OC1 21 +#define R9A08G045_OSCCLK 22 +#define R9A08G045_OSCCLK2 23 +#define R9A08G045_SWD 24 + +/* R9A08G045 Module Clocks */ +#define R9A08G045_OCTA_ACLK 0 +#define R9A08G045_OCTA_MCLK 1 +#define R9A08G045_CA55_SCLK 2 +#define R9A08G045_CA55_PCLK 3 +#define R9A08G045_CA55_ATCLK 4 +#define R9A08G045_CA55_GICCLK 5 +#define R9A08G045_CA55_PERICLK 6 +#define R9A08G045_CA55_ACLK 7 +#define R9A08G045_CA55_TSCLK 8 +#define R9A08G045_SRAM_ACPU_ACLK0 9 +#define R9A08G045_SRAM_ACPU_ACLK1 10 +#define R9A08G045_SRAM_ACPU_ACLK2 11 +#define R9A08G045_GIC600_GICCLK 12 +#define R9A08G045_IA55_CLK 13 +#define R9A08G045_IA55_PCLK 14 +#define R9A08G045_MHU_PCLK 15 +#define R9A08G045_SYC_CNT_CLK 16 +#define R9A08G045_DMAC_ACLK 17 +#define R9A08G045_DMAC_PCLK 18 +#define R9A08G045_OSTM0_PCLK 19 +#define R9A08G045_OSTM1_PCLK 20 +#define R9A08G045_OSTM2_PCLK 21 +#define R9A08G045_OSTM3_PCLK 22 +#define R9A08G045_OSTM4_PCLK 23 +#define R9A08G045_OSTM5_PCLK 24 +#define R9A08G045_OSTM6_PCLK 25 +#define R9A08G045_OSTM7_PCLK 26 +#define R9A08G045_MTU_X_MCK_MTU3 27 +#define R9A08G045_POE3_CLKM_POE 28 +#define R9A08G045_GPT_PCLK 29 +#define R9A08G045_POEG_A_CLKP 30 +#define R9A08G045_POEG_B_CLKP 31 +#define R9A08G045_POEG_C_CLKP 32 +#define R9A08G045_POEG_D_CLKP 33 +#define R9A08G045_WDT0_PCLK 34 +#define R9A08G045_WDT0_CLK 35 +#define R9A08G045_WDT1_PCLK 36 +#define R9A08G045_WDT1_CLK 37 +#define R9A08G045_WDT2_PCLK 38 +#define R9A08G045_WDT2_CLK 39 +#define R9A08G045_SPI_HCLK 40 +#define R9A08G045_SPI_ACLK 41 +#define R9A08G045_SPI_CLK 42 +#define R9A08G045_SPI_CLKX2 43 +#define R9A08G045_SDHI0_IMCLK 44 +#define R9A08G045_SDHI0_IMCLK2 45 +#define R9A08G045_SDHI0_CLK_HS 46 +#define R9A08G045_SDHI0_ACLK 47 +#define R9A08G045_SDHI1_IMCLK 48 +#define R9A08G045_SDHI1_IMCLK2 49 +#define R9A08G045_SDHI1_CLK_HS 50 +#define R9A08G045_SDHI1_ACLK 51 +#define R9A08G045_SDHI2_IMCLK 52 +#define R9A08G045_SDHI2_IMCLK2 53 +#define R9A08G045_SDHI2_CLK_HS 54 +#define R9A08G045_SDHI2_ACLK 55 +#define R9A08G045_SSI0_PCLK2 56 +#define R9A08G045_SSI0_PCLK_SFR 57 +#define R9A08G045_SSI1_PCLK2 58 +#define R9A08G045_SSI1_PCLK_SFR 59 +#define R9A08G045_SSI2_PCLK2 60 +#define R9A08G045_SSI2_PCLK_SFR 61 +#define R9A08G045_SSI3_PCLK2 62 +#define R9A08G045_SSI3_PCLK_SFR 63 +#define R9A08G045_SRC_CLKP 64 +#define R9A08G045_USB_U2H0_HCLK 65 +#define R9A08G045_USB_U2H1_HCLK 66 +#define R9A08G045_USB_U2P_EXR_CPUCLK 67 +#define R9A08G045_USB_PCLK 68 +#define R9A08G045_ETH0_CLK_AXI 69 +#define R9A08G045_ETH0_CLK_CHI 70 +#define R9A08G045_ETH0_REFCLK 71 +#define R9A08G045_ETH1_CLK_AXI 72 +#define R9A08G045_ETH1_CLK_CHI 73 +#define R9A08G045_ETH1_REFCLK 74 +#define R9A08G045_I2C0_PCLK 75 +#define R9A08G045_I2C1_PCLK 76 +#define R9A08G045_I2C2_PCLK 77 
+#define R9A08G045_I2C3_PCLK 78 +#define R9A08G045_SCIF0_CLK_PCK 79 +#define R9A08G045_SCIF1_CLK_PCK 80 +#define R9A08G045_SCIF2_CLK_PCK 81 +#define R9A08G045_SCIF3_CLK_PCK 82 +#define R9A08G045_SCIF4_CLK_PCK 83 +#define R9A08G045_SCIF5_CLK_PCK 84 +#define R9A08G045_SCI0_CLKP 85 +#define R9A08G045_SCI1_CLKP 86 +#define R9A08G045_IRDA_CLKP 87 +#define R9A08G045_RSPI0_CLKB 88 +#define R9A08G045_RSPI1_CLKB 89 +#define R9A08G045_RSPI2_CLKB 90 +#define R9A08G045_RSPI3_CLKB 91 +#define R9A08G045_RSPI4_CLKB 92 +#define R9A08G045_CANFD_PCLK 93 +#define R9A08G045_CANFD_CLK_RAM 94 +#define R9A08G045_GPIO_HCLK 95 +#define R9A08G045_ADC_ADCLK 96 +#define R9A08G045_ADC_PCLK 97 +#define R9A08G045_TSU_PCLK 98 +#define R9A08G045_PDM_PCLK 99 +#define R9A08G045_PDM_CCLK 100 +#define R9A08G045_PCI_ACLK 101 +#define R9A08G045_PCI_CLKL1PM 102 +#define R9A08G045_SPDIF_PCLK 103 +#define R9A08G045_I3C_PCLK 104 +#define R9A08G045_I3C_TCLK 105 +#define R9A08G045_VBAT_BCLK 106 + +/* R9A08G045 Resets */ +#define R9A08G045_CA55_RST_1_0 0 +#define R9A08G045_CA55_RST_3_0 1 +#define R9A08G045_CA55_RST_4 2 +#define R9A08G045_CA55_RST_5 3 +#define R9A08G045_CA55_RST_6 4 +#define R9A08G045_CA55_RST_7 5 +#define R9A08G045_CA55_RST_8 6 +#define R9A08G045_CA55_RST_9 7 +#define R9A08G045_CA55_RST_10 8 +#define R9A08G045_CA55_RST_11 9 +#define R9A08G045_CA55_RST_12 10 +#define R9A08G045_SRAM_ACPU_ARESETN0 11 +#define R9A08G045_SRAM_ACPU_ARESETN1 12 +#define R9A08G045_SRAM_ACPU_ARESETN2 13 +#define R9A08G045_GIC600_GICRESET_N 14 +#define R9A08G045_GIC600_DBG_GICRESET_N 15 +#define R9A08G045_IA55_RESETN 16 +#define R9A08G045_MHU_RESETN 17 +#define R9A08G045_DMAC_ARESETN 18 +#define R9A08G045_DMAC_RST_ASYNC 19 +#define R9A08G045_SYC_RESETN 20 +#define R9A08G045_OSTM0_PRESETZ 21 +#define R9A08G045_OSTM1_PRESETZ 22 +#define R9A08G045_OSTM2_PRESETZ 23 +#define R9A08G045_OSTM3_PRESETZ 24 +#define R9A08G045_OSTM4_PRESETZ 25 +#define R9A08G045_OSTM5_PRESETZ 26 +#define R9A08G045_OSTM6_PRESETZ 27 +#define R9A08G045_OSTM7_PRESETZ 28 +#define R9A08G045_MTU_X_PRESET_MTU3 29 +#define R9A08G045_POE3_RST_M_REG 30 +#define R9A08G045_GPT_RST_C 31 +#define R9A08G045_POEG_A_RST 32 +#define R9A08G045_POEG_B_RST 33 +#define R9A08G045_POEG_C_RST 34 +#define R9A08G045_POEG_D_RST 35 +#define R9A08G045_WDT0_PRESETN 36 +#define R9A08G045_WDT1_PRESETN 37 +#define R9A08G045_WDT2_PRESETN 38 +#define R9A08G045_SPI_HRESETN 39 +#define R9A08G045_SPI_ARESETN 40 +#define R9A08G045_SDHI0_IXRST 41 +#define R9A08G045_SDHI1_IXRST 42 +#define R9A08G045_SDHI2_IXRST 43 +#define R9A08G045_SSI0_RST_M2_REG 44 +#define R9A08G045_SSI1_RST_M2_REG 45 +#define R9A08G045_SSI2_RST_M2_REG 46 +#define R9A08G045_SSI3_RST_M2_REG 47 +#define R9A08G045_SRC_RST 48 +#define R9A08G045_USB_U2H0_HRESETN 49 +#define R9A08G045_USB_U2H1_HRESETN 50 +#define R9A08G045_USB_U2P_EXL_SYSRST 51 +#define R9A08G045_USB_PRESETN 52 +#define R9A08G045_ETH0_RST_HW_N 53 +#define R9A08G045_ETH1_RST_HW_N 54 +#define R9A08G045_I2C0_MRST 55 +#define R9A08G045_I2C1_MRST 56 +#define R9A08G045_I2C2_MRST 57 +#define R9A08G045_I2C3_MRST 58 +#define R9A08G045_SCIF0_RST_SYSTEM_N 59 +#define R9A08G045_SCIF1_RST_SYSTEM_N 60 +#define R9A08G045_SCIF2_RST_SYSTEM_N 61 +#define R9A08G045_SCIF3_RST_SYSTEM_N 62 +#define R9A08G045_SCIF4_RST_SYSTEM_N 63 +#define R9A08G045_SCIF5_RST_SYSTEM_N 64 +#define R9A08G045_SCI0_RST 65 +#define R9A08G045_SCI1_RST 66 +#define R9A08G045_IRDA_RST 67 +#define R9A08G045_RSPI0_RST 68 +#define R9A08G045_RSPI1_RST 69 +#define R9A08G045_RSPI2_RST 70 +#define R9A08G045_RSPI3_RST 71 +#define 
R9A08G045_RSPI4_RST 72 +#define R9A08G045_CANFD_RSTP_N 73 +#define R9A08G045_CANFD_RSTC_N 74 +#define R9A08G045_GPIO_RSTN 75 +#define R9A08G045_GPIO_PORT_RESETN 76 +#define R9A08G045_GPIO_SPARE_RESETN 77 +#define R9A08G045_ADC_PRESETN 78 +#define R9A08G045_ADC_ADRST_N 79 +#define R9A08G045_TSU_PRESETN 80 +#define R9A08G045_OCTA_ARESETN 81 +#define R9A08G045_PDM0_PRESETNT 82 +#define R9A08G045_PCI_ARESETN 83 +#define R9A08G045_PCI_RST_B 84 +#define R9A08G045_PCI_RST_GP_B 85 +#define R9A08G045_PCI_RST_PS_B 86 +#define R9A08G045_PCI_RST_RSM_B 87 +#define R9A08G045_PCI_RST_CFG_B 88 +#define R9A08G045_PCI_RST_LOAD_B 89 +#define R9A08G045_SPDIF_RST 90 +#define R9A08G045_I3C_TRESETN 91 +#define R9A08G045_I3C_PRESETN 92 +#define R9A08G045_VBAT_BRESETN 93 + +#endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */ diff --git a/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h new file mode 100644 index 0000000000..4e16d31a71 --- /dev/null +++ b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */ +/* + * Copyright (c) 2023 Amlogic, Inc. All rights reserved. + * Author: Huqiang Qin <huqiang.qin@amlogic.com> + */ + +#ifndef _DT_BINDINGS_AMLOGIC_T7_GPIO_H +#define _DT_BINDINGS_AMLOGIC_T7_GPIO_H + +#define GPIOB_0 0 +#define GPIOB_1 1 +#define GPIOB_2 2 +#define GPIOB_3 3 +#define GPIOB_4 4 +#define GPIOB_5 5 +#define GPIOB_6 6 +#define GPIOB_7 7 +#define GPIOB_8 8 +#define GPIOB_9 9 +#define GPIOB_10 10 +#define GPIOB_11 11 +#define GPIOB_12 12 + +#define GPIOC_0 13 +#define GPIOC_1 14 +#define GPIOC_2 15 +#define GPIOC_3 16 +#define GPIOC_4 17 +#define GPIOC_5 18 +#define GPIOC_6 19 + +#define GPIOX_0 20 +#define GPIOX_1 21 +#define GPIOX_2 22 +#define GPIOX_3 23 +#define GPIOX_4 24 +#define GPIOX_5 25 +#define GPIOX_6 26 +#define GPIOX_7 27 +#define GPIOX_8 28 +#define GPIOX_9 29 +#define GPIOX_10 30 +#define GPIOX_11 31 +#define GPIOX_12 32 +#define GPIOX_13 33 +#define GPIOX_14 34 +#define GPIOX_15 35 +#define GPIOX_16 36 +#define GPIOX_17 37 +#define GPIOX_18 38 +#define GPIOX_19 39 + +#define GPIOW_0 40 +#define GPIOW_1 41 +#define GPIOW_2 42 +#define GPIOW_3 43 +#define GPIOW_4 44 +#define GPIOW_5 45 +#define GPIOW_6 46 +#define GPIOW_7 47 +#define GPIOW_8 48 +#define GPIOW_9 49 +#define GPIOW_10 50 +#define GPIOW_11 51 +#define GPIOW_12 52 +#define GPIOW_13 53 +#define GPIOW_14 54 +#define GPIOW_15 55 +#define GPIOW_16 56 + +#define GPIOD_0 57 +#define GPIOD_1 58 +#define GPIOD_2 59 +#define GPIOD_3 60 +#define GPIOD_4 61 +#define GPIOD_5 62 +#define GPIOD_6 63 +#define GPIOD_7 64 +#define GPIOD_8 65 +#define GPIOD_9 66 +#define GPIOD_10 67 +#define GPIOD_11 68 +#define GPIOD_12 69 + +#define GPIOE_0 70 +#define GPIOE_1 71 +#define GPIOE_2 72 +#define GPIOE_3 73 +#define GPIOE_4 74 +#define GPIOE_5 75 +#define GPIOE_6 76 + +#define GPIOZ_0 77 +#define GPIOZ_1 78 +#define GPIOZ_2 79 +#define GPIOZ_3 80 +#define GPIOZ_4 81 +#define GPIOZ_5 82 +#define GPIOZ_6 83 +#define GPIOZ_7 84 +#define GPIOZ_8 85 +#define GPIOZ_9 86 +#define GPIOZ_10 87 +#define GPIOZ_11 88 +#define GPIOZ_12 89 +#define GPIOZ_13 90 + +#define GPIOT_0 91 +#define GPIOT_1 92 +#define GPIOT_2 93 +#define GPIOT_3 94 +#define GPIOT_4 95 +#define GPIOT_5 96 +#define GPIOT_6 97 +#define GPIOT_7 98 +#define GPIOT_8 99 +#define GPIOT_9 100 +#define GPIOT_10 101 +#define GPIOT_11 102 +#define GPIOT_12 103 +#define GPIOT_13 104 +#define GPIOT_14 105 +#define GPIOT_15 106 +#define GPIOT_16 107 +#define GPIOT_17 
108 +#define GPIOT_18 109 +#define GPIOT_19 110 +#define GPIOT_20 111 +#define GPIOT_21 112 +#define GPIOT_22 113 +#define GPIOT_23 114 + +#define GPIOM_0 115 +#define GPIOM_1 116 +#define GPIOM_2 117 +#define GPIOM_3 118 +#define GPIOM_4 119 +#define GPIOM_5 120 +#define GPIOM_6 121 +#define GPIOM_7 122 +#define GPIOM_8 123 +#define GPIOM_9 124 +#define GPIOM_10 125 +#define GPIOM_11 126 +#define GPIOM_12 127 +#define GPIOM_13 128 + +#define GPIOY_0 129 +#define GPIOY_1 130 +#define GPIOY_2 131 +#define GPIOY_3 132 +#define GPIOY_4 133 +#define GPIOY_5 134 +#define GPIOY_6 135 +#define GPIOY_7 136 +#define GPIOY_8 137 +#define GPIOY_9 138 +#define GPIOY_10 139 +#define GPIOY_11 140 +#define GPIOY_12 141 +#define GPIOY_13 142 +#define GPIOY_14 143 +#define GPIOY_15 144 +#define GPIOY_16 145 +#define GPIOY_17 146 +#define GPIOY_18 147 + +#define GPIOH_0 148 +#define GPIOH_1 149 +#define GPIOH_2 150 +#define GPIOH_3 151 +#define GPIOH_4 152 +#define GPIOH_5 153 +#define GPIOH_6 154 +#define GPIOH_7 155 + +#define GPIO_TEST_N 156 + +#endif /* _DT_BINDINGS_AMLOGIC_T7_GPIO_H */ diff --git a/include/dt-bindings/interconnect/qcom,sdx75.h b/include/dt-bindings/interconnect/qcom,sdx75.h new file mode 100644 index 0000000000..e903f5f3dd --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sdx75.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H + +#define MASTER_QPIC_CORE 0 +#define MASTER_QUP_CORE_0 1 +#define SLAVE_QPIC_CORE 2 +#define SLAVE_QUP_CORE_0 3 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_LAGG_CFG 1 +#define SLAVE_MCCC_MASTER 2 +#define SLAVE_GEM_NOC_CFG 3 +#define SLAVE_SNOOP_BWMON 4 + +#define MASTER_SYS_TCU 0 +#define MASTER_APPSS_PROC 1 +#define MASTER_GEM_NOC_CFG 2 +#define MASTER_MSS_PROC 3 +#define MASTER_ANOC_PCIE_GEM_NOC 4 +#define MASTER_SNOC_SF_MEM_NOC 5 +#define MASTER_GIC 6 +#define MASTER_IPA_PCIE 7 +#define SLAVE_GEM_NOC_CNOC 8 +#define SLAVE_LLCC 9 +#define SLAVE_MEM_NOC_PCIE_SNOC 10 +#define SLAVE_SERVICE_GEM_NOC 11 + +#define MASTER_PCIE_0 0 +#define MASTER_PCIE_1 1 +#define MASTER_PCIE_2 2 +#define SLAVE_ANOC_PCIE_GEM_NOC 3 + +#define MASTER_AUDIO 0 +#define MASTER_GIC_AHB 1 +#define MASTER_PCIE_RSCC 2 +#define MASTER_QDSS_BAM 3 +#define MASTER_QPIC 4 +#define MASTER_QUP_0 5 +#define MASTER_ANOC_SNOC 6 +#define MASTER_GEM_NOC_CNOC 7 +#define MASTER_GEM_NOC_PCIE_SNOC 8 +#define MASTER_SNOC_CFG 9 +#define MASTER_PCIE_ANOC_CFG 10 +#define MASTER_CRYPTO 11 +#define MASTER_IPA 12 +#define MASTER_MVMSS 13 +#define MASTER_EMAC_0 14 +#define MASTER_EMAC_1 15 +#define MASTER_QDSS_ETR 16 +#define MASTER_QDSS_ETR_1 17 +#define MASTER_SDCC_1 18 +#define MASTER_SDCC_4 19 +#define MASTER_USB3_0 20 +#define SLAVE_ETH0_CFG 21 +#define SLAVE_ETH1_CFG 22 +#define SLAVE_AUDIO 23 +#define SLAVE_CLK_CTL 24 +#define SLAVE_CRYPTO_0_CFG 25 +#define SLAVE_IMEM_CFG 26 +#define SLAVE_IPA_CFG 27 +#define SLAVE_IPC_ROUTER_CFG 28 +#define SLAVE_CNOC_MSS 29 +#define SLAVE_ICBDI_MVMSS_CFG 30 +#define SLAVE_PCIE_0_CFG 31 +#define SLAVE_PCIE_1_CFG 32 +#define SLAVE_PCIE_2_CFG 33 +#define SLAVE_PCIE_RSC_CFG 34 +#define SLAVE_PDM 35 +#define SLAVE_PRNG 36 +#define SLAVE_QDSS_CFG 37 +#define SLAVE_QPIC 38 +#define SLAVE_QUP_0 39 +#define SLAVE_SDCC_1 40 +#define SLAVE_SDCC_4 41 +#define SLAVE_SPMI_VGI_COEX 42 +#define SLAVE_TCSR 43 +#define 
SLAVE_TLMM 44 +#define SLAVE_USB3 45 +#define SLAVE_USB3_PHY_CFG 46 +#define SLAVE_A1NOC_CFG 47 +#define SLAVE_DDRSS_CFG 48 +#define SLAVE_SNOC_GEM_NOC_SF 49 +#define SLAVE_SNOC_CFG 50 +#define SLAVE_PCIE_ANOC_CFG 51 +#define SLAVE_IMEM 52 +#define SLAVE_SERVICE_PCIE_ANOC 53 +#define SLAVE_SERVICE_SNOC 54 +#define SLAVE_PCIE_0 55 +#define SLAVE_PCIE_1 56 +#define SLAVE_PCIE_2 57 +#define SLAVE_QDSS_STM 58 +#define SLAVE_TCU 59 + +#endif diff --git a/include/dt-bindings/power/amlogic,t7-pwrc.h b/include/dt-bindings/power/amlogic,t7-pwrc.h new file mode 100644 index 0000000000..1f1f2739cc --- /dev/null +++ b/include/dt-bindings/power/amlogic,t7-pwrc.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */ +/* + * Copyright (c) 2023 Amlogic, Inc. + * Author: Hongyu Chen <hongyu.chen1@amlogic.com> + */ +#ifndef _DT_BINDINGS_AMLOGIC_T7_POWER_H +#define _DT_BINDINGS_AMLOGIC_T7_POWER_H + +#define PWRC_T7_DSPA_ID 0 +#define PWRC_T7_DSPB_ID 1 +#define PWRC_T7_DOS_HCODEC_ID 2 +#define PWRC_T7_DOS_HEVC_ID 3 +#define PWRC_T7_DOS_VDEC_ID 4 +#define PWRC_T7_DOS_WAVE_ID 5 +#define PWRC_T7_VPU_HDMI_ID 6 +#define PWRC_T7_USB_COMB_ID 7 +#define PWRC_T7_PCIE_ID 8 +#define PWRC_T7_GE2D_ID 9 +#define PWRC_T7_SRAMA_ID 10 +#define PWRC_T7_SRAMB_ID 11 +#define PWRC_T7_HDMIRX_ID 12 +#define PWRC_T7_VI_CLK1_ID 13 +#define PWRC_T7_VI_CLK2_ID 14 +#define PWRC_T7_ETH_ID 15 +#define PWRC_T7_ISP_ID 16 +#define PWRC_T7_MIPI_ISP_ID 17 +#define PWRC_T7_GDC_ID 18 +#define PWRC_T7_CVE_ID 18 +#define PWRC_T7_DEWARP_ID 19 +#define PWRC_T7_SDIO_A_ID 20 +#define PWRC_T7_SDIO_B_ID 21 +#define PWRC_T7_EMMC_ID 22 +#define PWRC_T7_MALI_SC0_ID 23 +#define PWRC_T7_MALI_SC1_ID 24 +#define PWRC_T7_MALI_SC2_ID 25 +#define PWRC_T7_MALI_SC3_ID 26 +#define PWRC_T7_MALI_TOP_ID 27 +#define PWRC_T7_NNA_CORE0_ID 28 +#define PWRC_T7_NNA_CORE1_ID 29 +#define PWRC_T7_NNA_CORE2_ID 30 +#define PWRC_T7_NNA_CORE3_ID 31 +#define PWRC_T7_NNA_TOP_ID 32 +#define PWRC_T7_DDR0_ID 33 +#define PWRC_T7_DDR1_ID 34 +#define PWRC_T7_DMC0_ID 35 +#define PWRC_T7_DMC1_ID 36 +#define PWRC_T7_NOC_ID 37 +#define PWRC_T7_NIC2_ID 38 +#define PWRC_T7_NIC3_ID 39 +#define PWRC_T7_CCI_ID 40 +#define PWRC_T7_MIPI_DSI0_ID 41 +#define PWRC_T7_SPICC0_ID 42 +#define PWRC_T7_SPICC1_ID 43 +#define PWRC_T7_SPICC2_ID 44 +#define PWRC_T7_SPICC3_ID 45 +#define PWRC_T7_SPICC4_ID 46 +#define PWRC_T7_SPICC5_ID 47 +#define PWRC_T7_EDP0_ID 48 +#define PWRC_T7_EDP1_ID 49 +#define PWRC_T7_MIPI_DSI1_ID 50 +#define PWRC_T7_AUDIO_ID 51 + +#endif diff --git a/include/dt-bindings/power/mediatek,mt8365-power.h b/include/dt-bindings/power/mediatek,mt8365-power.h new file mode 100644 index 0000000000..e6cfd0ec78 --- /dev/null +++ b/include/dt-bindings/power/mediatek,mt8365-power.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (c) 2022 MediaTek Inc. 
+ */ + +#ifndef _DT_BINDINGS_POWER_MT8365_POWER_H +#define _DT_BINDINGS_POWER_MT8365_POWER_H + +#define MT8365_POWER_DOMAIN_MM 0 +#define MT8365_POWER_DOMAIN_CONN 1 +#define MT8365_POWER_DOMAIN_MFG 2 +#define MT8365_POWER_DOMAIN_AUDIO 3 +#define MT8365_POWER_DOMAIN_CAM 4 +#define MT8365_POWER_DOMAIN_DSP 5 +#define MT8365_POWER_DOMAIN_VDEC 6 +#define MT8365_POWER_DOMAIN_VENC 7 +#define MT8365_POWER_DOMAIN_APU 8 + +#endif /* _DT_BINDINGS_POWER_MT8365_POWER_H */ diff --git a/include/dt-bindings/power/qcom,rpmhpd.h b/include/dt-bindings/power/qcom,rpmhpd.h index 7c201a66bc..e54ffa3614 100644 --- a/include/dt-bindings/power/qcom,rpmhpd.h +++ b/include/dt-bindings/power/qcom,rpmhpd.h @@ -26,5 +26,7 @@ #define RPMHPD_QPHY 16 #define RPMHPD_DDR 17 #define RPMHPD_XO 18 +#define RPMHPD_NSP2 19 +#define RPMHPD_GMXC 20 #endif diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index 83be996cb5..7f4e2983a4 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -278,6 +278,27 @@ #define MSM8909_VDDMX MSM8916_VDDMX #define MSM8909_VDDMX_AO MSM8916_VDDMX_AO +/* MSM8917 Power Domain Indexes */ +#define MSM8917_VDDCX 0 +#define MSM8917_VDDCX_AO 1 +#define MSM8917_VDDCX_VFL 2 +#define MSM8917_VDDMX 3 +#define MSM8917_VDDMX_AO 4 + +/* MSM8937 Power Domain Indexes */ +#define MSM8937_VDDCX MSM8917_VDDCX +#define MSM8937_VDDCX_AO MSM8917_VDDCX_AO +#define MSM8937_VDDCX_VFL MSM8917_VDDCX_VFL +#define MSM8937_VDDMX MSM8917_VDDMX +#define MSM8937_VDDMX_AO MSM8917_VDDMX_AO + +/* QM215 Power Domain Indexes */ +#define QM215_VDDCX MSM8917_VDDCX +#define QM215_VDDCX_AO MSM8917_VDDCX_AO +#define QM215_VDDCX_VFL MSM8917_VDDCX_VFL +#define QM215_VDDMX MSM8917_VDDMX +#define QM215_VDDMX_AO MSM8917_VDDMX_AO + /* MSM8953 Power Domain Indexes */ #define MSM8953_VDDMD 0 #define MSM8953_VDDMD_AO 1 diff --git a/include/dt-bindings/power/starfive,jh7110-pmu.h b/include/dt-bindings/power/starfive,jh7110-pmu.h index 132bfe401f..7b4f24927d 100644 --- a/include/dt-bindings/power/starfive,jh7110-pmu.h +++ b/include/dt-bindings/power/starfive,jh7110-pmu.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ /* - * Copyright (C) 2022 StarFive Technology Co., Ltd. + * Copyright (C) 2022-2023 StarFive Technology Co., Ltd. 
* Author: Walker Chen <walker.chen@starfivetech.com> */ #ifndef __DT_BINDINGS_POWER_JH7110_POWER_H__ @@ -14,4 +14,8 @@ #define JH7110_PD_ISP 5 #define JH7110_PD_VENC 6 +/* AON Power Domain */ +#define JH7110_AON_PD_DPHY_TX 0 +#define JH7110_AON_PD_DPHY_RX 1 + #endif diff --git a/include/dt-bindings/thermal/mediatek,lvts-thermal.h b/include/dt-bindings/thermal/mediatek,lvts-thermal.h index 8fa5a46675..997e2f5512 100644 --- a/include/dt-bindings/thermal/mediatek,lvts-thermal.h +++ b/include/dt-bindings/thermal/mediatek,lvts-thermal.h @@ -7,6 +7,15 @@ #ifndef __MEDIATEK_LVTS_DT_H #define __MEDIATEK_LVTS_DT_H +#define MT7988_CPU_0 0 +#define MT7988_CPU_1 1 +#define MT7988_ETH2P5G_0 2 +#define MT7988_ETH2P5G_1 3 +#define MT7988_TOPS_0 4 +#define MT7988_TOPS_1 5 +#define MT7988_ETHWARP_0 6 +#define MT7988_ETHWARP_1 7 + #define MT8195_MCU_BIG_CPU0 0 #define MT8195_MCU_BIG_CPU1 1 #define MT8195_MCU_BIG_CPU2 2 @@ -26,4 +35,23 @@ #define MT8195_AP_CAM0 15 #define MT8195_AP_CAM1 16 +#define MT8192_MCU_BIG_CPU0 0 +#define MT8192_MCU_BIG_CPU1 1 +#define MT8192_MCU_BIG_CPU2 2 +#define MT8192_MCU_BIG_CPU3 3 +#define MT8192_MCU_LITTLE_CPU0 4 +#define MT8192_MCU_LITTLE_CPU1 5 +#define MT8192_MCU_LITTLE_CPU2 6 +#define MT8192_MCU_LITTLE_CPU3 7 + +#define MT8192_AP_VPU0 8 +#define MT8192_AP_VPU1 9 +#define MT8192_AP_GPU0 10 +#define MT8192_AP_GPU1 11 +#define MT8192_AP_INFRA 12 +#define MT8192_AP_CAM 13 +#define MT8192_AP_MD0 14 +#define MT8192_AP_MD1 15 +#define MT8192_AP_MD2 16 + #endif /* __MEDIATEK_LVTS_DT_H */ diff --git a/include/dt-bindings/watchdog/aspeed-wdt.h b/include/dt-bindings/watchdog/aspeed-wdt.h new file mode 100644 index 0000000000..7ae6d84b2b --- /dev/null +++ b/include/dt-bindings/watchdog/aspeed-wdt.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef DT_BINDINGS_ASPEED_WDT_H +#define DT_BINDINGS_ASPEED_WDT_H + +#define AST2500_WDT_RESET_CPU (1 << 0) +#define AST2500_WDT_RESET_COPROC (1 << 1) +#define AST2500_WDT_RESET_SDRAM (1 << 2) +#define AST2500_WDT_RESET_AHB (1 << 3) +#define AST2500_WDT_RESET_I2C (1 << 4) +#define AST2500_WDT_RESET_MAC0 (1 << 5) +#define AST2500_WDT_RESET_MAC1 (1 << 6) +#define AST2500_WDT_RESET_GRAPHICS (1 << 7) +#define AST2500_WDT_RESET_USB2_HOST_HUB (1 << 8) +#define AST2500_WDT_RESET_USB_HOST (1 << 9) +#define AST2500_WDT_RESET_HID_EHCI (1 << 10) +#define AST2500_WDT_RESET_VIDEO (1 << 11) +#define AST2500_WDT_RESET_HAC (1 << 12) +#define AST2500_WDT_RESET_LPC (1 << 13) +#define AST2500_WDT_RESET_SDIO (1 << 14) +#define AST2500_WDT_RESET_MIC (1 << 15) +#define AST2500_WDT_RESET_CRT (1 << 16) +#define AST2500_WDT_RESET_PWM (1 << 17) +#define AST2500_WDT_RESET_PECI (1 << 18) +#define AST2500_WDT_RESET_JTAG (1 << 19) +#define AST2500_WDT_RESET_ADC (1 << 20) +#define AST2500_WDT_RESET_GPIO (1 << 21) +#define AST2500_WDT_RESET_MCTP (1 << 22) +#define AST2500_WDT_RESET_XDMA (1 << 23) +#define AST2500_WDT_RESET_SPI (1 << 24) +#define AST2500_WDT_RESET_SOC_MISC (1 << 25) + +#define AST2500_WDT_RESET_DEFAULT 0x023ffff3 + +#define AST2600_WDT_RESET1_CPU (1 << 0) +#define AST2600_WDT_RESET1_SDRAM (1 << 1) +#define AST2600_WDT_RESET1_AHB (1 << 2) +#define AST2600_WDT_RESET1_SLI (1 << 3) +#define AST2600_WDT_RESET1_SOC_MISC0 (1 << 4) +#define AST2600_WDT_RESET1_COPROC (1 << 5) +#define AST2600_WDT_RESET1_USB_A (1 << 6) +#define AST2600_WDT_RESET1_USB_B (1 << 7) +#define AST2600_WDT_RESET1_UHCI (1 << 8) +#define AST2600_WDT_RESET1_GRAPHICS (1 << 9) +#define AST2600_WDT_RESET1_CRT (1 << 10) +#define AST2600_WDT_RESET1_VIDEO (1 << 11) 
+#define AST2600_WDT_RESET1_HAC (1 << 12) +#define AST2600_WDT_RESET1_DP (1 << 13) +#define AST2600_WDT_RESET1_DP_MCU (1 << 14) +#define AST2600_WDT_RESET1_GP_MCU (1 << 15) +#define AST2600_WDT_RESET1_MAC0 (1 << 16) +#define AST2600_WDT_RESET1_MAC1 (1 << 17) +#define AST2600_WDT_RESET1_SDIO0 (1 << 18) +#define AST2600_WDT_RESET1_JTAG0 (1 << 19) +#define AST2600_WDT_RESET1_MCTP0 (1 << 20) +#define AST2600_WDT_RESET1_MCTP1 (1 << 21) +#define AST2600_WDT_RESET1_XDMA0 (1 << 22) +#define AST2600_WDT_RESET1_XDMA1 (1 << 23) +#define AST2600_WDT_RESET1_GPIO0 (1 << 24) +#define AST2600_WDT_RESET1_RVAS (1 << 25) + +#define AST2600_WDT_RESET1_DEFAULT 0x030f1ff1 + +#define AST2600_WDT_RESET2_CPU (1 << 0) +#define AST2600_WDT_RESET2_SPI (1 << 1) +#define AST2600_WDT_RESET2_AHB2 (1 << 2) +#define AST2600_WDT_RESET2_SLI2 (1 << 3) +#define AST2600_WDT_RESET2_SOC_MISC1 (1 << 4) +#define AST2600_WDT_RESET2_MAC2 (1 << 5) +#define AST2600_WDT_RESET2_MAC3 (1 << 6) +#define AST2600_WDT_RESET2_SDIO1 (1 << 7) +#define AST2600_WDT_RESET2_JTAG1 (1 << 8) +#define AST2600_WDT_RESET2_GPIO1 (1 << 9) +#define AST2600_WDT_RESET2_MDIO (1 << 10) +#define AST2600_WDT_RESET2_LPC (1 << 11) +#define AST2600_WDT_RESET2_PECI (1 << 12) +#define AST2600_WDT_RESET2_PWM (1 << 13) +#define AST2600_WDT_RESET2_ADC (1 << 14) +#define AST2600_WDT_RESET2_FSI (1 << 15) +#define AST2600_WDT_RESET2_I2C (1 << 16) +#define AST2600_WDT_RESET2_I3C_GLOBAL (1 << 17) +#define AST2600_WDT_RESET2_I3C0 (1 << 18) +#define AST2600_WDT_RESET2_I3C1 (1 << 19) +#define AST2600_WDT_RESET2_I3C2 (1 << 20) +#define AST2600_WDT_RESET2_I3C3 (1 << 21) +#define AST2600_WDT_RESET2_I3C4 (1 << 22) +#define AST2600_WDT_RESET2_I3C5 (1 << 23) +#define AST2600_WDT_RESET2_ESPI (1 << 26) + +#define AST2600_WDT_RESET2_DEFAULT 0x03fffff1 + +#endif diff --git a/include/kunit/resource.h b/include/kunit/resource.h index c7383e90f5..4ad69a2642 100644 --- a/include/kunit/resource.h +++ b/include/kunit/resource.h @@ -391,6 +391,27 @@ void kunit_remove_resource(struct kunit *test, struct kunit_resource *res); typedef void (kunit_action_t)(void *); /** + * KUNIT_DEFINE_ACTION_WRAPPER() - Wrap a function for use as a deferred action. + * + * @wrapper: The name of the new wrapper function define. + * @orig: The original function to wrap. + * @arg_type: The type of the argument accepted by @orig. + * + * Defines a wrapper for a function which accepts a single, pointer-sized + * argument. This wrapper can then be passed to kunit_add_action() and + * similar. This should be used in preference to casting a function + * directly to kunit_action_t, as casting function pointers will break + * control flow integrity (CFI), leading to crashes. + */ +#define KUNIT_DEFINE_ACTION_WRAPPER(wrapper, orig, arg_type) \ + static void wrapper(void *in) \ + { \ + arg_type arg = (arg_type)in; \ + orig(arg); \ + } + + +/** * kunit_add_action() - Call a function when the test ends. * @test: Test case to associate the action with. * @action: The function to run on test exit diff --git a/include/kunit/test.h b/include/kunit/test.h index 68ff01aee2..20ed9f9275 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -33,9 +33,7 @@ DECLARE_STATIC_KEY_FALSE(kunit_running); struct kunit; - -/* Size of log associated with test. */ -#define KUNIT_LOG_SIZE 2048 +struct string_stream; /* Maximum size of parameter description string. */ #define KUNIT_PARAM_DESC_SIZE 128 @@ -133,7 +131,7 @@ struct kunit_case { /* private: internal use only. 
*/ enum kunit_status status; char *module_name; - char *log; + struct string_stream *log; }; static inline char *kunit_status_to_ok_not_ok(enum kunit_status status) @@ -253,7 +251,7 @@ struct kunit_suite { /* private: internal use only */ char status_comment[KUNIT_STATUS_COMMENT_SIZE]; struct dentry *debugfs; - char *log; + struct string_stream *log; int suite_init_err; }; @@ -279,7 +277,7 @@ struct kunit { /* private: internal use only. */ const char *name; /* Read only after initialization! */ - char *log; /* Points at case log after initialization */ + struct string_stream *log; /* Points at case log after initialization */ struct kunit_try_catch try_catch; /* param_value is the current parameter value for a test case. */ const void *param_value; @@ -315,7 +313,7 @@ const char *kunit_filter_glob(void); char *kunit_filter(void); char *kunit_filter_action(void); -void kunit_init_test(struct kunit *test, const char *name, char *log); +void kunit_init_test(struct kunit *test, const char *name, struct string_stream *log); int kunit_run_tests(struct kunit_suite *suite); @@ -473,7 +471,7 @@ static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp void kunit_cleanup(struct kunit *test); -void __printf(2, 3) kunit_log_append(char *log, const char *fmt, ...); +void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt, ...); /** * kunit_mark_skipped() - Marks @test_or_suite as skipped diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index e748bc957d..c819c5d166 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -96,7 +96,7 @@ struct arch_timer_cpu { int __init kvm_timer_hyp_init(bool has_gic); int kvm_timer_enable(struct kvm_vcpu *vcpu); -int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); void kvm_timer_sync_user(struct kvm_vcpu *vcpu); bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 31029f4f7b..4b9d8fb393 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -13,7 +13,6 @@ #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) #if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM) - struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ struct perf_event *perf_event; @@ -63,6 +62,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); +void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu); int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, @@ -77,7 +77,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); void kvm_vcpu_pmu_resync_el0(void); #define kvm_vcpu_has_pmu(vcpu) \ - (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) + (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3)) /* * Updates the vcpu's view of the pmu events for this cpu. 
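A minimal sketch of how the KUNIT_DEFINE_ACTION_WRAPPER() helper added in the kunit/resource.h hunk above is meant to be used. This block is illustrative and not part of the patch; the wrapper name kfree_wrapper and the test body are examples (the same kfree wrapper pattern does appear in in-tree KUnit tests):

#include <kunit/test.h>
#include <kunit/resource.h>
#include <linux/slab.h>

/* Expands to: static void kfree_wrapper(void *in) { kfree((const void *)in); } */
KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);

static void example_test(struct kunit *test)
{
	void *buf = kmalloc(16, GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, buf);
	/* Runs kfree(buf) when the test exits, without casting kfree
	 * directly to kunit_action_t and breaking CFI. */
	kunit_add_action(test, kfree_wrapper, buf);
}
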
@@ -101,7 +101,11 @@ void kvm_vcpu_pmu_resync_el0(void); }) u8 kvm_arm_pmu_get_pmuver_limit(void); +u64 kvm_pmu_evtyper_mask(struct kvm *kvm); +int kvm_arm_set_default_pmu(struct kvm *kvm); +u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm); +u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu); #else struct kvm_pmu { }; @@ -168,12 +172,32 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {} +static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {} static inline u8 kvm_arm_pmu_get_pmuver_limit(void) { return 0; } +static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm) +{ + return 0; +} static inline void kvm_vcpu_pmu_resync_el0(void) {} +static inline int kvm_arm_set_default_pmu(struct kvm *kvm) +{ + return -ENODEV; +} + +static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm) +{ + return 0; +} + +static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) +{ + return 0; +} + #endif #endif diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 6e55b92837..e8fb624013 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -26,7 +26,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu) * revisions. It is thus safe to return the latest, unless * userspace has instructed us otherwise. */ - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) { + if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) { if (vcpu->kvm->arch.psci_version) return vcpu->kvm->arch.psci_version; diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 5b27f94d4f..8cc38e836f 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -375,8 +375,8 @@ int kvm_vgic_map_resources(struct kvm *kvm); int kvm_vgic_hyp_init(void); void kvm_vgic_init_cpu_hardware(void); -int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, - bool level, void *owner); +int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, + unsigned int intid, bool level, void *owner); int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, u32 vintid, struct irq_ops *ops); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index afd94c9b8b..4db54e928b 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -30,6 +30,7 @@ struct irq_domain_ops; #include <linux/dynamic_debug.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/fw_table.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> @@ -37,6 +38,16 @@ struct irq_domain_ops; #include <acpi/acpi_io.h> #include <asm/acpi.h> +#ifdef CONFIG_ACPI_TABLE_LIB +#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI) +#define __init_or_acpilib +#define __initdata_or_acpilib +#else +#define EXPORT_SYMBOL_ACPI_LIB(x) +#define __init_or_acpilib __init +#define __initdata_or_acpilib __initdata +#endif + static inline acpi_handle acpi_device_handle(struct acpi_device *adev) { return adev ? 
adev->handle : NULL; @@ -119,21 +130,8 @@ enum acpi_address_range_id { /* Table Handlers */ -union acpi_subtable_headers { - struct acpi_subtable_header common; - struct acpi_hmat_structure hmat; - struct acpi_prmt_module_header prmt; - struct acpi_cedt_header cedt; -}; - typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); -typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header, - const unsigned long end); - -typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header, - void *arg, const unsigned long end); - /* Debugger support */ struct acpi_debugger_ops { @@ -207,14 +205,6 @@ static inline int acpi_debugger_notify_command_complete(void) (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) -struct acpi_subtable_proc { - int id; - acpi_tbl_entry_handler handler; - acpi_tbl_entry_handler_arg handler_arg; - void *arg; - int count; -}; - void __iomem *__acpi_map_table(unsigned long phys, unsigned long size); void __acpi_unmap_table(void __iomem *map, unsigned long size); int early_acpi_boot_init(void); @@ -229,16 +219,6 @@ void acpi_reserve_initial_tables (void); void acpi_table_init_complete (void); int acpi_table_init (void); -#ifdef CONFIG_ACPI_TABLE_LIB -#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI) -#define __init_or_acpilib -#define __initdata_or_acpilib -#else -#define EXPORT_SYMBOL_ACPI_LIB(x) -#define __init_or_acpilib __init -#define __initdata_or_acpilib __initdata -#endif - int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init_or_acpilib acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, @@ -256,10 +236,15 @@ acpi_table_parse_cedt(enum acpi_cedt_type id, int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); +static inline bool acpi_gicc_is_usable(struct acpi_madt_generic_interrupt *gicc) +{ + return gicc->flags & ACPI_MADT_ENABLED; +} + /* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); -#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) +#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void @@ -787,6 +772,11 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) struct acpi_device; +static inline bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2) +{ + return false; +} + static inline bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2) { @@ -1114,15 +1104,8 @@ static inline int acpi_get_lps0_constraint(struct device *dev) return ACPI_STATE_UNKNOWN; } #endif /* CONFIG_SUSPEND && CONFIG_X86 */ -#ifndef CONFIG_IA64 void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else -static inline void arch_reserve_mem_area(acpi_physical_address addr, - size_t size) -{ -} -#endif /* CONFIG_X86 */ -#else #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) #endif @@ -1548,4 +1531,9 @@ static inline void acpi_device_notify(struct device *dev) { } static inline void acpi_device_notify_remove(struct device *dev) { } #endif +static inline void acpi_use_parent_companion(struct device *dev) +{ + ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent)); +} + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/aer.h 
b/include/linux/aer.h index 29cc102209..f6ea2f57d8 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -51,7 +51,7 @@ static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev) static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; } #endif -void cper_print_aer(struct pci_dev *dev, int aer_severity, +void pci_print_aer(struct pci_dev *dev, int aer_severity, struct aer_capability_regs *aer); int cper_severity_to_aer(int cper_severity); void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 99a5201d9e..dc7ed2f468 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -33,126 +33,6 @@ struct pci_dev; extern int amd_iommu_detect(void); -/** - * amd_iommu_init_device() - Init device for use with IOMMUv2 driver - * @pdev: The PCI device to initialize - * @pasids: Number of PASIDs to support for this device - * - * This function does all setup for the device pdev so that it can be - * used with IOMMUv2. - * Returns 0 on success or negative value on error. - */ -extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids); - -/** - * amd_iommu_free_device() - Free all IOMMUv2 related device resources - * and disable IOMMUv2 usage for this device - * @pdev: The PCI device to disable IOMMUv2 usage for' - */ -extern void amd_iommu_free_device(struct pci_dev *pdev); - -/** - * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device - * @pdev: The PCI device to bind the task to - * @pasid: The PASID on the device the task should be bound to - * @task: the task to bind - * - * The function returns 0 on success or a negative value on error. - */ -extern int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid, - struct task_struct *task); - -/** - * amd_iommu_unbind_pasid() - Unbind a PASID from its task on - * a device - * @pdev: The device of the PASID - * @pasid: The PASID to unbind - * - * When this function returns the device is no longer using the PASID - * and the PASID is no longer bound to its task. - */ -extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid); - -/** - * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed - * PRI requests - * @pdev: The PCI device the call-back should be registered for - * @cb: The call-back function - * - * The IOMMUv2 driver invokes this call-back when it is unable to - * successfully handle a PRI request. The device driver can then decide - * which PRI response the device should see. Possible return values for - * the call-back are: - * - * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device - * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device - * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device, - * the device is required to disable - * PRI when it receives this response - * - * The function returns 0 on success or negative value on error. 
- */ -#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0 -#define AMD_IOMMU_INV_PRI_RSP_INVALID 1 -#define AMD_IOMMU_INV_PRI_RSP_FAIL 2 - -typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev, - u32 pasid, - unsigned long address, - u16); - -extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, - amd_iommu_invalid_ppr_cb cb); - -#define PPR_FAULT_EXEC (1 << 1) -#define PPR_FAULT_READ (1 << 2) -#define PPR_FAULT_WRITE (1 << 5) -#define PPR_FAULT_USER (1 << 6) -#define PPR_FAULT_RSVD (1 << 7) -#define PPR_FAULT_GN (1 << 8) - -/** - * amd_iommu_device_info() - Get information about IOMMUv2 support of a - * PCI device - * @pdev: PCI device to query information from - * @info: A pointer to an amd_iommu_device_info structure which will contain - * the information about the PCI device - * - * Returns 0 on success, negative value on error - */ - -#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */ -#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */ -#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */ -#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution - on memory pages */ -#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request - super-user privileges */ - -struct amd_iommu_device_info { - int max_pasids; - u32 flags; -}; - -extern int amd_iommu_device_info(struct pci_dev *pdev, - struct amd_iommu_device_info *info); - -/** - * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating - * a pasid context. This call-back is - * invoked when the IOMMUv2 driver needs to - * invalidate a PASID context, for example - * because the task that is bound to that - * context is about to exit. - * - * @pdev: The PCI device the call-back should be registered for - * @cb: The call-back function - */ - -typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, u32 pasid); - -extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev, - amd_iommu_invalidate_ctx cb); #else /* CONFIG_AMD_IOMMU */ static inline int amd_iommu_detect(void) { return -ENODEV; } diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index cc060da51b..3d0fde57ba 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -6,6 +6,7 @@ #ifndef _LINUX_ARM_FFA_H #define _LINUX_ARM_FFA_H +#include <linux/bitfield.h> #include <linux/device.h> #include <linux/module.h> #include <linux/types.h> @@ -20,6 +21,7 @@ #define FFA_ERROR FFA_SMC_32(0x60) #define FFA_SUCCESS FFA_SMC_32(0x61) +#define FFA_FN64_SUCCESS FFA_SMC_64(0x61) #define FFA_INTERRUPT FFA_SMC_32(0x62) #define FFA_VERSION FFA_SMC_32(0x63) #define FFA_FEATURES FFA_SMC_32(0x64) @@ -54,6 +56,23 @@ #define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A) #define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B) #define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C) +#define FFA_NOTIFICATION_BITMAP_CREATE FFA_SMC_32(0x7D) +#define FFA_NOTIFICATION_BITMAP_DESTROY FFA_SMC_32(0x7E) +#define FFA_NOTIFICATION_BIND FFA_SMC_32(0x7F) +#define FFA_NOTIFICATION_UNBIND FFA_SMC_32(0x80) +#define FFA_NOTIFICATION_SET FFA_SMC_32(0x81) +#define FFA_NOTIFICATION_GET FFA_SMC_32(0x82) +#define FFA_NOTIFICATION_INFO_GET FFA_SMC_32(0x83) +#define FFA_FN64_NOTIFICATION_INFO_GET FFA_SMC_64(0x83) +#define FFA_RX_ACQUIRE FFA_SMC_32(0x84) +#define FFA_SPM_ID_GET FFA_SMC_32(0x85) +#define FFA_MSG_SEND2 FFA_SMC_32(0x86) +#define FFA_SECONDARY_EP_REGISTER FFA_SMC_32(0x87) +#define FFA_FN64_SECONDARY_EP_REGISTER FFA_SMC_64(0x87) +#define FFA_MEM_PERM_GET FFA_SMC_32(0x88) +#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88) 
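For context on the FFA_SMC_32()/FFA_SMC_64() function IDs being added to arm_ffa.h here: under the SMC Calling Convention, standard-service fast calls use base 0x84000000 (32-bit convention) or 0xC4000000 (64-bit convention) plus the function number. The stand-alone sketch below re-derives two of the new IDs; the SMC32_STD_FAST/SMC64_STD_FAST macros are illustrative stand-ins, not the header's own definitions:

#include <stdio.h>

/* Illustrative equivalents of FFA_SMC_32()/FFA_SMC_64() for
 * standard-service (owner 4) SMCCC fast calls. */
#define SMC32_STD_FAST(fn) (0x84000000u | (fn))
#define SMC64_STD_FAST(fn) (0xC4000000u | (fn))

int main(void)
{
	/* FFA_NOTIFICATION_BITMAP_CREATE = FFA_SMC_32(0x7D) */
	printf("FFA_NOTIFICATION_BITMAP_CREATE = 0x%08x\n", SMC32_STD_FAST(0x7D));
	/* FFA_FN64_NOTIFICATION_INFO_GET = FFA_SMC_64(0x83) */
	printf("FFA_FN64_NOTIFICATION_INFO_GET = 0x%08x\n", SMC64_STD_FAST(0x83));
	return 0;
}
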
+#define FFA_MEM_PERM_SET FFA_SMC_32(0x89) +#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89) /* * For some calls it is necessary to use SMC64 to pass or return 64-bit values. @@ -76,6 +95,7 @@ #define FFA_RET_DENIED (-6) #define FFA_RET_RETRY (-7) #define FFA_RET_ABORTED (-8) +#define FFA_RET_NO_DATA (-9) /* FFA version encoding */ #define FFA_MAJOR_VERSION_MASK GENMASK(30, 16) @@ -86,6 +106,7 @@ (FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \ FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor))) #define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0) +#define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1) /** * FF-A specification mentions explicitly about '4K pages'. This should @@ -188,6 +209,8 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; } #define module_ffa_driver(__ffa_driver) \ module_driver(__ffa_driver, ffa_register, ffa_unregister) +extern struct bus_type ffa_bus_type; + /* FFA transport related */ struct ffa_partition_info { u16 id; @@ -278,8 +301,8 @@ struct ffa_mem_region { #define FFA_MEM_NON_SHAREABLE (0) #define FFA_MEM_OUTER_SHAREABLE (2) #define FFA_MEM_INNER_SHAREABLE (3) - u8 attributes; - u8 reserved_0; + /* Memory region attributes, upper byte MBZ pre v1.1 */ + u16 attributes; /* * Clear memory region contents after unmapping it from the sender and * before mapping it for any receiver. @@ -317,27 +340,41 @@ struct ffa_mem_region { * memory region. */ u64 tag; - u32 reserved_1; + /* Size of each endpoint memory access descriptor, MBZ pre v1.1 */ + u32 ep_mem_size; /* * The number of `ffa_mem_region_attributes` entries included in this * transaction. */ u32 ep_count; /* - * An array of endpoint memory access descriptors. - * Each one specifies a memory region offset, an endpoint and the - * attributes with which this memory region should be mapped in that - * endpoint's page table. + * 16-byte aligned offset from the base address of this descriptor + * to the first element of the endpoint memory access descriptor array + * Valid only from v1.1 */ - struct ffa_mem_region_attributes ep_mem_access[]; + u32 ep_mem_offset; + /* MBZ, valid only from v1.1 */ + u32 reserved[3]; }; -#define COMPOSITE_OFFSET(x) \ - (offsetof(struct ffa_mem_region, ep_mem_access[x])) #define CONSTITUENTS_OFFSET(x) \ (offsetof(struct ffa_composite_mem_region, constituents[x])) -#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \ - (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y)) + +static inline u32 +ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version) +{ + u32 offset = count * sizeof(struct ffa_mem_region_attributes); + /* + * Earlier to v1.1, the endpoint memory descriptor array started at + * offset 32(i.e. 
offset of ep_mem_offset in the current structure) + */ + if (ffa_version <= FFA_VERSION_1_0) + offset += offsetof(struct ffa_mem_region, ep_mem_offset); + else + offset += sizeof(struct ffa_mem_region); + + return offset; +} struct ffa_mem_ops_args { bool use_txbuf; @@ -367,10 +404,30 @@ struct ffa_mem_ops { int (*memory_lend)(struct ffa_mem_ops_args *args); }; +struct ffa_cpu_ops { + int (*run)(struct ffa_device *dev, u16 vcpu); +}; + +typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data); +typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data); + +struct ffa_notifier_ops { + int (*sched_recv_cb_register)(struct ffa_device *dev, + ffa_sched_recv_cb cb, void *cb_data); + int (*sched_recv_cb_unregister)(struct ffa_device *dev); + int (*notify_request)(struct ffa_device *dev, bool per_vcpu, + ffa_notifier_cb cb, void *cb_data, int notify_id); + int (*notify_relinquish)(struct ffa_device *dev, int notify_id); + int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu, + u16 vcpu); +}; + struct ffa_ops { const struct ffa_info_ops *info_ops; const struct ffa_msg_ops *msg_ops; const struct ffa_mem_ops *mem_ops; + const struct ffa_cpu_ops *cpu_ops; + const struct ffa_notifier_ops *notifier_ops; }; #endif /* _LINUX_ARM_FFA_H */ diff --git a/include/linux/array_size.h b/include/linux/array_size.h new file mode 100644 index 0000000000..06d7d83196 --- /dev/null +++ b/include/linux/array_size.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ARRAY_SIZE_H +#define _LINUX_ARRAY_SIZE_H + +#include <linux/compiler.h> + +/** + * ARRAY_SIZE - get the number of elements in array @arr + * @arr: array to be sized + */ +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) + +#endif /* _LINUX_ARRAY_SIZE_H */ diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h index b83ef19da1..5e95faa959 100644 --- a/include/linux/atomic/atomic-arch-fallback.h +++ b/include/linux/atomic/atomic-arch-fallback.h @@ -428,6 +428,19 @@ extern void raw_cmpxchg128_relaxed_not_implemented(void); #define raw_sync_cmpxchg arch_sync_cmpxchg +#ifdef arch_sync_try_cmpxchg +#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg +#else +#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) +#endif + /** * raw_atomic_read() - atomic load with relaxed ordering * @v: pointer to atomic_t @@ -4649,4 +4662,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v) } #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 2fdd6702823fa842f9cea57a002e6e4476ae780c +// eec048affea735b8464f58e6d96992101f8f85f1 diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h index d401b406ef..54d7bbe0ae 100644 --- a/include/linux/atomic/atomic-instrumented.h +++ b/include/linux/atomic/atomic-instrumented.h @@ -4998,6 +4998,14 @@ atomic_long_dec_if_positive(atomic_long_t *v) raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) +#define sync_try_cmpxchg(ptr, ...) 
\ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kcsan_mb(); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + raw_sync_try_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) + #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ -// 1568f875fef72097413caab8339120c065a39aa4 +// 2cc4bc990fef44d3836ec108f11b610f3f438184 diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index d0807ad43f..99ae7960a8 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -4,6 +4,11 @@ #ifndef _VIRTCHNL_H_ #define _VIRTCHNL_H_ +#include <linux/bitops.h> +#include <linux/bits.h> +#include <linux/overflow.h> +#include <uapi/linux/if_ether.h> + /* Description: * This header file describes the Virtual Function (VF) - Physical Function * (PF) communication protocol used by the drivers for all devices starting @@ -240,6 +245,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6) /* used to negotiate communicating link speeds in Mbps */ #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7) +#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10) #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15) #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16) #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17) @@ -295,7 +301,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); /* VIRTCHNL_OP_CONFIG_RX_QUEUE * VF sends this message to set up parameters for one RX queue. * External data buffer contains one instance of virtchnl_rxq_info. - * PF configures requested queue and returns a status code. + * PF configures requested queue and returns a status code. The + * crc_disable flag disables CRC stripping on the VF. Setting + * the crc_disable flag to 1 will disable CRC stripping for each + * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC + * offload must have been set prior to sending this info or the PF + * will ignore the request. This flag should be set the same for + * all of the queues for a VF. */ /* Rx queue config info */ @@ -307,7 +319,7 @@ struct virtchnl_rxq_info { u16 splithdr_enabled; /* deprecated with AVF 1.0 */ u32 databuffer_size; u32 max_pkt_size; - u8 pad0; + u8 crc_disable; u8 rxdid; u8 pad1[2]; u64 dma_ring_addr; diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index ae12696ec4..2ad261082b 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -141,8 +141,6 @@ struct bdi_writeback { struct delayed_work dwork; /* work item used for writeback */ struct delayed_work bw_dwork; /* work item used for bandwidth estimate */ - unsigned long dirty_sleep; /* last wait */ - struct list_head bdi_node; /* anchored at bdi->wb_list */ #ifdef CONFIG_CGROUP_WRITEBACK @@ -179,6 +177,11 @@ struct backing_dev_info { * any dirty wbs, which is depended upon by bdi_has_dirty(). */ atomic_long_t tot_write_bandwidth; + /* + * Jiffies when last process was dirty throttled on this bdi. Used by + * blk-wbt. 
+ */ + unsigned long last_bdp_sleep; struct bdi_writeback wb; /* the root writeback info for this bdi */ struct list_head wb_list; /* list of all wbs */ diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h index 2426276b9b..670f2dae69 100644 --- a/include/linux/badblocks.h +++ b/include/linux/badblocks.h @@ -15,6 +15,7 @@ #define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9) #define BB_LEN(x) (((x) & BB_LEN_MASK) + 1) #define BB_ACK(x) (!!((x) & BB_ACK_MASK)) +#define BB_END(x) (BB_OFFSET(x) + BB_LEN(x)) #define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63)) /* Bad block numbers are stored sorted in a single page. @@ -41,6 +42,12 @@ struct badblocks { sector_t size; /* in sectors */ }; +struct badblocks_context { + sector_t start; + sector_t len; + int ack; +}; + int badblocks_check(struct badblocks *bb, sector_t s, int sectors, sector_t *first_bad, int *bad_sectors); int badblocks_set(struct badblocks *bb, sector_t s, int sectors, @@ -63,4 +70,27 @@ static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb) } badblocks_exit(bb); } + +static inline int badblocks_full(struct badblocks *bb) +{ + return (bb->count >= MAX_BADBLOCKS); +} + +static inline int badblocks_empty(struct badblocks *bb) +{ + return (bb->count == 0); +} + +static inline void set_changed(struct badblocks *bb) +{ + if (bb->changed != 1) + bb->changed = 1; +} + +static inline void clear_changed(struct badblocks *bb) +{ + if (bb->changed != 0) + bb->changed = 0; +} + #endif diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8d51f69f9f..70f97f685b 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -90,6 +90,16 @@ struct linux_binfmt { #endif } __randomize_layout; +#if IS_ENABLED(CONFIG_BINFMT_MISC) +struct binfmt_misc { + struct list_head entries; + rwlock_t entries_lock; + bool enabled; +} __randomize_layout; + +extern struct binfmt_misc init_binfmt_misc; +#endif + extern void __register_binfmt(struct linux_binfmt *fmt, int insert); /* Registration of default binfmt handlers */ diff --git a/include/linux/bitmap-str.h b/include/linux/bitmap-str.h new file mode 100644 index 0000000000..17caeca94c --- /dev/null +++ b/include/linux/bitmap-str.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_BITMAP_STR_H +#define __LINUX_BITMAP_STR_H + +int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); +int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); +extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, + int nmaskbits, loff_t off, size_t count); +extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, + int nmaskbits, loff_t off, size_t count); +int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits); +int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); +int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, + unsigned long *dst, int nbits); + +#endif /* __LINUX_BITMAP_STR_H */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 03644237e1..99451431e4 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -6,10 +6,12 @@ #include <linux/align.h> #include <linux/bitops.h> +#include <linux/errno.h> #include <linux/find.h> #include <linux/limits.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/bitmap-str.h> struct device; @@ -200,14 +202,6 @@ bitmap_find_next_zero_area(unsigned long 
*map, align_mask, 0); } -int bitmap_parse(const char *buf, unsigned int buflen, - unsigned long *dst, int nbits); -int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, - unsigned long *dst, int nbits); -int bitmap_parselist(const char *buf, unsigned long *maskp, - int nmaskbits); -int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, - unsigned long *dst, int nbits); void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, unsigned int nbits); int bitmap_bitremap(int oldbit, @@ -216,23 +210,6 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, unsigned int bits); void bitmap_fold(unsigned long *dst, const unsigned long *orig, unsigned int sz, unsigned int nbits); -int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); -void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); -int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); - -#ifdef __BIG_ENDIAN -void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); -#else -#define bitmap_copy_le bitmap_copy -#endif -int bitmap_print_to_pagebuf(bool list, char *buf, - const unsigned long *maskp, int nmaskbits); - -extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, - int nmaskbits, loff_t off, size_t count); - -extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, - int nmaskbits, loff_t off, size_t count); #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) @@ -519,6 +496,66 @@ static inline void bitmap_next_set_region(unsigned long *bitmap, } /** + * bitmap_release_region - release allocated bitmap region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @pos: beginning of bit region to release + * @order: region size (log base 2 of number of bits) to release + * + * This is the complement to __bitmap_find_free_region() and releases + * the found region (by clearing it in the bitmap). + */ +static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order) +{ + bitmap_clear(bitmap, pos, BIT(order)); +} + +/** + * bitmap_allocate_region - allocate bitmap region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @pos: beginning of bit region to allocate + * @order: region size (log base 2 of number of bits) to allocate + * + * Allocate (set bits in) a specified region of a bitmap. + * + * Returns: 0 on success, or %-EBUSY if specified region wasn't + * free (not all bits were zero). + */ +static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order) +{ + unsigned int len = BIT(order); + + if (find_next_bit(bitmap, pos + len, pos) < pos + len) + return -EBUSY; + bitmap_set(bitmap, pos, len); + return 0; +} + +/** + * bitmap_find_free_region - find a contiguous aligned mem region + * @bitmap: array of unsigned longs corresponding to the bitmap + * @bits: number of bits in the bitmap + * @order: region size (log base 2 of number of bits) to find + * + * Find a region of free (zero) bits in a @bitmap of @bits bits and + * allocate them (set them to one). Only consider regions of length + * a power (@order) of two, aligned to that power of two, which + * makes the search algorithm much faster. 
+ * + * Returns: the bit offset in bitmap of the allocated region, + * or -errno on failure. + */ +static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order) +{ + unsigned int pos, end; /* scans bitmap by regions of size order */ + + for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) { + if (!bitmap_allocate_region(bitmap, pos, order)) + return pos; + } + return -ENOMEM; +} + +/** * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap. * @n: u64 value * diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 958ed7e89b..1ab3081c82 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -32,8 +32,6 @@ typedef __u32 __bitwise req_flags_t; #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) /* merge of different types, fail separately */ #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) -/* track inflight for MQ */ -#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) /* don't call prep for this one */ #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) /* use hctx->sched_tags */ diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h index 2580e05a8a..004b38a538 100644 --- a/include/linux/blk-pm.h +++ b/include/linux/blk-pm.h @@ -15,7 +15,6 @@ extern int blk_pre_runtime_suspend(struct request_queue *q); extern void blk_post_runtime_suspend(struct request_queue *q, int err); extern void blk_pre_runtime_resume(struct request_queue *q); extern void blk_post_runtime_resume(struct request_queue *q); -extern void blk_set_runtime_active(struct request_queue *q); #else static inline void blk_pm_runtime_init(struct request_queue *q, struct device *dev) {} diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 92c8997b19..b29ebd5341 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -49,9 +49,10 @@ struct block_device { bool bd_write_holder; bool bd_has_submit_bio; dev_t bd_dev; + struct inode *bd_inode; /* will die */ + atomic_t bd_openers; spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ - struct inode * bd_inode; /* will die */ void * bd_claiming; void * bd_holder; const struct blk_holder_ops *bd_holder_ops; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f59fcd5b49..88e9dd4b71 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1479,14 +1479,25 @@ extern const struct blk_holder_ops fs_holder_ops; #define sb_open_mode(flags) \ (BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 
0 : BLK_OPEN_WRITE)) +struct bdev_handle { + struct block_device *bdev; + void *holder; + blk_mode_t mode; +}; + struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops); struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops); +struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder, + const struct blk_holder_ops *hops); +struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode, + void *holder, const struct blk_holder_ops *hops); int bd_prepare_to_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops); void bd_abort_claiming(struct block_device *bdev, void *holder); void blkdev_put(struct block_device *bdev, void *holder); +void bdev_release(struct bdev_handle *handle); /* just for blk-cgroup, don't use elsewhere */ struct block_device *blkdev_get_no_open(dev_t dev); diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h index e1a3c9c975..cffa38a736 100644 --- a/include/linux/bootmem_info.h +++ b/include/linux/bootmem_info.h @@ -60,7 +60,7 @@ static inline void get_page_bootmem(unsigned long info, struct page *page, static inline void free_bootmem_page(struct page *page) { - kmemleak_free_part(page_to_virt(page), PAGE_SIZE); + kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE); free_reserved_page(page); } #endif diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h index 7b121bd780..0985221d54 100644 --- a/include/linux/bpf-cgroup-defs.h +++ b/include/linux/bpf-cgroup-defs.h @@ -28,19 +28,24 @@ enum cgroup_bpf_attach_type { CGROUP_INET6_BIND, CGROUP_INET4_CONNECT, CGROUP_INET6_CONNECT, + CGROUP_UNIX_CONNECT, CGROUP_INET4_POST_BIND, CGROUP_INET6_POST_BIND, CGROUP_UDP4_SENDMSG, CGROUP_UDP6_SENDMSG, + CGROUP_UNIX_SENDMSG, CGROUP_SYSCTL, CGROUP_UDP4_RECVMSG, CGROUP_UDP6_RECVMSG, + CGROUP_UNIX_RECVMSG, CGROUP_GETSOCKOPT, CGROUP_SETSOCKOPT, CGROUP_INET4_GETPEERNAME, CGROUP_INET6_GETPEERNAME, + CGROUP_UNIX_GETPEERNAME, CGROUP_INET4_GETSOCKNAME, CGROUP_INET6_GETSOCKNAME, + CGROUP_UNIX_GETSOCKNAME, CGROUP_INET_SOCK_RELEASE, CGROUP_LSM_START, CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1, diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 31561e7897..a789266fea 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -48,19 +48,24 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type) CGROUP_ATYPE(CGROUP_INET6_BIND); CGROUP_ATYPE(CGROUP_INET4_CONNECT); CGROUP_ATYPE(CGROUP_INET6_CONNECT); + CGROUP_ATYPE(CGROUP_UNIX_CONNECT); CGROUP_ATYPE(CGROUP_INET4_POST_BIND); CGROUP_ATYPE(CGROUP_INET6_POST_BIND); CGROUP_ATYPE(CGROUP_UDP4_SENDMSG); CGROUP_ATYPE(CGROUP_UDP6_SENDMSG); + CGROUP_ATYPE(CGROUP_UNIX_SENDMSG); CGROUP_ATYPE(CGROUP_SYSCTL); CGROUP_ATYPE(CGROUP_UDP4_RECVMSG); CGROUP_ATYPE(CGROUP_UDP6_RECVMSG); + CGROUP_ATYPE(CGROUP_UNIX_RECVMSG); CGROUP_ATYPE(CGROUP_GETSOCKOPT); CGROUP_ATYPE(CGROUP_SETSOCKOPT); CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME); CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME); + CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME); CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME); CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME); + CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME); CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE); default: return CGROUP_BPF_ATTACH_TYPE_INVALID; @@ -138,11 +143,12 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_setsockopt(struct sock 
*sock, int *level, - int *optname, char __user *optval, + int *optname, sockptr_t optval, int *optlen, char **kernel_optval); + int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, - int optname, char __user *optval, - int __user *optlen, int max_optlen, + int optname, sockptr_t optval, + sockptr_t optlen, int max_optlen, int retval); int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, @@ -289,18 +295,27 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL) +#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL) + #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx) #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx) +#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx) + #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL) #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) \ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL) +#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) \ + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL) + /* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a * fullsock and its parent fullsock cannot be traced by * sk_to_full_sk(). 
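The new CGROUP_UNIX_* attach types and their BPF_CGROUP_RUN_PROG_UNIX_*_LOCK() wrappers above extend the existing inet/UDP cgroup hooks to AF_UNIX sockets. A minimal sketch of the user-space side — not part of this patch, and assuming libbpf's "cgroup/connect_unix" section naming for BPF_CGROUP_UNIX_CONNECT:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Runs for every connect() on an AF_UNIX socket in the attached cgroup. */
SEC("cgroup/connect_unix")
int unix_connect_hook(struct bpf_sock_addr *ctx)
{
	return 1;	/* 1 allows the connect(), 0 rejects it with -EPERM */
}

char _license[] SEC("license") = "GPL";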
@@ -378,7 +393,7 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, ({ \ int __ret = 0; \ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ - get_user(__ret, optlen); \ + copy_from_sockptr(&__ret, optlen, sizeof(int)); \ __ret; \ }) @@ -492,10 +507,13 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; }) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 9b08d792fa..cfc6d2f980 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -55,7 +55,7 @@ struct cgroup; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; extern struct kobject *btf_kobj; -extern struct bpf_mem_alloc bpf_global_ma; +extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma; extern bool bpf_global_ma_set; typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); @@ -184,14 +184,15 @@ enum btf_field_type { BPF_TIMER = (1 << 1), BPF_KPTR_UNREF = (1 << 2), BPF_KPTR_REF = (1 << 3), - BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF, - BPF_LIST_HEAD = (1 << 4), - BPF_LIST_NODE = (1 << 5), - BPF_RB_ROOT = (1 << 6), - BPF_RB_NODE = (1 << 7), + BPF_KPTR_PERCPU = (1 << 4), + BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU, + BPF_LIST_HEAD = (1 << 5), + BPF_LIST_NODE = (1 << 6), + BPF_RB_ROOT = (1 << 7), + BPF_RB_NODE = (1 << 8), BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD | BPF_RB_NODE | BPF_RB_ROOT, - BPF_REFCOUNT = (1 << 8), + BPF_REFCOUNT = (1 << 9), }; typedef void (*btf_dtor_kfunc_t)(void *); @@ -309,6 +310,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type) case BPF_KPTR_UNREF: case BPF_KPTR_REF: return "kptr"; + case BPF_KPTR_PERCPU: + return "percpu_kptr"; case BPF_LIST_HEAD: return "bpf_list_head"; case BPF_LIST_NODE: @@ -334,6 +337,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type) return sizeof(struct bpf_timer); case BPF_KPTR_UNREF: case BPF_KPTR_REF: + case BPF_KPTR_PERCPU: return sizeof(u64); case BPF_LIST_HEAD: return sizeof(struct bpf_list_head); @@ -360,6 +364,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type) return __alignof__(struct bpf_timer); case BPF_KPTR_UNREF: case BPF_KPTR_REF: + case BPF_KPTR_PERCPU: return __alignof__(u64); case BPF_LIST_HEAD: return __alignof__(struct bpf_list_head); @@ -398,6 +403,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr) case BPF_TIMER: case BPF_KPTR_UNREF: case BPF_KPTR_REF: + case BPF_KPTR_PERCPU: break; default: WARN_ON_ONCE(1); @@ -1396,6 +1402,7 @@ struct bpf_prog_aux { u32 stack_depth; u32 id; u32 func_cnt; /* used by non-func 
prog as the number of func progs */ + u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ u32 attach_btf_id; /* in-kernel BTF type id to attach to */ u32 ctx_arg_info_size; @@ -1416,6 +1423,8 @@ struct bpf_prog_aux { bool sleepable; bool tail_call_reachable; bool xdp_has_frags; + bool exception_cb; + bool exception_boundary; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; /* function name for valid attach_btf_id */ @@ -1438,6 +1447,7 @@ struct bpf_prog_aux { int cgroup_atype; /* enum cgroup_bpf_attach_type */ struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; char name[BPF_OBJ_NAME_LEN]; + u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64); #ifdef CONFIG_SECURITY void *security; #endif @@ -2061,6 +2071,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec); bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); void bpf_obj_free_timer(const struct btf_record *rec, void *obj); void bpf_obj_free_fields(const struct btf_record *rec, void *obj); +void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); @@ -2167,12 +2178,12 @@ static inline bool bpf_allow_uninit_stack(void) static inline bool bpf_bypass_spec_v1(void) { - return perfmon_capable(); + return cpu_mitigations_off() || perfmon_capable(); } static inline bool bpf_bypass_spec_v4(void) { - return perfmon_capable(); + return cpu_mitigations_off() || perfmon_capable(); } int bpf_map_new_fd(struct bpf_map *map, int flags); @@ -2425,9 +2436,11 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, struct bpf_reg_state *regs); int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, - struct bpf_reg_state *reg); + struct bpf_reg_state *reg, bool is_ex_cb); int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, struct btf *btf, const struct btf_type *t); +const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt, + int comp_idx, const char *tag_key); struct bpf_prog *bpf_prog_by_id(u32 id); struct bpf_link *bpf_link_by_id(u32 id); @@ -2479,6 +2492,9 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, enum bpf_dynptr_type type, u32 offset, u32 size); void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); + +bool dev_check_flush(void); +bool cpu_map_check_flush(void); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -2923,6 +2939,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, #endif /* CONFIG_BPF_SYSCALL */ #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ +static __always_inline void +bpf_prog_inc_misses_counters(const struct bpf_prog_array *array) +{ + const struct bpf_prog_array_item *item; + struct bpf_prog *prog; + + if (unlikely(!array)) + return; + + item = &array->items[0]; + while ((prog = READ_ONCE(item->prog))) { + bpf_prog_inc_misses_counter(prog); + item++; + } +} + #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) void bpf_sk_reuseport_detach(struct sock *sk); int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, @@ -3204,4 +3236,9 @@ static inline gfp_t bpf_memcg_flags(gfp_t flags) return 
flags; } +static inline bool bpf_is_subprog(const struct bpf_prog *prog) +{ + return prog->aux->func_idx != 0; +} + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index fc0d6f32c6..94baced5a1 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -142,9 +142,13 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter) #ifdef CONFIG_NET BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns) BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) +BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter) +BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx) +BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit) #endif #ifdef CONFIG_PERF_EVENTS BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) #endif BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi) BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops) +BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 2d84d820a7..aa4d19d0bc 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -300,6 +300,7 @@ struct bpf_func_state { bool in_callback_fn; struct tnum callback_ret_range; bool in_async_callback_fn; + bool in_exception_callback_fn; /* For callback calling functions that limit number of possible * callback executions (e.g. bpf_loop) keeps track of current * simulated iteration number. @@ -413,19 +414,18 @@ struct bpf_verifier_state { u32 callback_unroll_depth; }; -#define bpf_get_spilled_reg(slot, frame) \ +#define bpf_get_spilled_reg(slot, frame, mask) \ (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ - (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ + ((1 << frame->stack[slot].slot_type[0]) & (mask))) \ ? &frame->stack[slot].spilled_ptr : NULL) /* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. 
*/ -#define bpf_for_each_spilled_reg(iter, frame, reg) \ - for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ +#define bpf_for_each_spilled_reg(iter, frame, reg, mask) \ + for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \ iter < frame->allocated_stack / BPF_REG_SIZE; \ - iter++, reg = bpf_get_spilled_reg(iter, frame)) + iter++, reg = bpf_get_spilled_reg(iter, frame, mask)) -/* Invoke __expr over regsiters in __vst, setting __state and __reg */ -#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \ +#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \ ({ \ struct bpf_verifier_state *___vstate = __vst; \ int ___i, ___j; \ @@ -437,7 +437,7 @@ struct bpf_verifier_state { __reg = &___regs[___j]; \ (void)(__expr); \ } \ - bpf_for_each_spilled_reg(___j, __state, __reg) { \ + bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \ if (!__reg) \ continue; \ (void)(__expr); \ @@ -445,6 +445,10 @@ struct bpf_verifier_state { } \ }) +/* Invoke __expr over registers in __vst, setting __state and __reg */ +#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \ + bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr) + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; @@ -508,6 +512,7 @@ struct bpf_insn_aux_data { bool zext_dst; /* this insn zero extends dst reg */ bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */ bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */ + bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */ u8 alu_state; /* used in combination with alu_limit */ /* below fields are initialized once */ @@ -572,7 +577,9 @@ struct bpf_subprog_info { bool has_tail_call; bool tail_call_reachable; bool has_ld_abs; + bool is_cb; bool is_async_cb; + bool is_exception_cb; }; struct bpf_verifier_env; @@ -619,6 +626,8 @@ struct bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 used_btf_cnt; /* number of used BTF objects */ u32 id_gen; /* used to generate unique reg IDs */ + u32 hidden_subprog_cnt; /* number of hidden subprogs */ + int exception_callback_subprog; bool explore_alu_limits; bool allow_ptr_leaks; bool allow_uninit_stack; @@ -626,10 +635,11 @@ struct bpf_verifier_env { bool bypass_spec_v1; bool bypass_spec_v4; bool seen_direct_write; + bool seen_exception; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; - struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; + struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */ union { struct bpf_idmap idmap_scratch; struct bpf_idset idset_scratch; diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index c55810a435..1394ba3023 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -11,6 +11,7 @@ #define PHY_ID_BCM50610 0x0143bd60 #define PHY_ID_BCM50610M 0x0143bd70 +#define PHY_ID_BCM5221 0x004061e0 #define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 @@ -331,6 +332,15 @@ #define BCM54XX_WOL_INT_STATUS (MII_BCM54XX_EXP_SEL_WOL + 0x94) +/* BCM5221 Registers */ +#define BCM5221_AEGSR 0x1C +#define BCM5221_AEGSR_MDIX_STATUS BIT(13) +#define BCM5221_AEGSR_MDIX_MAN_SWAP BIT(12) +#define BCM5221_AEGSR_MDIX_DIS BIT(11) + +#define BCM5221_SHDW_AM4_EN_CLK_LPM BIT(2) +#define
BCM5221_SHDW_AM4_FORCE_LPM BIT(1) + /*****************************************************************************/ /* Fast Ethernet Transceiver definitions. */ /*****************************************************************************/ diff --git a/include/linux/btf.h b/include/linux/btf.h index 928113a80a..59d404e228 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -74,6 +74,7 @@ #define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */ #define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */ #define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */ +#define KF_RCU_PROTECTED (1 << 11) /* kfuncs should be protected by an RCU critical section when invoked */ /* * Tag marking a kernel function as a kfunc. This is meant to minimize the @@ -83,6 +84,17 @@ */ #define __bpf_kfunc __used noinline +#define __bpf_kfunc_start_defs() \ + __diag_push(); \ + __diag_ignore_all("-Wmissing-declarations", \ + "Global kfuncs as their definitions will be in BTF");\ + __diag_ignore_all("-Wmissing-prototypes", \ + "Global kfuncs as their definitions will be in BTF") + +#define __bpf_kfunc_end_defs() __diag_pop() +#define __bpf_hook_start() __bpf_kfunc_start_defs() +#define __bpf_hook_end() __bpf_kfunc_end_defs() + /* * Return the name of the passed struct, if it exists, or halt the build if for * example the structure gets renamed. In this way, developers have to revisit diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 44e9de51ee..5f23ee5998 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -198,13 +198,11 @@ void touch_buffer(struct buffer_head *bh); void folio_set_bh(struct buffer_head *bh, struct folio *folio, unsigned long offset); struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size, - bool retry); + gfp_t gfp); struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, bool retry); -void create_empty_buffers(struct page *, unsigned long, - unsigned long b_state); -void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize, - unsigned long b_state); +struct buffer_head *create_empty_buffers(struct folio *folio, + unsigned long blocksize, unsigned long b_state); void end_buffer_read_sync(struct buffer_head *bh, int uptodate); void end_buffer_write_sync(struct buffer_head *bh, int uptodate); void end_buffer_async_write(struct buffer_head *bh, int uptodate); @@ -227,8 +225,8 @@ void __wait_on_buffer(struct buffer_head *); wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, unsigned size); -struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, - unsigned size, gfp_t gfp); +struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block, + unsigned size, gfp_t gfp); void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); void __breadahead(struct block_device *, sector_t block, unsigned int size); @@ -338,17 +336,38 @@ sb_breadahead(struct super_block *sb, sector_t block) __breadahead(sb->s_bdev, block, sb->s_blocksize); } -static inline struct buffer_head * -sb_getblk(struct super_block *sb, sector_t block) +static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, + sector_t block, unsigned size) { - return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); + gfp_t gfp; + + gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); + gfp |= __GFP_NOFAIL; +
return bdev_getblk(bdev, block, size, gfp); } +static inline struct buffer_head *__getblk(struct block_device *bdev, + sector_t block, unsigned size) +{ + gfp_t gfp; -static inline struct buffer_head * -sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp) + gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); + gfp |= __GFP_MOVABLE | __GFP_NOFAIL; + + return bdev_getblk(bdev, block, size, gfp); +} + +static inline struct buffer_head *sb_getblk(struct super_block *sb, + sector_t block) { - return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp); + return __getblk(sb->s_bdev, block, sb->s_blocksize); +} + +static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb, + sector_t block, gfp_t gfp) +{ + return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp); } static inline struct buffer_head * @@ -385,20 +404,6 @@ static inline void lock_buffer(struct buffer_head *bh) __lock_buffer(bh); } -static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, - sector_t block, - unsigned size) -{ - return __getblk_gfp(bdev, block, size, 0); -} - -static inline struct buffer_head *__getblk(struct block_device *bdev, - sector_t block, - unsigned size) -{ - return __getblk_gfp(bdev, block, size, __GFP_MOVABLE); -} - static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags) { if (!buffer_uptodate(bh) && trylock_buffer(bh)) { @@ -450,6 +455,28 @@ __bread(struct block_device *bdev, sector_t block, unsigned size) return __bread_gfp(bdev, block, size, __GFP_MOVABLE); } +/** + * get_nth_bh - Get a reference on the n'th buffer after this one. + * @bh: The buffer to start counting from. + * @count: How many buffers to skip. + * + * This is primarily useful for finding the nth buffer in a folio; in + * that case you pass the head buffer and the byte offset in the folio + * divided by the block size. It can be used for other purposes, but + * it will wrap at the end of the folio rather than returning NULL or + * proceeding to the next folio for you. + * + * Return: The requested buffer with an elevated refcount. 
+ */ +static inline __must_check +struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count) +{ + while (count--) + bh = bh->b_this_page; + get_bh(bh); + return bh; +} + bool block_dirty_folio(struct address_space *mapping, struct folio *folio); #ifdef CONFIG_BUFFER_HEAD diff --git a/include/linux/buildid.h b/include/linux/buildid.h index 3b7a0ff464..8a582d242f 100644 --- a/include/linux/buildid.h +++ b/include/linux/buildid.h @@ -2,10 +2,11 @@ #ifndef _LINUX_BUILDID_H #define _LINUX_BUILDID_H -#include <linux/mm_types.h> +#include <linux/types.h> #define BUILD_ID_SIZE_MAX 20 +struct vm_area_struct; int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size); diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index a5cfd44fab..d504eb4b49 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -73,6 +73,7 @@ struct cacheinfo { struct cpu_cacheinfo { struct cacheinfo *info_list; + unsigned int per_cpu_data_slice_size; unsigned int num_levels; unsigned int num_leaves; bool cpu_map_populated; diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 982ba245eb..1b92aed493 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -195,6 +195,10 @@ int can_restart_now(struct net_device *dev); void can_bus_off(struct net_device *dev); const char *can_get_state_str(const enum can_state state); +void can_state_get_by_berr_counter(const struct net_device *dev, + const struct can_berr_counter *bec, + enum can_state *tx_state, + enum can_state *rx_state); void can_change_state(struct net_device *dev, struct can_frame *cf, enum can_state tx_state, enum can_state rx_state); diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h index bead71b7bc..94ad2c9017 100644 --- a/include/linux/cdx/cdx_bus.h +++ b/include/linux/cdx/cdx_bus.h @@ -21,13 +21,19 @@ struct cdx_controller; enum { + CDX_DEV_BUS_MASTER_CONF, CDX_DEV_RESET_CONF, }; struct cdx_device_config { u8 type; + bool bus_master_enable; }; +typedef int (*cdx_bus_enable_cb)(struct cdx_controller *cdx, u8 bus_num); + +typedef int (*cdx_bus_disable_cb)(struct cdx_controller *cdx, u8 bus_num); + typedef int (*cdx_scan_cb)(struct cdx_controller *cdx); typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx, @@ -35,6 +41,19 @@ typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx, struct cdx_device_config *dev_config); /** + * CDX_DEVICE - macro used to describe a specific CDX device + * @vend: the 16 bit CDX Vendor ID + * @dev: the 16 bit CDX Device ID + * + * This macro is used to create a struct cdx_device_id that matches a + * specific device. The subvendor and subdevice fields will be set to + * CDX_ANY_ID. + */ +#define CDX_DEVICE(vend, dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = CDX_ANY_ID, .subdevice = CDX_ANY_ID + +/** * CDX_DEVICE_DRIVER_OVERRIDE - macro used to describe a CDX device with * override_only flags. * @vend: the 16 bit CDX Vendor ID @@ -42,18 +61,24 @@ typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx, * @driver_override: the 32 bit CDX Device override_only * * This macro is used to create a struct cdx_device_id that matches only a - * driver_override device. + * driver_override device. The subvendor and subdevice fields will be set to + * CDX_ANY_ID. 
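Both ID helpers end up in driver match tables built from struct cdx_device_id (mod_devicetable.h). A sketch with invented vendor/device numbers, using CDX_DEVICE() from above and the CDX_DEVICE_DRIVER_OVERRIDE() variant defined just below; the CDX_ID_F_VFIO_DRIVER_OVERRIDE flag is assumed here as the override value:

static const struct cdx_device_id my_cdx_ids[] = {
	{ CDX_DEVICE(0x10ee, 0x0001) },		/* one specific device */
	{ CDX_DEVICE_DRIVER_OVERRIDE(CDX_ANY_ID, CDX_ANY_ID,
				     CDX_ID_F_VFIO_DRIVER_OVERRIDE) },
	{ },
};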
*/ #define CDX_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \ - .vendor = (vend), .device = (dev), .override_only = (driver_override) + .vendor = (vend), .device = (dev), .subvendor = CDX_ANY_ID,\ + .subdevice = CDX_ANY_ID, .override_only = (driver_override) /** * struct cdx_ops - Callbacks supported by CDX controller. + * @bus_enable: enable bus on the controller + * @bus_disable: disable bus on the controller * @scan: scan the devices on the controller * @dev_configure: configuration like reset, master_enable, * msi_config etc for a CDX device */ struct cdx_ops { + cdx_bus_enable_cb bus_enable; + cdx_bus_disable_cb bus_disable; cdx_scan_cb scan; cdx_dev_configure_cb dev_configure; }; @@ -63,12 +88,14 @@ struct cdx_ops { * @dev: Linux device associated with the CDX controller. * @priv: private data * @id: Controller ID + * @controller_registered: controller registered with bus * @ops: CDX controller ops */ struct cdx_controller { struct device *dev; void *priv; u32 id; + bool controller_registered; struct cdx_ops *ops; }; @@ -78,6 +105,10 @@ struct cdx_controller { * @cdx: CDX controller associated with the device * @vendor: Vendor ID for CDX device * @device: Device ID for CDX device + * @subsystem_vendor: Subsystem Vendor ID for CDX device + * @subsystem_device: Subsystem Device ID for CDX device + * @class: Class for the CDX device + * @revision: Revision of the CDX device * @bus_num: Bus number for this CDX device * @dev_num: Device number for this device * @res: array of MMIO region entries @@ -86,6 +117,8 @@ struct cdx_controller { * @dma_mask: Default DMA mask * @flags: CDX device flags * @req_id: Requestor ID associated with CDX device + * @is_bus: Is this bus device + * @enabled: is this bus enabled * @driver_override: driver name to force a match; do not set directly, * because core frees it; use driver_set_override() to * set or clear it. @@ -95,6 +128,10 @@ struct cdx_device { struct cdx_controller *cdx; u16 vendor; u16 device; + u16 subsystem_vendor; + u16 subsystem_device; + u32 class; + u8 revision; u8 bus_num; u8 dev_num; struct resource res[MAX_CDX_DEV_RESOURCES]; @@ -102,6 +139,8 @@ struct cdx_device { u64 dma_mask; u16 flags; u32 req_id; + bool is_bus; + bool enabled; const char *driver_override; }; @@ -170,4 +209,20 @@ extern struct bus_type cdx_bus_type; */ int cdx_dev_reset(struct device *dev); +/** + * cdx_set_master - enables bus-mastering for CDX device + * @cdx_dev: the CDX device to enable + * + * Return: 0 for success, -errno on failure + */ +int cdx_set_master(struct cdx_device *cdx_dev); + +/** + * cdx_clear_master - disables bus-mastering for CDX device + * @cdx_dev: the CDX device to disable + * + * Return: 0 for success, -errno on failure + */ +int cdx_clear_master(struct cdx_device *cdx_dev); + #endif /* _CDX_BUS_H_ */ diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h index d5a5da838c..11a92a9460 100644 --- a/include/linux/ceph/ceph_debug.h +++ b/include/linux/ceph/ceph_debug.h @@ -19,12 +19,25 @@ pr_debug("%.*s %12.12s:%-4d : " fmt, \ 8 - (int)sizeof(KBUILD_MODNAME), " ", \ kbasename(__FILE__), __LINE__, ##__VA_ARGS__) +# define doutc(client, fmt, ...) \ + pr_debug("%.*s %12.12s:%-4d : [%pU %llu] " fmt, \ + 8 - (int)sizeof(KBUILD_MODNAME), " ", \ + kbasename(__FILE__), __LINE__, \ + &client->fsid, client->monc.auth->global_id, \ + ##__VA_ARGS__) # else /* faux printk call just to see any compiler warnings. */ # define dout(fmt, ...) 
do { \ if (0) \ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ } while (0) +# define doutc(client, fmt, ...) do { \ + if (0) \ + printk(KERN_DEBUG "[%pU %llu] " fmt, \ + &client->fsid, \ + client->monc.auth->global_id, \ + ##__VA_ARGS__); \ + } while (0) # endif #else @@ -33,7 +46,32 @@ * or, just wrap pr_debug */ # define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__) +# define doutc(client, fmt, ...) \ + pr_debug(" [%pU %llu] %s: " fmt, &client->fsid, \ + client->monc.auth->global_id, __func__, ##__VA_ARGS__) #endif +#define pr_notice_client(client, fmt, ...) \ + pr_notice("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_info_client(client, fmt, ...) \ + pr_info("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_warn_client(client, fmt, ...) \ + pr_warn("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_warn_once_client(client, fmt, ...) \ + pr_warn_once("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_err_client(client, fmt, ...) \ + pr_err("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_warn_ratelimited_client(client, fmt, ...) \ + pr_warn_ratelimited("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) +#define pr_err_ratelimited_client(client, fmt, ...) \ + pr_err_ratelimited("[%pU %llu]: " fmt, &client->fsid, \ + client->monc.auth->global_id, ##__VA_ARGS__) + #endif diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index f3b3593254..ee1d0e5f97 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -357,6 +357,11 @@ enum { CEPH_MDS_OP_RENAMESNAP = 0x01403, }; +#define IS_CEPH_MDS_OP_NEWINODE(op) (op == CEPH_MDS_OP_CREATE || \ + op == CEPH_MDS_OP_MKNOD || \ + op == CEPH_MDS_OP_MKDIR || \ + op == CEPH_MDS_OP_SYMLINK) + extern const char *ceph_mds_op_name(int op); #define CEPH_SETATTR_MODE (1 << 0) @@ -497,7 +502,7 @@ struct ceph_mds_request_head_legacy { union ceph_mds_request_args args; } __attribute__ ((packed)); -#define CEPH_MDS_REQUEST_HEAD_VERSION 2 +#define CEPH_MDS_REQUEST_HEAD_VERSION 3 struct ceph_mds_request_head_old { __le16 version; /* struct version */ @@ -528,6 +533,9 @@ struct ceph_mds_request_head { __le32 ext_num_retry; /* new count retry attempts */ __le32 ext_num_fwd; /* new count fwd attempts */ + + __le32 struct_len; /* to store size of struct ceph_mds_request_head */ + __le32 owner_uid, owner_gid; /* used for OPs which create inodes */ } __attribute__ ((packed)); /* cap/lease release record */ diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h deleted file mode 100644 index 4c3e0648dc..0000000000 --- a/include/linux/ceph/mdsmap.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _FS_CEPH_MDSMAP_H -#define _FS_CEPH_MDSMAP_H - -#include <linux/bug.h> -#include <linux/ceph/types.h> - -/* - * mds map - describe servers in the mds cluster. 
- * - * we limit fields to those the client actually cares about */ -struct ceph_mds_info { - u64 global_id; - struct ceph_entity_addr addr; - s32 state; - int num_export_targets; - bool laggy; - u32 *export_targets; -}; - -struct ceph_mdsmap { - u32 m_epoch, m_client_epoch, m_last_failure; - u32 m_root; - u32 m_session_timeout; /* seconds */ - u32 m_session_autoclose; /* seconds */ - u64 m_max_file_size; - u64 m_max_xattr_size; /* maximum size for xattrs blob */ - u32 m_max_mds; /* expected up:active mds number */ - u32 m_num_active_mds; /* actual up:active mds number */ - u32 possible_max_rank; /* possible max rank index */ - struct ceph_mds_info *m_info; - - /* which object pools file data can be stored in */ - int m_num_data_pg_pools; - u64 *m_data_pg_pools; - u64 m_cas_pg_pool; - - bool m_enabled; - bool m_damaged; - int m_num_laggy; -}; - -static inline struct ceph_entity_addr * -ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) -{ - if (w >= m->possible_max_rank) - return NULL; - return &m->m_info[w].addr; -} - -static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w) -{ - BUG_ON(w < 0); - if (w >= m->possible_max_rank) - return CEPH_MDS_STATE_DNE; - return m->m_info[w].state; -} - -static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) -{ - if (w >= 0 && w < m->possible_max_rank) - return m->m_info[w].laggy; - return false; -} - -extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); -struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2); -extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); -extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m); - -#endif diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 2eaaabbe98..1717cc57cd 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -283,7 +283,7 @@ struct ceph_msg { struct kref kref; bool more_to_follow; bool needs_out_seq; - bool sparse_read; + u64 sparse_read_total; int front_alloc_len; struct ceph_msgpool *pool; diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index b658961156..7a9a40163c 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -19,7 +19,7 @@ struct ceph_monmap { struct ceph_fsid fsid; u32 epoch; u32 num_mon; - struct ceph_entity_inst mon_inst[]; + struct ceph_entity_inst mon_inst[] __counted_by(num_mon); }; struct ceph_mon_client; diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index bf98239567..5edf9fffa0 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -45,6 +45,7 @@ enum ceph_sparse_read_state { CEPH_SPARSE_READ_HDR = 0, CEPH_SPARSE_READ_EXTENTS, CEPH_SPARSE_READ_DATA_LEN, + CEPH_SPARSE_READ_DATA_PRE, CEPH_SPARSE_READ_DATA, }; @@ -64,7 +65,7 @@ struct ceph_sparse_read { u64 sr_req_len; /* orig request length */ u64 sr_pos; /* current pos in buffer */ int sr_index; /* current extent index */ - __le32 sr_datalen; /* length of actual data */ + u32 sr_datalen; /* length of actual data */ u32 sr_count; /* extent count in reply */ int sr_ext_len; /* length of extent array */ struct ceph_sparse_extent *sr_extent; /* extent array */ @@ -278,7 +279,7 @@ struct ceph_osd_request { int r_attempts; u32 r_map_dne_bound; - struct ceph_osd_req_op r_ops[]; + struct ceph_osd_req_op r_ops[] __counted_by(r_num_ops); }; struct ceph_request_redirect { diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 265da00a1a..4a6b6b77cc 100644 ---
a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -115,6 +115,11 @@ enum { * Enable recursive subtree protection */ CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18), + + /* + * Enable hugetlb accounting for the memory controller. + */ + CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19), }; /* cftype->flags */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b307013b9c..0ef0af6608 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -40,13 +40,11 @@ struct kernel_clone_args; #define CGROUP_WEIGHT_DFL 100 #define CGROUP_WEIGHT_MAX 10000 -/* walk only threadgroup leaders */ -#define CSS_TASK_ITER_PROCS (1U << 0) -/* walk all threaded css_sets in the domain */ -#define CSS_TASK_ITER_THREADED (1U << 1) - -/* internal flags */ -#define CSS_TASK_ITER_SKIPPED (1U << 16) +enum { + CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */ + CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */ + CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */ +}; /* a css_task_iter should be treated as an opaque object */ struct css_task_iter { diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index 53f1a7a932..9f1a9c455b 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -7,8 +7,9 @@ /* * DEFINE_FREE(name, type, free): * simple helper macro that defines the required wrapper for a __free() - * based cleanup function. @free is an expression using '_T' to access - * the variable. + * based cleanup function. @free is an expression using '_T' to access the + * variable. @free should typically include a NULL test before calling a + * function, see the example below. * * __free(name): * variable attribute to add a scoped based cleanup to the variable. @@ -17,6 +18,9 @@ * like a non-atomic xchg(var, NULL), such that the cleanup function will * be inhibited -- provided it sanely deals with a NULL value. * + * NOTE: this has __must_check semantics so that it is harder to accidentally + * leak the resource. + * * return_ptr(p): * returns p while inhibiting the __free(). * @@ -24,6 +28,8 @@ * * DEFINE_FREE(kfree, void *, if (_T) kfree(_T)) * + * void *alloc_obj(...) + * { * struct obj *p __free(kfree) = kmalloc(...); * if (!p) * return NULL; @@ -32,6 +38,24 @@ * return NULL; * * return_ptr(p); + * } + * + * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though + * kfree() is fine to be called with a NULL value. This is on purpose. This way + * the compiler sees the end of our alloc_obj() function as: + * + * tmp = p; + * p = NULL; + * if (p) + * kfree(p); + * return tmp; + * + * And through the magic of value-propagation and dead-code-elimination, it + * eliminates the actual cleanup call and compiles into: + * + * return p; + * + * Without the NULL test it turns into a mess and the compiler can't help us. 
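 *
 * A companion sketch for the transfer side (obj, setup_obj() and obj_list
 * are placeholders, not kernel APIs): once the object is published,
 * no_free_ptr() inhibits the scoped kfree() and must have its result
 * consumed:
 *
 *	struct obj *p __free(kfree) = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	if (setup_obj(p))
 *		return -EINVAL;	/* kfree(p) runs automatically */
 *	list_add(&no_free_ptr(p)->node, &obj_list);	/* ownership moved */
 *	return 0;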
*/ #define DEFINE_FREE(_name, _type, _free) \ @@ -39,8 +63,17 @@ #define __free(_name) __cleanup(__free_##_name) +#define __get_and_null_ptr(p) \ + ({ __auto_type __ptr = &(p); \ + __auto_type __val = *__ptr; \ + *__ptr = NULL; __val; }) + +static inline __must_check +const volatile void * __must_check_fn(const volatile void *val) +{ return val; } + #define no_free_ptr(p) \ - ({ __auto_type __ptr = (p); (p) = NULL; __ptr; }) + ((typeof(p)) __must_check_fn(__get_and_null_ptr(p))) #define return_ptr(p) return no_free_ptr(p) diff --git a/include/linux/closure.h b/include/linux/closure.h new file mode 100644 index 0000000000..c554c6a087 --- /dev/null +++ b/include/linux/closure.h @@ -0,0 +1,415 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CLOSURE_H +#define _LINUX_CLOSURE_H + +#include <linux/llist.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/workqueue.h> + +/* + * Closure is perhaps the most overused and abused term in computer science, but + * since I've been unable to come up with anything better you're stuck with it + * again. + * + * What are closures? + * + * They embed a refcount. The basic idea is they count "things that are in + * progress" - in flight bios, some other thread that's doing something else - + * anything you might want to wait on. + * + * The refcount may be manipulated with closure_get() and closure_put(). + * closure_put() is where many of the interesting things happen, when it causes + * the refcount to go to 0. + * + * Closures can be used to wait on things both synchronously and asynchronously, + * and synchronous and asynchronous use can be mixed without restriction. To + * wait synchronously, use closure_sync() - you will sleep until your closure's + * refcount hits 1. + * + * To wait asynchronously, use + * continue_at(cl, next_function, workqueue); + * + * passing it, as you might expect, the function to run when nothing is pending + * and the workqueue to run that function out of. + * + * continue_at() also, critically, requires a 'return' immediately following the + * location where this macro is referenced, to return to the calling function. + * There's good reason for this. + * + * To safely use closures asynchronously, they must always have a refcount while + * they are running, owned by the thread that is running them. Otherwise, suppose + * you submit some bios and wish to have a function run when they all complete: + * + * foo_endio(struct bio *bio) + * { + * closure_put(cl); + * } + * + * closure_init(cl); + * + * do_stuff(); + * closure_get(cl); + * bio1->bi_endio = foo_endio; + * bio_submit(bio1); + * + * do_more_stuff(); + * closure_get(cl); + * bio2->bi_endio = foo_endio; + * bio_submit(bio2); + * + * continue_at(cl, complete_some_read, system_wq); + * + * If the closure's refcount started at 0, complete_some_read() could run before the + * second bio was submitted - which is almost always not what you want! More + * importantly, it wouldn't be possible to say whether the original thread or + * complete_some_read()'s thread owned the closure - and whatever state it was + * associated with! + * + * So, closure_init() initializes a closure's refcount to 1 - and when a + * closure_fn is run, the refcount will be reset to 1 first. + * + * Then, the rule is - if you got the refcount with closure_get(), release it + * with closure_put() (i.e., in a bio->bi_endio function).
If you have a refcount + * on a closure because you called closure_init() or you were run out of a + * closure - _always_ use continue_at(). Doing so consistently will help + * eliminate an entire class of particularly pernicious races. + * + * Lastly, you might have a wait list dedicated to a specific event, and have no + * need for specifying the condition - you just want to wait until someone runs + * closure_wake_up() on the appropriate wait list. In that case, just use + * closure_wait(). It will return either true or false, depending on whether the + * closure was already on a wait list or not - a closure can only be on one wait + * list at a time. + * + * Parents: + * + * closure_init() takes two arguments - it takes the closure to initialize, and + * a (possibly null) parent. + * + * If parent is non null, the new closure will have a refcount for its lifetime; + * a closure is considered to be "finished" when its refcount hits 0 and the + * function to run is null. Hence + * + * continue_at(cl, NULL, NULL); + * + * returns up the (spaghetti) stack of closures, precisely like normal return + * returns up the C stack. continue_at() with non null fn is better thought of + * as doing a tail call. + * + * All this implies that a closure should typically be embedded in a particular + * struct (which its refcount will normally control the lifetime of), and that + * struct can very much be thought of as a stack frame. + */ + +struct closure; +struct closure_syncer; +typedef void (closure_fn) (struct work_struct *); +extern struct dentry *bcache_debug; + +struct closure_waitlist { + struct llist_head list; +}; + +enum closure_state { + /* + * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by + * the thread that owns the closure, and cleared by the thread that's + * waking up the closure. + * + * The rest are for debugging and don't affect behaviour: + * + * CLOSURE_RUNNING: Set when a closure is running (i.e. by + * closure_init() and when closure_put() runs the next function), and + * must be cleared before remaining hits 0. Primarily to help guard + * against incorrect usage and accidentally transferring references. + * continue_at() and closure_return() clear it for you; if you're doing + * something unusual you can use closure_set_dead() which also helps + * annotate where references are being transferred.
+ */ + + CLOSURE_BITS_START = (1U << 26), + CLOSURE_DESTRUCTOR = (1U << 26), + CLOSURE_WAITING = (1U << 28), + CLOSURE_RUNNING = (1U << 30), +}; + +#define CLOSURE_GUARD_MASK \ + ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1) + +#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1) +#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING) + +struct closure { + union { + struct { + struct workqueue_struct *wq; + struct closure_syncer *s; + struct llist_node list; + closure_fn *fn; + }; + struct work_struct work; + }; + + struct closure *parent; + + atomic_t remaining; + bool closure_get_happened; + +#ifdef CONFIG_DEBUG_CLOSURES +#define CLOSURE_MAGIC_DEAD 0xc054dead +#define CLOSURE_MAGIC_ALIVE 0xc054a11e + + unsigned int magic; + struct list_head all; + unsigned long ip; + unsigned long waiting_on; +#endif +}; + +void closure_sub(struct closure *cl, int v); +void closure_put(struct closure *cl); +void __closure_wake_up(struct closure_waitlist *list); +bool closure_wait(struct closure_waitlist *list, struct closure *cl); +void __closure_sync(struct closure *cl); + +static inline unsigned closure_nr_remaining(struct closure *cl) +{ + return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK; +} + +/** + * closure_sync - sleep until a closure has nothing left to wait on + * + * Sleeps until the refcount hits 1 - the thread that's running the closure owns + * the last refcount. + */ +static inline void closure_sync(struct closure *cl) +{ +#ifdef CONFIG_DEBUG_CLOSURES + BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened); +#endif + + if (cl->closure_get_happened) + __closure_sync(cl); +} + +#ifdef CONFIG_DEBUG_CLOSURES + +void closure_debug_create(struct closure *cl); +void closure_debug_destroy(struct closure *cl); + +#else + +static inline void closure_debug_create(struct closure *cl) {} +static inline void closure_debug_destroy(struct closure *cl) {} + +#endif + +static inline void closure_set_ip(struct closure *cl) +{ +#ifdef CONFIG_DEBUG_CLOSURES + cl->ip = _THIS_IP_; +#endif +} + +static inline void closure_set_ret_ip(struct closure *cl) +{ +#ifdef CONFIG_DEBUG_CLOSURES + cl->ip = _RET_IP_; +#endif +} + +static inline void closure_set_waiting(struct closure *cl, unsigned long f) +{ +#ifdef CONFIG_DEBUG_CLOSURES + cl->waiting_on = f; +#endif +} + +static inline void closure_set_stopped(struct closure *cl) +{ + atomic_sub(CLOSURE_RUNNING, &cl->remaining); +} + +static inline void set_closure_fn(struct closure *cl, closure_fn *fn, + struct workqueue_struct *wq) +{ + closure_set_ip(cl); + cl->fn = fn; + cl->wq = wq; +} + +static inline void closure_queue(struct closure *cl) +{ + struct workqueue_struct *wq = cl->wq; + /** + * Changes made to closure, work_struct, or a couple of other structs + * may leave work.func pointing at the wrong location. + */ + BUILD_BUG_ON(offsetof(struct closure, fn) + != offsetof(struct work_struct, func)); + + if (wq) { + INIT_WORK(&cl->work, cl->work.func); + BUG_ON(!queue_work(wq, &cl->work)); + } else + cl->fn(&cl->work); +} + +/** + * closure_get - increment a closure's refcount + */ +static inline void closure_get(struct closure *cl) +{ + cl->closure_get_happened = true; + +#ifdef CONFIG_DEBUG_CLOSURES + BUG_ON((atomic_inc_return(&cl->remaining) & + CLOSURE_REMAINING_MASK) <= 1); +#else + atomic_inc(&cl->remaining); +#endif +} + +/** + * closure_init - Initialize a closure, setting the refcount to 1 + * @cl: closure to initialize + * @parent: parent of the new closure.
cl will take a refcount on it for its + * lifetime; may be NULL. + */ +static inline void closure_init(struct closure *cl, struct closure *parent) +{ + cl->fn = NULL; + cl->parent = parent; + if (parent) + closure_get(parent); + + atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); + cl->closure_get_happened = false; + + closure_debug_create(cl); + closure_set_ip(cl); +} + +static inline void closure_init_stack(struct closure *cl) +{ + memset(cl, 0, sizeof(struct closure)); + atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); +} + +/** + * closure_wake_up - wake up all closures on a wait list, + * with memory barrier + */ +static inline void closure_wake_up(struct closure_waitlist *list) +{ + /* Memory barrier for the wait list */ + smp_mb(); + __closure_wake_up(list); +} + +#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws) +#define closure_type(name, type, member) \ + struct closure *cl = container_of(ws, struct closure, work); \ + type *name = container_of(cl, type, member) + +/** + * continue_at - jump to another function with barrier + * + * After @cl is no longer waiting on anything (i.e. all outstanding refs have + * been dropped with closure_put()), it will resume execution at @fn running out + * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly). + * + * This is because after calling continue_at() you no longer have a ref on @cl, + * and whatever @cl owns may be freed out from under you - a running closure fn + * has a ref on its own closure which continue_at() drops. + * + * Note you are expected to immediately return after using this macro. + */ +#define continue_at(_cl, _fn, _wq) \ +do { \ + set_closure_fn(_cl, _fn, _wq); \ + closure_sub(_cl, CLOSURE_RUNNING + 1); \ +} while (0) + +/** + * closure_return - finish execution of a closure + * + * This is used to indicate that @cl is finished: when all outstanding refs on + * @cl have been dropped @cl's ref on its parent closure (as passed to + * closure_init()) will be dropped, if one was specified - thus this can be + * thought of as returning to the parent closure. + */ +#define closure_return(_cl) continue_at((_cl), NULL, NULL) + +/** + * continue_at_nobarrier - jump to another function without barrier + * + * Causes @fn to be executed out of @cl, in @wq context (or called directly if + * @wq is NULL). + * + * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn, + * thus it's not safe to touch anything protected by @cl after a + * continue_at_nobarrier(). + */ +#define continue_at_nobarrier(_cl, _fn, _wq) \ +do { \ + set_closure_fn(_cl, _fn, _wq); \ + closure_queue(_cl); \ +} while (0) + +/** + * closure_return_with_destructor - finish execution of a closure, + * with destructor + * + * Works like closure_return(), except @destructor will be called when all + * outstanding refs on @cl have been dropped; @destructor may be used to safely + * free the memory occupied by @cl, and it is called with the ref on the parent + * closure still held - so @destructor could safely return an item to a + * freelist protected by @cl's parent. 
+ */ +#define closure_return_with_destructor(_cl, _destructor) \ +do { \ + set_closure_fn(_cl, _destructor, NULL); \ + closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \ +} while (0) + +/** + * closure_call - execute @fn out of a new, uninitialized closure + * + * Typically used when running out of one closure, and we want to run @fn + * asynchronously out of a new closure - @parent will then wait for @cl to + * finish. + */ +static inline void closure_call(struct closure *cl, closure_fn fn, + struct workqueue_struct *wq, + struct closure *parent) +{ + closure_init(cl, parent); + continue_at_nobarrier(cl, fn, wq); +} + +#define __closure_wait_event(waitlist, _cond) \ +do { \ + struct closure cl; \ + \ + closure_init_stack(&cl); \ + \ + while (1) { \ + closure_wait(waitlist, &cl); \ + if (_cond) \ + break; \ + closure_sync(&cl); \ + } \ + closure_wake_up(waitlist); \ + closure_sync(&cl); \ +} while (0) + +#define closure_wait_event(waitlist, _cond) \ +do { \ + if (!(_cond)) \ + __closure_wait_event(waitlist, _cond); \ +} while (0) + +#endif /* _LINUX_CLOSURE_H */ diff --git a/include/linux/comedi/comedi_8254.h b/include/linux/comedi/comedi_8254.h index d8264417e5..d527f04400 100644 --- a/include/linux/comedi/comedi_8254.h +++ b/include/linux/comedi/comedi_8254.h @@ -12,6 +12,8 @@ #define _COMEDI_8254_H #include <linux/types.h> +#include <linux/errno.h> +#include <linux/err.h> struct comedi_device; struct comedi_insn; @@ -57,10 +59,24 @@ struct comedi_subdevice; /* counter maps zero to 0x10000 */ #define I8254_MAX_COUNT 0x10000 +struct comedi_8254; + +/** + * typedef comedi_8254_iocb_fn - call-back function type for 8254 register access + * @i8254: pointer to struct comedi_8254 + * @dir: direction (0 = read, 1 = write) + * @reg: register number + * @val: value to write + * + * Return: Register value when reading, 0 when writing. + */ +typedef unsigned int comedi_8254_iocb_fn(struct comedi_8254 *i8254, int dir, + unsigned int reg, unsigned int val); + /** * struct comedi_8254 - private data used by this module - * @iobase: PIO base address of the registers (in/out) - * @mmio: MMIO base address of the registers (read/write) + * @iocb: I/O call-back function for register access + * @context: context for register access (e.g. 
a base address) * @iosize: I/O size used to access the registers (b/w/l) * @regshift: register gap shift * @osc_base: cascaded oscillator speed in ns @@ -76,8 +92,8 @@ struct comedi_subdevice; * @insn_config: driver specific (*insn_config) callback */ struct comedi_8254 { - unsigned long iobase; - void __iomem *mmio; + comedi_8254_iocb_fn *iocb; + unsigned long context; unsigned int iosize; unsigned int regshift; unsigned int osc_base; @@ -122,13 +138,24 @@ void comedi_8254_set_busy(struct comedi_8254 *i8254, void comedi_8254_subdevice_init(struct comedi_subdevice *s, struct comedi_8254 *i8254); -struct comedi_8254 *comedi_8254_init(unsigned long iobase, - unsigned int osc_base, - unsigned int iosize, - unsigned int regshift); -struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio, - unsigned int osc_base, - unsigned int iosize, - unsigned int regshift); +#ifdef CONFIG_HAS_IOPORT +struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase, + unsigned int osc_base, + unsigned int iosize, + unsigned int regshift); +#else +static inline struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase, + unsigned int osc_base, + unsigned int iosize, + unsigned int regshift) +{ + return ERR_PTR(-ENXIO); +} +#endif + +struct comedi_8254 *comedi_8254_mm_alloc(void __iomem *mmio, + unsigned int osc_base, + unsigned int iosize, + unsigned int regshift); #endif /* _COMEDI_8254_H */ diff --git a/include/linux/comedi/comedi_8255.h b/include/linux/comedi/comedi_8255.h index b2a5bc6b3a..d24a69da38 100644 --- a/include/linux/comedi/comedi_8255.h +++ b/include/linux/comedi/comedi_8255.h @@ -10,6 +10,8 @@ #ifndef _COMEDI_8255_H #define _COMEDI_8255_H +#include <linux/errno.h> + #define I8255_SIZE 0x04 #define I8255_DATA_A_REG 0x00 @@ -27,16 +29,26 @@ struct comedi_device; struct comedi_subdevice; -int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s, - int (*io)(struct comedi_device *dev, int dir, int port, - int data, unsigned long regbase), - unsigned long regbase); +#ifdef CONFIG_HAS_IOPORT +int subdev_8255_io_init(struct comedi_device *dev, struct comedi_subdevice *s, + unsigned long regbase); +#else +static inline int subdev_8255_io_init(struct comedi_device *dev, + struct comedi_subdevice *s, + unsigned long regbase) +{ + return -ENXIO; +} +#endif int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s, - int (*io)(struct comedi_device *dev, int dir, int port, - int data, unsigned long regbase), unsigned long regbase); +int subdev_8255_cb_init(struct comedi_device *dev, struct comedi_subdevice *s, + int (*io)(struct comedi_device *dev, int dir, int port, + int data, unsigned long context), + unsigned long context); + unsigned long subdev_8255_regbase(struct comedi_subdevice *s); #endif diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h index 0a1150900e..c08416a736 100644 --- a/include/linux/comedi/comedidev.h +++ b/include/linux/comedi/comedidev.h @@ -633,7 +633,7 @@ extern const struct comedi_lrange range_unknown; */ struct comedi_lrange { int length; - struct comedi_krange range[]; + struct comedi_krange range[] __counted_by(length); }; /** diff --git a/include/linux/compat.h b/include/linux/compat.h index 1cfa4f0f49..233f61ec8a 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -581,7 +581,6 @@ asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id, struct io_event __user *events, struct __kernel_timespec __user *timeout, const struct __compat_aio_sigset __user *usig); -asmlinkage 
long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); asmlinkage long compat_sys_epoll_pwait(int epfd, struct epoll_event __user *events, int maxevents, int timeout, diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 9b673fefce..ddab1ef22b 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -14,11 +14,6 @@ #undef __cleanup #define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func))) -/* same as gcc, this was present in clang-2.6 so we can assume it works - * with any version that can compile the kernel - */ -#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) - /* all clang versions usable with the kernel support KASAN ABI version 5 */ #define KASAN_ABI_VERSION 5 diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 7af9e34ec2..687aaaff80 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -39,8 +39,6 @@ #define __noretpoline __attribute__((__indirect_branch__("keep"))) #endif -#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) - #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) #define __latent_entropy __attribute__((latent_entropy)) #endif @@ -66,6 +64,26 @@ __builtin_unreachable(); \ } while (0) +/* + * GCC 'asm goto' with outputs miscompiles certain code sequences: + * + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921 + * + * Work around it via the same compiler barrier quirk that we used + * to use for the old 'asm goto' workaround. + * + * Also, always mark such 'asm goto' statements as volatile: all + * asm goto statements are supposed to be volatile as per the + * documentation, but some versions of gcc didn't actually do + * that for asms with outputs: + * + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619 + */ +#ifdef CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND +#define asm_goto_output(x...) \ + do { asm volatile goto(x); asm (""); } while (0) +#endif + #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index d7779a18b2..bb1339c705 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -177,10 +177,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, __asm__ ("" : "=r" (var) : "0" (var)) #endif -/* Not-quite-unique ID. */ -#ifndef __UNIQUE_ID -# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) -#endif +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) /** * data_race - mark an expression as containing intentional data races @@ -231,6 +228,14 @@ static inline void *offset_to_ptr(const int *off) #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) /* + * This returns a constant expression while determining if an argument is + * a constant expression, most importantly without evaluating the argument. + * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de> + */ +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) + +/* * Whether 'type' is a signed type or an unsigned type. Supports scalar types, * bool and also pointer types. 
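A quick sketch of what the __is_constexpr() helper defined above provides (illustration only, not part of the patch): the test is itself an integer constant expression and never evaluates its argument, so it can feed compile-time checks such as the GENMASK() input validation.

	BUILD_BUG_ON(!__is_constexpr(8));	/* 8 is an integer constant expression */

	int x = 8;
	/* __is_constexpr(x) is 0, and the test never evaluates x */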
*/ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index c523c66837..0caf354cb9 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -2,6 +2,15 @@ #ifndef __LINUX_COMPILER_TYPES_H #define __LINUX_COMPILER_TYPES_H +/* + * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21. + * In the meantime, to support gcc < 10, we implement __has_builtin + * by hand. + */ +#ifndef __has_builtin +#define __has_builtin(x) (0) +#endif + #ifndef __ASSEMBLY__ /* @@ -134,17 +143,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # define __preserve_most #endif -/* Builtins */ - -/* - * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21. - * In the meantime, to support gcc < 10, we implement __has_builtin - * by hand. - */ -#ifndef __has_builtin -#define __has_builtin(x) (0) -#endif - /* Compiler specific macros. */ #ifdef __clang__ #include <linux/compiler-clang.h> @@ -352,8 +350,27 @@ struct ftrace_likely_data { # define __realloc_size(x, ...) #endif -#ifndef asm_volatile_goto -#define asm_volatile_goto(x...) asm goto(x) +/* + * When the size of an allocated object is needed, use the best available + * mechanism to find it. (For cases where sizeof() cannot be used.) + */ +#if __has_builtin(__builtin_dynamic_object_size) +#define __struct_size(p) __builtin_dynamic_object_size(p, 0) +#define __member_size(p) __builtin_dynamic_object_size(p, 1) +#else +#define __struct_size(p) __builtin_object_size(p, 0) +#define __member_size(p) __builtin_object_size(p, 1) +#endif + +/* + * Some versions of gcc do not mark 'asm goto' volatile: + * + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979 + * + * We do it here by hand, because it doesn't hurt. + */ +#ifndef asm_goto_output +#define asm_goto_output(x...) asm volatile goto(x) #endif #ifdef CONFIG_CC_HAS_ASM_INLINE diff --git a/include/linux/console.h b/include/linux/console.h index 7de11c763e..779d388af8 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -101,6 +101,13 @@ extern const struct consw dummy_con; /* dummy console buffer */ extern const struct consw vga_con; /* VGA text console */ extern const struct consw newport_con; /* SGI Newport console */ +struct screen_info; +#ifdef CONFIG_VGA_CONSOLE +void vgacon_register_screen(struct screen_info *si); +#else +static inline void vgacon_register_screen(struct screen_info *si) { } +#endif + int con_is_bound(const struct consw *csw); int do_unregister_con_driver(const struct consw *csw); int do_take_over_console(const struct consw *sw, int first, int last, int deflt); @@ -156,6 +163,8 @@ static inline int con_debug_leave(void) * /dev/kmesg which requires a larger output buffer. * @CON_SUSPENDED: Indicates if a console is suspended. If true, the * printing callbacks must not be called. + * @CON_NBCON: Console can operate outside of the legacy style console_lock + * constraints. */ enum cons_flags { CON_PRINTBUFFER = BIT(0), @@ -166,6 +175,111 @@ enum cons_flags { CON_BRL = BIT(5), CON_EXTENDED = BIT(6), CON_SUSPENDED = BIT(7), + CON_NBCON = BIT(8), +}; + +/** + * struct nbcon_state - console state for nbcon consoles + * @atom: Compound of the state fields for atomic operations + * + * @req_prio: The priority of a handover request + * @prio: The priority of the current owner + * @unsafe: Console is busy in a non takeover region + * @unsafe_takeover: A hostile takeover in an unsafe state happened in the + * past. The console cannot be safe until re-initialized. 
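
The asm_goto_output() split above is easiest to see as a shape-only sketch (hypothetical macro; the asm template is a placeholder comment, not working assembly). Wrapping keeps the statement volatile everywhere and, when CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND is set, appends the barrier that works around GCC PR 113921:

#define example_load_or_bail(ptr, val, fault_label)			\
	asm_goto_output("/* arch-specific load of %1 into %0 */"	\
			: "=r" (val)					\
			: "m" (*(ptr))					\
			: /* clobbers */				\
			: fault_label)
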
+ * @cpu: The CPU on which the owner runs + * + * To be used for reading and preparing of the value stored in the nbcon + * state variable @console::nbcon_state. + * + * The @prio and @req_prio fields are particularly important to allow + * spin-waiting to timeout and give up without the risk of a waiter being + * assigned the lock after giving up. + */ +struct nbcon_state { + union { + unsigned int atom; + struct { + unsigned int prio : 2; + unsigned int req_prio : 2; + unsigned int unsafe : 1; + unsigned int unsafe_takeover : 1; + unsigned int cpu : 24; + }; + }; +}; + +/* + * The nbcon_state struct is used to easily create and interpret values that + * are stored in the @console::nbcon_state variable. Ensure this struct stays + * within the size boundaries of the atomic variable's underlying type in + * order to avoid any accidental truncation. + */ +static_assert(sizeof(struct nbcon_state) <= sizeof(int)); + +/** + * nbcon_prio - console owner priority for nbcon consoles + * @NBCON_PRIO_NONE: Unused + * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage + * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...) + * @NBCON_PRIO_PANIC: Panic output + * @NBCON_PRIO_MAX: The number of priority levels + * + * A higher priority context can takeover the console when it is + * in the safe state. The final attempt to flush consoles in panic() + * can be allowed to do so even in an unsafe state (Hope and pray). + */ +enum nbcon_prio { + NBCON_PRIO_NONE = 0, + NBCON_PRIO_NORMAL, + NBCON_PRIO_EMERGENCY, + NBCON_PRIO_PANIC, + NBCON_PRIO_MAX, +}; + +struct console; +struct printk_buffers; + +/** + * struct nbcon_context - Context for console acquire/release + * @console: The associated console + * @spinwait_max_us: Limit for spin-wait acquire + * @prio: Priority of the context + * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can + * be used only with NBCON_PRIO_PANIC @prio. It + * might cause a system freeze when the console + * is used later. 
+ * @backlog: Ringbuffer has pending records + * @pbufs: Pointer to the text buffer for this context + * @seq: The sequence number to print for this context + */ +struct nbcon_context { + /* members set by caller */ + struct console *console; + unsigned int spinwait_max_us; + enum nbcon_prio prio; + unsigned int allow_unsafe_takeover : 1; + + /* members set by emit */ + unsigned int backlog : 1; + + /* members set by acquire */ + struct printk_buffers *pbufs; + u64 seq; +}; + +/** + * struct nbcon_write_context - Context handed to the nbcon write callbacks + * @ctxt: The core console context + * @outbuf: Pointer to the text buffer for output + * @len: Length to write + * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred + */ +struct nbcon_write_context { + struct nbcon_context __private ctxt; + char *outbuf; + unsigned int len; + bool unsafe_takeover; }; /** @@ -187,6 +301,11 @@ enum cons_flags { * @dropped: Number of unreported dropped ringbuffer records * @data: Driver private data * @node: hlist node for the console list + * + * @write_atomic: Write callback for atomic context + * @nbcon_state: State for nbcon consoles + * @nbcon_seq: Sequence number of the next record for nbcon to print + * @pbufs: Pointer to nbcon private buffer */ struct console { char name[16]; @@ -206,6 +325,13 @@ struct console { unsigned long dropped; void *data; struct hlist_node node; + + /* nbcon console specific members */ + bool (*write_atomic)(struct console *con, + struct nbcon_write_context *wctxt); + atomic_t __private nbcon_state; + atomic_long_t __private nbcon_seq; + struct printk_buffers *pbufs; }; #ifdef CONFIG_LOCKDEP @@ -332,6 +458,16 @@ static inline bool console_is_registered(const struct console *con) lockdep_assert_console_list_lock_held(); \ hlist_for_each_entry(con, &console_list, node) +#ifdef CONFIG_PRINTK +extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt); +extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt); +extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt); +#else +static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; } +static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; } +static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; } +#endif + extern int console_set_on_cmdline; extern struct console *early_console; @@ -340,7 +476,7 @@ enum con_flush_mode { CONSOLE_REPLAY_ALL, }; -extern int add_preferred_console(char *name, int idx, char *options); +extern int add_preferred_console(const char *name, const short idx, char *options); extern void console_force_preferred_locked(struct console *con); extern void register_console(struct console *); extern int unregister_console(struct console *); diff --git a/include/linux/const.h b/include/linux/const.h index 435ddd72d2..81b8aae5a8 100644 --- a/include/linux/const.h +++ b/include/linux/const.h @@ -3,12 +3,4 @@ #include <vdso/const.h> -/* - * This returns a constant expression while determining if an argument is - * a constant expression, most importantly without evaluating the argument. - * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de> - */ -#define __is_constexpr(x) \ - (sizeof(int) == sizeof(*(8 ? 
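
A minimal sketch of how an nbcon console driver might use the helpers declared above inside its write_atomic() callback (the foo_* names are hypothetical, not from this diff):

static bool foo_write_atomic(struct console *con,
			     struct nbcon_write_context *wctxt)
{
	if (!nbcon_enter_unsafe(wctxt))
		return false;	/* ownership passed to a higher priority */

	foo_hw_putbuf(con, wctxt->outbuf, wctxt->len);	/* hypothetical */

	return nbcon_exit_unsafe(wctxt);
}
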
((void *)((long)(x) * 0l)) : (int *)8))) - #endif /* _LINUX_CONST_H */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index eb768a866f..fc80944190 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -155,6 +155,8 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; } static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { } #endif /* !CONFIG_HOTPLUG_CPU */ +DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock()) + #ifdef CONFIG_PM_SLEEP_SMP extern int freeze_secondary_cpus(int primary); extern void thaw_secondary_cpus(void); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 71d186d693..1c5ca92a05 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -1193,14 +1193,6 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_ } #endif -#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) -void sched_cpufreq_governor_change(struct cpufreq_policy *policy, - struct cpufreq_governor *old_gov); -#else -static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy, - struct cpufreq_governor *old_gov) { } -#endif - extern unsigned int arch_freq_get_on_cpu(int cpu); #ifndef arch_set_freq_scale diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 624d4a38c3..efc0c0b07e 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -172,6 +172,7 @@ enum cpuhp_state { CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, + CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, CPUHP_AP_JCORE_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, @@ -206,7 +207,6 @@ enum cpuhp_state { CPUHP_AP_KVM_ONLINE, CPUHP_AP_SCHED_WAIT_EMPTY, CPUHP_AP_SMPBOOT_THREADS, - CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_IRQ_AFFINITY_ONLINE, CPUHP_AP_BLK_MQ_ONLINE, CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index f10fb87d49..cfb545841a 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -4,7 +4,7 @@ /* * Cpumasks provide a bitmap suitable for representing the - * set of CPU's in a system, one bit position per CPU number. In general, + * set of CPUs in a system, one bit position per CPU number. In general, * only nr_cpu_ids (<= NR_CPUS) bits are valid. */ #include <linux/kernel.h> @@ -97,7 +97,7 @@ static inline void set_nr_cpu_ids(unsigned int nr) * * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. * - * The cpu_possible_mask is fixed at boot time, as the set of CPU id's + * The cpu_possible_mask is fixed at boot time, as the set of CPU IDs * that it is possible might ever be plugged in at anytime during the * life of that system boot. The cpu_present_mask is dynamic(*), * representing which CPUs are currently plugged in. And @@ -112,7 +112,7 @@ static inline void set_nr_cpu_ids(unsigned int nr) * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. * * Subtleties: - * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode + * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode * assumption that their single CPU is online. The UP * cpu_{online,possible,present}_masks are placebos. Changing them * will have no useful affect on the following num_*_cpus() @@ -155,7 +155,7 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu) * cpumask_first - get the first cpu in a cpumask * @srcp: the cpumask pointer * - * Returns >= nr_cpu_ids if no cpus set. 
+ * Return: >= nr_cpu_ids if no cpus set. */ static inline unsigned int cpumask_first(const struct cpumask *srcp) { @@ -166,7 +166,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp) * cpumask_first_zero - get the first unset cpu in a cpumask * @srcp: the cpumask pointer * - * Returns >= nr_cpu_ids if all cpus are set. + * Return: >= nr_cpu_ids if all cpus are set. */ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) { @@ -178,7 +178,7 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) * @srcp1: the first input * @srcp2: the second input * - * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). + * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). */ static inline unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2) @@ -190,7 +190,7 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask * cpumask_last - get the last CPU in a cpumask * @srcp: - the cpumask pointer * - * Returns >= nr_cpumask_bits if no CPUs set. + * Return: >= nr_cpumask_bits if no CPUs set. */ static inline unsigned int cpumask_last(const struct cpumask *srcp) { @@ -199,10 +199,10 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp) /** * cpumask_next - get the next cpu in a cpumask - * @n: the cpu prior to the place to search (ie. return will be > @n) + * @n: the cpu prior to the place to search (i.e. return will be > @n) * @srcp: the cpumask pointer * - * Returns >= nr_cpu_ids if no further cpus set. + * Return: >= nr_cpu_ids if no further cpus set. */ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) @@ -215,10 +215,10 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp) /** * cpumask_next_zero - get the next unset cpu in a cpumask - * @n: the cpu prior to the place to search (ie. return will be > @n) + * @n: the cpu prior to the place to search (i.e. return will be > @n) * @srcp: the cpumask pointer * - * Returns >= nr_cpu_ids if no further cpus unset. + * Return: >= nr_cpu_ids if no further cpus unset. */ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) { @@ -254,11 +254,11 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp); /** * cpumask_next_and - get the next cpu in *src1p & *src2p - * @n: the cpu prior to the place to search (ie. return will be > @n) + * @n: the cpu prior to the place to search (i.e. return will be > @n) * @src1p: the first cpumask pointer * @src2p: the second cpumask pointer * - * Returns >= nr_cpu_ids if no further cpus set in both. + * Return: >= nr_cpu_ids if no further cpus set in both. */ static inline unsigned int cpumask_next_and(int n, const struct cpumask *src1p, @@ -373,7 +373,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta * @cpu: the cpu to ignore. * * Often used to find any cpu but smp_processor_id() in a mask. - * Returns >= nr_cpu_ids if no cpus set. + * Return: >= nr_cpu_ids if no cpus set. */ static inline unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) @@ -388,11 +388,11 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) } /** - * cpumask_nth - get the first cpu in a cpumask + * cpumask_nth - get the Nth cpu in a cpumask * @srcp: the cpumask pointer - * @cpu: the N'th cpu to find, starting from 0 + * @cpu: the Nth cpu to find, starting from 0 * - * Returns >= nr_cpu_ids if such cpu doesn't exist. 
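
The "Return: >= nr_cpu_ids" convention the reworded kernel-doc spells out is what makes the canonical open-coded scan terminate; a minimal sketch:

static void example_dump_online_cpus(void)
{
	unsigned int cpu;

	for (cpu = cpumask_first(cpu_online_mask);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, cpu_online_mask))
		pr_info("cpu%u is online\n", cpu);
}
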
+ * Return: >= nr_cpu_ids if such cpu doesn't exist. */ static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp) { @@ -400,12 +400,12 @@ static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *s } /** - * cpumask_nth_and - get the first cpu in 2 cpumasks + * cpumask_nth_and - get the Nth cpu in 2 cpumasks * @srcp1: the cpumask pointer * @srcp2: the cpumask pointer - * @cpu: the N'th cpu to find, starting from 0 + * @cpu: the Nth cpu to find, starting from 0 * - * Returns >= nr_cpu_ids if such cpu doesn't exist. + * Return: >= nr_cpu_ids if such cpu doesn't exist. */ static inline unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1, @@ -416,12 +416,12 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1, } /** - * cpumask_nth_andnot - get the first cpu set in 1st cpumask, and clear in 2nd. + * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd. * @srcp1: the cpumask pointer * @srcp2: the cpumask pointer - * @cpu: the N'th cpu to find, starting from 0 + * @cpu: the Nth cpu to find, starting from 0 * - * Returns >= nr_cpu_ids if such cpu doesn't exist. + * Return: >= nr_cpu_ids if such cpu doesn't exist. */ static inline unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1, @@ -436,9 +436,9 @@ unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1, * @srcp1: the cpumask pointer * @srcp2: the cpumask pointer * @srcp3: the cpumask pointer - * @cpu: the N'th cpu to find, starting from 0 + * @cpu: the Nth cpu to find, starting from 0 * - * Returns >= nr_cpu_ids if such cpu doesn't exist. + * Return: >= nr_cpu_ids if such cpu doesn't exist. */ static __always_inline unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1, @@ -497,7 +497,7 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * - * Returns true if @cpu is set in @cpumask, else returns false + * Return: true if @cpu is set in @cpumask, else returns false */ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask) { @@ -509,9 +509,9 @@ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpum * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * - * Returns true if @cpu is set in old bitmap of @cpumask, else returns false - * * test_and_set_bit wrapper for cpumasks. + * + * Return: true if @cpu is set in old bitmap of @cpumask, else returns false */ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) { @@ -523,9 +523,9 @@ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cp * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * - * Returns true if @cpu is set in old bitmap of @cpumask, else returns false - * * test_and_clear_bit wrapper for cpumasks. 
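
As the corrected summaries note, the "Nth" helpers are 0-based; a result >= nr_cpu_ids means no such CPU exists. A tiny sketch:

static unsigned int example_third_online_cpu(void)
{
	return cpumask_nth(2, cpu_online_mask);	/* index 2 == third CPU */
}
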
+ * + * Return: true if @cpu is set in old bitmap of @cpumask, else returns false */ static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) { @@ -560,7 +560,7 @@ static inline void cpumask_clear(struct cpumask *dstp) * @src1p: the first input * @src2p: the second input * - * If *@dstp is empty, returns false, else returns true + * Return: false if *@dstp is empty, else returns true */ static inline bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, @@ -603,7 +603,7 @@ static inline void cpumask_xor(struct cpumask *dstp, * @src1p: the first input * @src2p: the second input * - * If *@dstp is empty, returns false, else returns true + * Return: false if *@dstp is empty, else returns true */ static inline bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, @@ -617,6 +617,8 @@ static inline bool cpumask_andnot(struct cpumask *dstp, * cpumask_equal - *src1p == *src2p * @src1p: the first input * @src2p: the second input + * + * Return: true if the cpumasks are equal, false if not */ static inline bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p) @@ -630,6 +632,9 @@ static inline bool cpumask_equal(const struct cpumask *src1p, * @src1p: the first input * @src2p: the second input * @src3p: the third input + * + * Return: true if first cpumask ORed with second cpumask == third cpumask, + * otherwise false */ static inline bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p, @@ -643,6 +648,9 @@ static inline bool cpumask_or_equal(const struct cpumask *src1p, * cpumask_intersects - (*src1p & *src2p) != 0 * @src1p: the first input * @src2p: the second input + * + * Return: true if first cpumask ANDed with second cpumask is non-empty, + * otherwise false */ static inline bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p) @@ -656,7 +664,7 @@ static inline bool cpumask_intersects(const struct cpumask *src1p, * @src1p: the first input * @src2p: the second input * - * Returns true if *@src1p is a subset of *@src2p, else returns false + * Return: true if *@src1p is a subset of *@src2p, else returns false */ static inline bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p) @@ -668,6 +676,8 @@ static inline bool cpumask_subset(const struct cpumask *src1p, /** * cpumask_empty - *srcp == 0 * @srcp: the cpumask to that all cpus < nr_cpu_ids are clear. + * + * Return: true if srcp is empty (has no bits set), else false */ static inline bool cpumask_empty(const struct cpumask *srcp) { @@ -677,6 +687,8 @@ static inline bool cpumask_empty(const struct cpumask *srcp) /** * cpumask_full - *srcp == 0xFFFFFFFF... * @srcp: the cpumask to that all cpus < nr_cpu_ids are set. + * + * Return: true if srcp is full (has all bits set), else false */ static inline bool cpumask_full(const struct cpumask *srcp) { @@ -686,6 +698,8 @@ static inline bool cpumask_full(const struct cpumask *srcp) /** * cpumask_weight - Count of bits in *srcp * @srcp: the cpumask to count bits (< nr_cpu_ids) in. + * + * Return: count of bits set in *srcp */ static inline unsigned int cpumask_weight(const struct cpumask *srcp) { @@ -696,6 +710,8 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp) * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2) * @srcp1: the cpumask to count bits (< nr_cpu_ids) in. * @srcp2: the cpumask to count bits (< nr_cpu_ids) in. 
+ * + * Return: count of bits set in both *srcp1 and *srcp2 */ static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2) @@ -744,7 +760,7 @@ static inline void cpumask_copy(struct cpumask *dstp, * cpumask_any - pick a "random" cpu from *srcp * @srcp: the input cpumask * - * Returns >= nr_cpu_ids if no cpus set. + * Return: >= nr_cpu_ids if no cpus set. */ #define cpumask_any(srcp) cpumask_first(srcp) @@ -753,7 +769,7 @@ static inline void cpumask_copy(struct cpumask *dstp, * @mask1: the first input cpumask * @mask2: the second input cpumask * - * Returns >= nr_cpu_ids if no cpus set. + * Return: >= nr_cpu_ids if no cpus set. */ #define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2)) @@ -769,7 +785,7 @@ static inline void cpumask_copy(struct cpumask *dstp, * @len: the length of the buffer * @dstp: the cpumask to set. * - * Returns -errno, or 0 for success. + * Return: -errno, or 0 for success. */ static inline int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp) @@ -783,7 +799,7 @@ static inline int cpumask_parse_user(const char __user *buf, int len, * @len: the length of the buffer * @dstp: the cpumask to set. * - * Returns -errno, or 0 for success. + * Return: -errno, or 0 for success. */ static inline int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp) @@ -797,7 +813,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, * @buf: the buffer to extract from * @dstp: the cpumask to set. * - * Returns -errno, or 0 for success. + * Return: -errno, or 0 for success. */ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) { @@ -809,7 +825,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) * @buf: the buffer to extract from * @dstp: the cpumask to set. * - * Returns -errno, or 0 for success. + * Return: -errno, or 0 for success. */ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) { @@ -817,7 +833,9 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) } /** - * cpumask_size - size to allocate for a 'struct cpumask' in bytes + * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes + * + * Return: size to allocate for a &struct cpumask in bytes */ static inline unsigned int cpumask_size(void) { @@ -831,7 +849,7 @@ static inline unsigned int cpumask_size(void) * little more difficult, we typedef cpumask_var_t to an array or a * pointer: doing &mask on an array is a noop, so it still works. * - * ie. + * i.e. * cpumask_var_t tmpmask; * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) * return -ENOMEM; @@ -887,6 +905,8 @@ bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) * a nop returning a constant 1 (in <linux/cpumask.h>). * * See alloc_cpumask_var_node. + * + * Return: %true if allocation succeeded, %false if not */ static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) @@ -1025,7 +1045,7 @@ set_cpu_dying(unsigned int cpu, bool dying) } /** - * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * + * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask * * @bitmap: the bitmap * * There are a few places where cpumask_var_t isn't appropriate and @@ -1068,6 +1088,8 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) * interface gives only a momentary snapshot and is not protected against * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held * region. 
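
The parser kernel-doc above now states the "0 or -errno" convention explicitly; a minimal sketch ("0-3,8" is a CPU list, whereas cpumask_parse() would expect a hex mask):

static int example_parse_cpulist(void)
{
	cpumask_var_t mask;
	int err;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = cpulist_parse("0-3,8", mask);
	if (!err)
		pr_info("parsed: %*pbl\n", cpumask_pr_args(mask));

	free_cpumask_var(mask);
	return err;
}
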
+ * + * Return: momentary snapshot of the number of online CPUs */ static __always_inline unsigned int num_online_cpus(void) { @@ -1160,7 +1182,7 @@ static inline bool cpu_dying(unsigned int cpu) * @mask: the cpumask to copy * @buf: the buffer to copy into * - * Returns the length of the (null-terminated) @buf string, zero if + * Return: the length of the (null-terminated) @buf string, zero if * nothing is copied. */ static inline ssize_t @@ -1183,7 +1205,7 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) * cpumask; Typically used by bin_attribute to export cpumask bitmask * ABI. * - * Returns the length of how many bytes have been copied, excluding + * Return: the length of how many bytes have been copied, excluding * terminating '\0'. */ static inline ssize_t @@ -1204,6 +1226,9 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, * * Everything is same with the above cpumap_print_bitmask_to_buf() * except the print format. + * + * Return: the length of how many bytes have been copied, excluding + * terminating '\0'. */ static inline ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask, diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 0c06561bf5..5126a4fecb 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -5,6 +5,14 @@ #include <linux/linkage.h> #include <linux/elfcore.h> #include <linux/elf.h> +#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION +#include <asm/crash_core.h> +#endif + +/* Location of a reserved region to hold the crash kernel. + */ +extern struct resource crashk_res; +extern struct resource crashk_low_res; #define CRASH_CORE_NOTE_NAME "CORE" #define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4) @@ -79,12 +87,43 @@ Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len); void final_note(Elf_Word *buf); +#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION +#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE +#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20) +#endif +#endif + int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, - unsigned long long *crash_size, unsigned long long *crash_base); -int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, - unsigned long long *crash_size, unsigned long long *crash_base); -int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, - unsigned long long *crash_size, unsigned long long *crash_base); + unsigned long long *crash_size, unsigned long long *crash_base, + unsigned long long *low_size, bool *high); + +#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION +#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE +#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20) +#endif +#ifndef CRASH_ALIGN +#define CRASH_ALIGN SZ_2M +#endif +#ifndef CRASH_ADDR_LOW_MAX +#define CRASH_ADDR_LOW_MAX SZ_4G +#endif +#ifndef CRASH_ADDR_HIGH_MAX +#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM() +#endif + +void __init reserve_crashkernel_generic(char *cmdline, + unsigned long long crash_size, + unsigned long long crash_base, + unsigned long long crash_low_size, + bool high); +#else +static inline void __init reserve_crashkernel_generic(char *cmdline, + unsigned long long crash_size, + unsigned long long crash_base, + unsigned long long crash_low_size, + bool high) +{} +#endif /* Alignment required for elf header segment */ #define ELF_CORE_HEADER_ALIGN 4096 @@ -92,7 +131,7 @@ int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, 
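
The extended parse_crashkernel() prototype and reserve_crashkernel_generic() above are meant to be chained from an arch init path; a sketch modeled on how an architecture might wire them up (local names are ours, error handling trimmed):

static void __init example_reserve_crashkernel(void)
{
	unsigned long long low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
	unsigned long long crash_size, crash_base;
	bool high = false;

	if (parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
			      &crash_size, &crash_base, &low_size, &high))
		return;

	reserve_crashkernel_generic(boot_command_line, crash_size,
				    crash_base, low_size, high);
}
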
struct crash_mem { unsigned int max_nr_ranges; unsigned int nr_ranges; - struct range ranges[]; + struct range ranges[] __counted_by(max_nr_ranges); }; extern int crash_exclude_mem_range(struct crash_mem *mem, diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 0f3a656293..acc55626af 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -50,6 +50,7 @@ void vmcore_cleanup(void); #define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) #endif +#ifndef is_kdump_kernel /* * is_kdump_kernel() checks whether this kernel is booting after a panic of * previous kernel or not. This is determined by checking if previous kernel @@ -64,6 +65,7 @@ static inline bool is_kdump_kernel(void) { return elfcorehdr_addr != ELFCORE_ADDR_MAX; } +#endif /* is_vmcore_usable() checks if the kernel is booting after a panic and * the vmcore region is usable. @@ -75,7 +77,8 @@ static inline bool is_kdump_kernel(void) static inline int is_vmcore_usable(void) { - return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0; + return elfcorehdr_addr != ELFCORE_ADDR_ERR && + elfcorehdr_addr != ELFCORE_ADDR_MAX ? 1 : 0; } /* vmcore_unusable() marks the vmcore as unusable, @@ -84,8 +87,7 @@ static inline int is_vmcore_usable(void) static inline void vmcore_unusable(void) { - if (is_kdump_kernel()) - elfcorehdr_addr = ELFCORE_ADDR_ERR; + elfcorehdr_addr = ELFCORE_ADDR_ERR; } /** diff --git a/include/linux/cred.h b/include/linux/cred.h index bb55703e11..2976f534a7 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/key.h> #include <linux/atomic.h> +#include <linux/refcount.h> #include <linux/uidgid.h> #include <linux/sched.h> #include <linux/sched/user.h> @@ -23,7 +24,7 @@ struct inode; * COW Supplementary groups list */ struct group_info { - atomic_t usage; + refcount_t usage; int ngroups; kgid_t gid[]; } __randomize_layout; @@ -39,7 +40,7 @@ struct group_info { */ static inline struct group_info *get_group_info(struct group_info *gi) { - atomic_inc(&gi->usage); + refcount_inc(&gi->usage); return gi; } @@ -49,7 +50,7 @@ static inline struct group_info *get_group_info(struct group_info *gi) */ #define put_group_info(group_info) \ do { \ - if (atomic_dec_and_test(&(group_info)->usage)) \ + if (refcount_dec_and_test(&(group_info)->usage)) \ groups_free(group_info); \ } while (0) @@ -172,6 +173,20 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred) } /** + * get_new_cred_many - Get references on a new set of credentials + * @cred: The new credentials to reference + * @nr: Number of references to acquire + * + * Get references on the specified set of new credentials. The caller must + * release all acquired references. + */ +static inline struct cred *get_new_cred_many(struct cred *cred, int nr) +{ + atomic_long_add(nr, &cred->usage); + return cred; +} + +/** * get_new_cred - Get a reference on a new set of credentials * @cred: The new credentials to reference * @@ -180,16 +195,16 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred) */ static inline struct cred *get_new_cred(struct cred *cred) { - atomic_long_inc(&cred->usage); - return cred; + return get_new_cred_many(cred, 1); } /** - * get_cred - Get a reference on a set of credentials + * get_cred_many - Get references on a set of credentials * @cred: The credentials to reference + * @nr: Number of references to acquire * - * Get a reference on the specified set of credentials. 
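
With group_info::usage switched to refcount_t above, misuse saturates and warns instead of silently wrapping; the lifecycle itself is unchanged. A sketch (groups_alloc() returns with usage == 1):

static int example_groups(void)
{
	struct group_info *gi = groups_alloc(1);

	if (!gi)
		return -ENOMEM;
	gi->gid[0] = KGIDT_INIT(1000);

	get_group_info(gi);	/* 1 -> 2 */
	put_group_info(gi);	/* 2 -> 1 */
	put_group_info(gi);	/* 1 -> 0, groups_free() */
	return 0;
}
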
The caller must - * release the reference. If %NULL is passed, it is returned with no action. + * Get references on the specified set of credentials. The caller must release + * all acquired reference. If %NULL is passed, it is returned with no action. * * This is used to deal with a committed set of credentials. Although the * pointer is const, this will temporarily discard the const and increment the @@ -197,13 +212,27 @@ static inline struct cred *get_new_cred(struct cred *cred) * accidental alteration of a set of credentials that should be considered * immutable. */ -static inline const struct cred *get_cred(const struct cred *cred) +static inline const struct cred *get_cred_many(const struct cred *cred, int nr) { struct cred *nonconst_cred = (struct cred *) cred; if (!cred) return cred; nonconst_cred->non_rcu = 0; - return get_new_cred(nonconst_cred); + return get_new_cred_many(nonconst_cred, nr); +} + +/* + * get_cred - Get a reference on a set of credentials + * @cred: The credentials to reference + * + * Get a reference on the specified set of credentials. The caller must + * release the reference. If %NULL is passed, it is returned with no action. + * + * This is used to deal with a committed set of credentials. + */ +static inline const struct cred *get_cred(const struct cred *cred) +{ + return get_cred_many(cred, 1); } static inline const struct cred *get_cred_rcu(const struct cred *cred) @@ -220,6 +249,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred) /** * put_cred - Release a reference to a set of credentials * @cred: The credentials to release + * @nr: Number of references to release * * Release a reference to a set of credentials, deleting them when the last ref * is released. If %NULL is passed, nothing is done. @@ -228,16 +258,28 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred) * on task_struct are attached by const pointers to prevent accidental * alteration of otherwise immutable credential sets. */ -static inline void put_cred(const struct cred *_cred) +static inline void put_cred_many(const struct cred *_cred, int nr) { struct cred *cred = (struct cred *) _cred; if (cred) { - if (atomic_long_dec_and_test(&(cred)->usage)) + if (atomic_long_sub_and_test(nr, &cred->usage)) __put_cred(cred); } } +/* + * put_cred - Release a reference to a set of credentials + * @cred: The credentials to release + * + * Release a reference to a set of credentials, deleting them when the last ref + * is released. If %NULL is passed, nothing is done. 
+ */ +static inline void put_cred(const struct cred *cred) +{ + put_cred_many(cred, 1); +} + /** * current_cred - Access the current task's subjective credentials * diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 31f6fee0c3..b164da5e12 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -24,6 +24,7 @@ #define CRYPTO_ALG_TYPE_CIPHER 0x00000001 #define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 #define CRYPTO_ALG_TYPE_AEAD 0x00000003 +#define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_SIG 0x00000007 @@ -35,8 +36,6 @@ #define CRYPTO_ALG_TYPE_SHASH 0x0000000e #define CRYPTO_ALG_TYPE_AHASH 0x0000000f -#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e -#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e #define CRYPTO_ALG_LARVAL 0x00000010 @@ -111,7 +110,6 @@ * crypto_aead_walksize() (with the remainder going at the end), no chunk * can cross a page boundary or a scatterlist element boundary. * ahash: - * - The result buffer must be aligned to the algorithm's alignmask. * - crypto_ahash_finup() must not be used unless the algorithm implements * ->finup() natively. */ @@ -279,18 +277,20 @@ struct compress_alg { * @cra_ctxsize: Size of the operational context of the transformation. This * value informs the kernel crypto API about the memory size * needed to be allocated for the transformation context. - * @cra_alignmask: Alignment mask for the input and output data buffer. The data - * buffer containing the input data for the algorithm must be - * aligned to this alignment mask. The data buffer for the - * output data must be aligned to this alignment mask. Note that - * the Crypto API will do the re-alignment in software, but - * only under special conditions and there is a performance hit. - * The re-alignment happens at these occasions for different - * @cra_u types: cipher -- For both input data and output data - * buffer; ahash -- For output hash destination buf; shash -- - * For output hash destination buf. - * This is needed on hardware which is flawed by design and - * cannot pick data from arbitrary addresses. + * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is + * 1 less than the alignment, in bytes, that the algorithm + * implementation requires for input and output buffers. When + * the crypto API is invoked with buffers that are not aligned + * to this alignment, the crypto API automatically utilizes + * appropriately aligned temporary buffers to comply with what + * the algorithm needs. (For scatterlists this happens only if + * the algorithm uses the skcipher_walk helper functions.) This + * misalignment handling carries a performance penalty, so it is + * preferred that algorithms do not set a nonzero alignmask. + * Also, crypto API users may wish to allocate buffers aligned + * to the alignmask of the algorithm being used, in order to + * avoid the API having to realign them. Note: the alignmask is + * not supported for hash algorithms and is always 0 for them. * @cra_priority: Priority of this transformation implementation. 
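
The *_many() variants above batch N reference operations into a single atomic op, e.g. when fanning a cred out to N queued items; a sketch ("nitems" is ours):

static const struct cred *example_grab_creds(int nitems)
{
	return get_cred_many(current_cred(), nitems);
}

/* later, once all nitems consumers are done: */
static void example_drop_creds(const struct cred *cred, int nitems)
{
	put_cred_many(cred, nitems);	/* or one put_cred() per consumer */
}
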
In case * multiple transformations with same @cra_name are available to * the Crypto API, the kernel will use the one with highest diff --git a/include/linux/damon.h b/include/linux/damon.h index a953d7083c..e00ddf1ed3 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -40,9 +40,24 @@ struct damon_addr_range { * @ar: The address range of the region. * @sampling_addr: Address of the sample for the next access check. * @nr_accesses: Access frequency of this region. + * @nr_accesses_bp: @nr_accesses in basis point (0.01%) that updated for + * each sampling interval. * @list: List head for siblings. * @age: Age of this region. * + * @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and be + * increased for every &damon_attrs->sample_interval if an access to the region + * during the last sampling interval is found. The update of this field should + * not be done with direct access but with the helper function, + * damon_update_region_access_rate(). + * + * @nr_accesses_bp is another representation of @nr_accesses in basis point + * (1 in 10,000) that updated for every &damon_attrs->sample_interval in a + * manner similar to moving sum. By the algorithm, this value becomes + * @nr_accesses * 10000 for every &struct damon_attrs->aggr_interval. This can + * be used when the aggregation interval is too huge and therefore cannot wait + * for it before getting the access monitoring results. + * * @age is initially zero, increased for each aggregation interval, and reset * to zero again if the access frequency is significantly changed. If two * regions are merged into a new region, both @nr_accesses and @age of the new @@ -52,6 +67,7 @@ struct damon_region { struct damon_addr_range ar; unsigned long sampling_addr; unsigned int nr_accesses; + unsigned int nr_accesses_bp; struct list_head list; unsigned int age; @@ -298,24 +314,24 @@ struct damos_access_pattern { * struct damos - Represents a Data Access Monitoring-based Operation Scheme. * @pattern: Access pattern of target regions. * @action: &damo_action to be applied to the target regions. + * @apply_interval_us: The time between applying the @action. * @quota: Control the aggressiveness of this scheme. * @wmarks: Watermarks for automated (in)activation of this scheme. * @filters: Additional set of &struct damos_filter for &action. * @stat: Statistics of this scheme. * @list: List head for siblings. * - * For each aggregation interval, DAMON finds regions which fit in the + * For each @apply_interval_us, DAMON finds regions which fit in the * &pattern and applies &action to those. To avoid consuming too much * CPU time or IO resources for the &action, &quota is used. + * + * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead. + * * To do the work only when needed, schemes can be activated for specific * system situations using &wmarks. If all schemes that registered to the * monitoring context are inactive, DAMON stops monitoring either, and just * repeatedly checks the watermarks. * - * If all schemes that registered to a &struct damon_ctx are inactive, DAMON - * stops monitoring and just repeatedly checks the watermarks.
- * * Before applying the &action to a memory region, &struct damon_operations * implementation could check pages of the region and skip &action to respect * &filters @@ -327,6 +343,14 @@ struct damos_access_pattern { struct damos { struct damos_access_pattern pattern; enum damos_action action; + unsigned long apply_interval_us; +/* private: internal use only */ + /* + * number of sample intervals that should be passed before applying + * @action + */ + unsigned long next_apply_sis; +/* public: */ struct damos_quota quota; struct damos_watermarks wmarks; struct list_head filters; @@ -472,13 +496,14 @@ struct damon_callback { * regions. * * For each @sample_interval, DAMON checks whether each region is accessed or - * not. It aggregates and keeps the access information (number of accesses to - * each region) for @aggr_interval time. DAMON also checks whether the target - * memory regions need update (e.g., by ``mmap()`` calls from the application, - * in case of virtual memory monitoring) and applies the changes for each - * @ops_update_interval. All time intervals are in micro-seconds. - * Please refer to &struct damon_operations and &struct damon_callback for more - * detail. + * not during the last @sample_interval. If such access is found, DAMON + * aggregates the information by increasing &damon_region->nr_accesses for + * @aggr_interval time. For each @aggr_interval, the count is reset. DAMON + * also checks whether the target memory regions need update (e.g., by + * ``mmap()`` calls from the application, in case of virtual memory monitoring) + * and applies the changes for each @ops_update_interval. All time intervals + * are in micro-seconds. Please refer to &struct damon_operations and &struct + * damon_callback for more detail. */ struct damon_attrs { unsigned long sample_interval; @@ -620,6 +645,8 @@ void damon_add_region(struct damon_region *r, struct damon_target *t); void damon_destroy_region(struct damon_region *r, struct damon_target *t); int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, unsigned int nr_ranges); +void damon_update_region_access_rate(struct damon_region *r, bool accessed, + struct damon_attrs *attrs); struct damos_filter *damos_new_filter(enum damos_filter_type type, bool matching); @@ -627,7 +654,9 @@ void damos_add_filter(struct damos *s, struct damos_filter *f); void damos_destroy_filter(struct damos_filter *f); struct damos *damon_new_scheme(struct damos_access_pattern *pattern, - enum damos_action action, struct damos_quota *quota, + enum damos_action action, + unsigned long apply_interval_us, + struct damos_quota *quota, struct damos_watermarks *wmarks); void damon_add_scheme(struct damon_ctx *ctx, struct damos *s); void damon_destroy_scheme(struct damos *s); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 6b351e009f..3da2f0545d 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -251,6 +251,7 @@ extern struct dentry * d_make_root(struct inode *); /* <clickety>-<click> the ramfs-type tree */ extern void d_genocide(struct dentry *); +extern void d_mark_tmpfile(struct file *, struct inode *); extern void d_tmpfile(struct file *, struct inode *); extern struct dentry *d_find_alias(struct inode *); diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index ea2d919fd9..c9c65b132c 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -171,6 +171,25 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf, ssize_t debugfs_read_file_str(struct file 
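
The new apply_interval_us parameter of damon_new_scheme() decouples scheme application from the aggregation interval; a sketch ("pattern", "quota", "wmarks" and "ctx" are assumed set up elsewhere):

struct damos *s = damon_new_scheme(&pattern, DAMOS_PAGEOUT,
				   2 * USEC_PER_SEC,	/* every 2s */
				   &quota, &wmarks);

if (!s)
	return -ENOMEM;
damon_add_scheme(ctx, s);	/* 0 would fall back to aggr_interval */
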
*file, char __user *user_buf, size_t count, loff_t *ppos); +/** + * struct debugfs_cancellation - cancellation data + * @list: internal, for keeping track + * @cancel: callback to call + * @cancel_data: extra data for the callback to call + */ +struct debugfs_cancellation { + struct list_head list; + void (*cancel)(struct dentry *, void *); + void *cancel_data; +}; + +void __acquires(cancellation) +debugfs_enter_cancellation(struct file *file, + struct debugfs_cancellation *cancellation); +void __releases(cancellation) +debugfs_leave_cancellation(struct file *file, + struct debugfs_cancellation *cancellation); + #else #include <linux/err.h> diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 69d0435c7e..772ab4d74d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -165,6 +165,7 @@ void dm_error(const char *message); struct dm_dev { struct block_device *bdev; + struct bdev_handle *bdev_handle; struct dax_device *dax_dev; blk_mode_t mode; char name[16]; diff --git a/include/linux/device.h b/include/linux/device.h index 99496a0a5d..6c83294395 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -389,8 +389,8 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data); void devm_release_action(struct device *dev, void (*action)(void *), void *data); int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name); -#define devm_add_action(release, action, data) \ - __devm_add_action(release, action, data, #action) +#define devm_add_action(dev, action, data) \ + __devm_add_action(dev, action, data, #action) static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *), void *data, const char *name) @@ -403,8 +403,8 @@ static inline int __devm_add_action_or_reset(struct device *dev, void (*action)( return ret; } -#define devm_add_action_or_reset(release, action, data) \ - __devm_add_action_or_reset(release, action, data, #action) +#define devm_add_action_or_reset(dev, action, data) \ + __devm_add_action_or_reset(dev, action, data, #action) /** * devm_alloc_percpu - Resource-managed alloc_percpu diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index f0ccca16a0..4a658de44e 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -144,6 +144,7 @@ bool dma_pci_p2pdma_supported(struct device *dev); int dma_set_mask(struct device *dev, u64 mask); int dma_set_coherent_mask(struct device *dev, u64 mask); u64 dma_get_required_mask(struct device *dev); +bool dma_addressing_limited(struct device *dev); size_t dma_max_mapping_size(struct device *dev); size_t dma_opt_mapping_size(struct device *dev); bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); @@ -264,6 +265,10 @@ static inline u64 dma_get_required_mask(struct device *dev) { return 0; } +static inline bool dma_addressing_limited(struct device *dev) +{ + return false; +} static inline size_t dma_max_mapping_size(struct device *dev) { return 0; @@ -465,20 +470,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) return dma_set_mask_and_coherent(dev, mask); } -/** - * dma_addressing_limited - return if the device is addressing limited - * @dev: device to check - * - * Return %true if the devices DMA mask is too small to address all memory in - * the system, else %false. Lack of addressing bits is the prime reason for - * bounce buffering, but might not be the only one. 
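
The devm_add_action*() macro parameter rename above only fixes a misleading name: the first argument has always been the device owning the resource, not a release callback. A standard usage sketch (example_* names are ours); the *_or_reset form runs the action immediately on failure, so no manual unwind is needed:

static void example_clk_off(void *data)
{
	clk_disable_unprepare(data);
}

static int example_enable_clk(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;
	return devm_add_action_or_reset(dev, example_clk_off, clk);
}
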
- */ -static inline bool dma_addressing_limited(struct device *dev) -{ - return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) < - dma_get_required_mask(dev); -} - static inline unsigned int dma_get_max_seg_size(struct device *dev) { if (dev->dma_parms && dev->dma_parms->max_segment_size) diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c3656e5902..752dbde4ce 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -517,8 +517,6 @@ static inline const char *dma_chan_name(struct dma_chan *chan) return dev_name(&chan->dev->device); } -void dma_chan_cleanup(struct kref *kref); - /** * typedef dma_filter_fn - callback filter for dma_request_channel * @chan: channel to be reviewed @@ -955,7 +953,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan, static inline bool is_slave_direction(enum dma_transfer_direction direction) { - return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM); + return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) || + (direction == DMA_DEV_TO_DEV); } static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( diff --git a/include/linux/dpll.h b/include/linux/dpll.h new file mode 100644 index 0000000000..578fc5fa37 --- /dev/null +++ b/include/linux/dpll.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 Meta Platforms, Inc. and affiliates + * Copyright (c) 2023 Intel and affiliates + */ + +#ifndef __DPLL_H__ +#define __DPLL_H__ + +#include <uapi/linux/dpll.h> +#include <linux/device.h> +#include <linux/netlink.h> + +struct dpll_device; +struct dpll_pin; + +struct dpll_device_ops { + int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv, + enum dpll_mode *mode, struct netlink_ext_ack *extack); + bool (*mode_supported)(const struct dpll_device *dpll, void *dpll_priv, + const enum dpll_mode mode, + struct netlink_ext_ack *extack); + int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv, + enum dpll_lock_status *status, + struct netlink_ext_ack *extack); + int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv, + s32 *temp, struct netlink_ext_ack *extack); +}; + +struct dpll_pin_ops { + int (*frequency_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + const u64 frequency, + struct netlink_ext_ack *extack); + int (*frequency_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 *frequency, struct netlink_ext_ack *extack); + int (*direction_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + const enum dpll_pin_direction direction, + struct netlink_ext_ack *extack); + int (*direction_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + enum dpll_pin_direction *direction, + struct netlink_ext_ack *extack); + int (*state_on_pin_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_pin *parent_pin, + void *parent_pin_priv, + enum dpll_pin_state *state, + struct netlink_ext_ack *extack); + int (*state_on_dpll_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, + void *dpll_priv, enum dpll_pin_state *state, + struct netlink_ext_ack *extack); + int (*state_on_pin_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_pin *parent_pin, + void *parent_pin_priv, + const enum dpll_pin_state state, + struct netlink_ext_ack *extack); + int 
(*state_on_dpll_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, + void *dpll_priv, + const enum dpll_pin_state state, + struct netlink_ext_ack *extack); + int (*prio_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u32 *prio, struct netlink_ext_ack *extack); + int (*prio_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + const u32 prio, struct netlink_ext_ack *extack); + int (*phase_offset_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s64 *phase_offset, + struct netlink_ext_ack *extack); + int (*phase_adjust_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + s32 *phase_adjust, + struct netlink_ext_ack *extack); + int (*phase_adjust_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + const s32 phase_adjust, + struct netlink_ext_ack *extack); +}; + +struct dpll_pin_frequency { + u64 min; + u64 max; +}; + +#define DPLL_PIN_FREQUENCY_RANGE(_min, _max) \ + { \ + .min = _min, \ + .max = _max, \ + } + +#define DPLL_PIN_FREQUENCY(_val) DPLL_PIN_FREQUENCY_RANGE(_val, _val) +#define DPLL_PIN_FREQUENCY_1PPS \ + DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_1_HZ) +#define DPLL_PIN_FREQUENCY_10MHZ \ + DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_MHZ) +#define DPLL_PIN_FREQUENCY_IRIG_B \ + DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_KHZ) +#define DPLL_PIN_FREQUENCY_DCF77 \ + DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_77_5_KHZ) + +struct dpll_pin_phase_adjust_range { + s32 min; + s32 max; +}; + +struct dpll_pin_properties { + const char *board_label; + const char *panel_label; + const char *package_label; + enum dpll_pin_type type; + unsigned long capabilities; + u32 freq_supported_num; + struct dpll_pin_frequency *freq_supported; + struct dpll_pin_phase_adjust_range phase_range; +}; + +#if IS_ENABLED(CONFIG_DPLL) +size_t dpll_msg_pin_handle_size(struct dpll_pin *pin); +int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin); +#else +static inline size_t dpll_msg_pin_handle_size(struct dpll_pin *pin) +{ + return 0; +} + +static inline int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin) +{ + return 0; +} +#endif + +struct dpll_device * +dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module); + +void dpll_device_put(struct dpll_device *dpll); + +int dpll_device_register(struct dpll_device *dpll, enum dpll_type type, + const struct dpll_device_ops *ops, void *priv); + +void dpll_device_unregister(struct dpll_device *dpll, + const struct dpll_device_ops *ops, void *priv); + +struct dpll_pin * +dpll_pin_get(u64 clock_id, u32 dev_driver_id, struct module *module, + const struct dpll_pin_properties *prop); + +int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin, + const struct dpll_pin_ops *ops, void *priv); + +void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin, + const struct dpll_pin_ops *ops, void *priv); + +void dpll_pin_put(struct dpll_pin *pin); + +int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin, + const struct dpll_pin_ops *ops, void *priv); + +void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin, + const struct dpll_pin_ops *ops, void *priv); + +int dpll_device_change_ntf(struct dpll_device *dpll); + +int dpll_pin_change_ntf(struct dpll_pin *pin); + +#endif diff --git 
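
A hypothetical driver sketch of the dpll registration flow declared above ("foo_dpll_ops", "clock_id" and "priv" are ours; teardown mirrors this with dpll_device_unregister() followed by dpll_device_put()):

static int example_register_dpll(u64 clock_id, void *priv)
{
	struct dpll_device *dpll;
	int ret;

	dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
	if (IS_ERR(dpll))
		return PTR_ERR(dpll);

	ret = dpll_device_register(dpll, DPLL_TYPE_EEC, &foo_dpll_ops, priv);
	if (ret)
		dpll_device_put(dpll);
	return ret;
}
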
a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index c177322f79..b9dd35d4b8 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -28,7 +28,7 @@ /* Source and Destination MAC of follow-up meta frames. * Whereas the choice of SMAC only affects the unique identification of the * switch as sender of meta frames, the DMAC must be an address that is present - * in the DSA master port's multicast MAC filter. + * in the DSA conduit port's multicast MAC filter. * 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588 * over L2 use this address for some purpose already. */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 80b21d1c6e..9cc5bf32f6 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -358,13 +358,10 @@ void efi_native_runtime_setup(void); * where the UEFI SPEC breaks the line. */ #define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00) -#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81) #define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94) -#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) -#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98) #define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2) #define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c) #define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93) @@ -851,10 +848,6 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len) return 1; } -#ifdef CONFIG_EFI_PCDP -extern int __init efi_setup_pcdp_console(char *); -#endif - /* * We play games with efi_enabled so that the compiler will, if * possible, remove EFI-related code altogether. diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 1b523fd485..689028257f 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -1052,4 +1052,23 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add, * next string. 
*/ extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...); + +/* Link mode to forced speed capabilities maps */ +struct ethtool_forced_speed_map { + u32 speed; + __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); + + const u32 *cap_arr; + u32 arr_size; +}; + +#define ETHTOOL_FORCED_SPEED_MAP(prefix, value) \ +{ \ + .speed = SPEED_##value, \ + .cap_arr = prefix##_##value, \ + .arr_size = ARRAY_SIZE(prefix##_##value), \ +} + +void +ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size); #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h index 5280194777..d445705ac1 100644 --- a/include/linux/export-internal.h +++ b/include/linux/export-internal.h @@ -53,9 +53,7 @@ " .previous" "\n" \ ) -#ifdef CONFIG_IA64 -#define KSYM_FUNC(name) @fptr(name) -#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT) +#if defined(CONFIG_PARISC) && defined(CONFIG_64BIT) #define KSYM_FUNC(name) P%name #else #define KSYM_FUNC(name) name diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 11fbd0ee13..bb37ad5cc9 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -99,12 +99,29 @@ enum fid_type { FILEID_FAT_WITH_PARENT = 0x72, /* + * 64 bit inode number, 32 bit generation number. + */ + FILEID_INO64_GEN = 0x81, + + /* + * 64 bit inode number, 32 bit generation number, + * 64 bit parent inode number, 32 bit parent generation. + */ + FILEID_INO64_GEN_PARENT = 0x82, + + /* * 128 bit child FID (struct lu_fid) * 128 bit parent FID (struct lu_fid) */ FILEID_LUSTRE = 0x97, /* + * 64 bit inode number, 32 bit subvolume, 32 bit generation number: + */ + FILEID_BCACHEFS_WITHOUT_PARENT = 0xb1, + FILEID_BCACHEFS_WITH_PARENT = 0xb2, + + /* * 64 bit unique kernfs id */ FILEID_KERNFS = 0xfe, @@ -123,7 +140,11 @@ struct fid { u32 parent_ino; u32 parent_gen; } i32; - struct { + struct { + u64 ino; + u32 gen; + } __packed i64; + struct { u32 block; u16 partref; u16 parent_partref; @@ -224,15 +245,56 @@ struct export_operations { atomic attribute updates */ #define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */ +#define EXPORT_OP_ASYNC_LOCK (0x40) /* fs can do async lock request */ unsigned long flags; }; +/** + * exportfs_lock_op_is_async() - export op supports async lock operation + * @export_ops: the nfs export operations to check + * + * Returns true if the nfs export_operations structure has + * EXPORT_OP_ASYNC_LOCK in its flags set + */ +static inline bool +exportfs_lock_op_is_async(const struct export_operations *export_ops) +{ + return export_ops->flags & EXPORT_OP_ASYNC_LOCK; +} + extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, int *max_len, struct inode *parent, int flags); extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len, int flags); +static inline bool exportfs_can_encode_fid(const struct export_operations *nop) +{ + return !nop || nop->encode_fh; +} + +static inline bool exportfs_can_decode_fh(const struct export_operations *nop) +{ + return nop && nop->fh_to_dentry; +} + +static inline bool exportfs_can_encode_fh(const struct export_operations *nop, + int fh_flags) +{ + /* + * If a non-decodeable file handle was requested, we only need to make + * sure that the filesystem did not opt out of encoding a fid. + */ + if (fh_flags & EXPORT_FH_FID) + return exportfs_can_encode_fid(nop); + + /* + * If a decodeable file handle was requested, we need to make sure that + * the filesystem can also decode file handles. 
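A sketch of how a filesystem might opt in to the new EXPORT_OP_ASYNC_LOCK flag, and how a lock caller would test for it; the myfs names are hypothetical.

/* Hypothetical fh_to_dentry implementation, declared only for the sketch. */
static struct dentry *myfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type);

static const struct export_operations myfs_export_ops = {
	.fh_to_dentry	= myfs_fh_to_dentry,
	.flags		= EXPORT_OP_ASYNC_LOCK,
};

/* e.g. on the lock-caller side, before issuing a non-blocking request: */
static bool myfs_can_async_lock(const struct super_block *sb)
{
	return sb->s_export_op &&
	       exportfs_lock_op_is_async(sb->s_export_op);
}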
+ */ + return exportfs_can_decode_fh(nop); +} + static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid, int *max_len) { @@ -252,10 +314,12 @@ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid, /* * Generic helpers for filesystems. */ -extern struct dentry *generic_fh_to_dentry(struct super_block *sb, +int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len, + struct inode *parent); +struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type, struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)); -extern struct dentry *generic_fh_to_parent(struct super_block *sb, +struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type, struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index cf1adceb02..039fe0ce8d 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -13,10 +13,10 @@ #define F2FS_SUPER_OFFSET 1024 /* byte-size offset */ #define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */ -#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */ -#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */ -#define F2FS_BLKSIZE 4096 /* support only 4KB block */ -#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */ +#define F2FS_MAX_LOG_SECTOR_SIZE PAGE_SHIFT /* Max is Block Size */ +#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */ +#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */ +#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */ #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ #define F2FS_EXTENSION_LEN 8 /* max size of extension */ #define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS) @@ -211,14 +211,14 @@ struct f2fs_checkpoint { unsigned char sit_nat_version_bitmap[]; } __packed; -#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */ +#define CP_CHKSUM_OFFSET (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */ #define CP_MIN_CHKSUM_OFFSET \ (offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap)) /* * For orphan inode management */ -#define F2FS_ORPHANS_PER_BLOCK 1020 +#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32)) #define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \ F2FS_ORPHANS_PER_BLOCK) @@ -244,14 +244,31 @@ struct f2fs_extent { #define F2FS_NAME_LEN 255 /* 200 bytes for inline xattrs by default */ #define DEFAULT_INLINE_XATTR_ADDRS 50 -#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ + +#define OFFSET_OF_END_OF_I_EXT 360 +#define SIZE_OF_I_NID 20 + +struct node_footer { + __le32 nid; /* node id */ + __le32 ino; /* inode number */ + __le32 flag; /* include cold/fsync/dentry marks and offset */ + __le64 cp_ver; /* checkpoint version */ + __le32 next_blkaddr; /* next node page block address */ +} __packed; + +/* Address Pointers in an Inode */ +#define DEF_ADDRS_PER_INODE ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \ + - SIZE_OF_I_NID \ + - sizeof(struct node_footer)) / sizeof(__le32)) #define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \ get_extra_isize(inode)) #define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */ #define ADDRS_PER_INODE(inode) addrs_per_inode(inode) -#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ +/* Address Pointers in a Direct Block */ +#define 
DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32)) #define ADDRS_PER_BLOCK(inode) addrs_per_block(inode) -#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ +/* Node IDs in an Indirect Block */ +#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32)) #define ADDRS_PER_PAGE(page, inode) \ (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode)) @@ -343,14 +360,6 @@ enum { #define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0) -struct node_footer { - __le32 nid; /* node id */ - __le32 ino; /* inode number */ - __le32 flag; /* include cold/fsync/dentry marks and offset */ - __le64 cp_ver; /* checkpoint version */ - __le32 next_blkaddr; /* next node page block address */ -} __packed; - struct f2fs_node { /* can be one of three types: inode, direct, and indirect types */ union { @@ -364,7 +373,7 @@ struct f2fs_node { /* * For NAT entries */ -#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) +#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry)) struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ @@ -379,12 +388,13 @@ struct f2fs_nat_block { /* * For SIT entries * - * Each segment is 2MB in size by default so that a bitmap for validity of - * there-in blocks should occupy 64 bytes, 512 bits. + * A validity bitmap of 64 bytes covers 512 blocks of area. For a 4K page size, + * this results in a segment size of 2MB. For 16k pages, the default segment size + * is 8MB. * Not allow to change this. */ #define SIT_VBLOCK_MAP_SIZE 64 -#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry)) +#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry)) /* * F2FS uses 4 bytes to represent block address. As a result, supported size of @@ -419,7 +429,7 @@ struct f2fs_sit_block { * For segment summary * * One summary block contains exactly 512 summary entries, which represents - * exactly 2MB segment by default. Not allow to change the basic units. + * exactly one segment by default. Not allow to change the basic units. * * NOTE: For initializing fields, you must use set_summary * @@ -430,12 +440,12 @@ struct f2fs_sit_block { * from node's page's beginning to get a data block address. * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node) */ -#define ENTRIES_IN_SUM 512 +#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8) #define SUMMARY_SIZE (7) /* sizeof(struct summary) */ #define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */ #define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM) -/* a summary entry for a 4KB-sized block in a segment */ +/* a summary entry for a block in a segment */ struct f2fs_summary { __le32 nid; /* parent node id */ union { @@ -519,7 +529,7 @@ struct f2fs_journal { }; } __packed; -/* 4KB-sized summary block structure */ +/* Block-sized summary block structure */ struct f2fs_summary_block { struct f2fs_summary entries[ENTRIES_IN_SUM]; struct f2fs_journal journal; @@ -560,11 +570,14 @@ typedef __le32 f2fs_hash_t; * Note: there are more reserved space in inline dentry than in regular * dentry, when converting inline dentry we should handle this carefully. 
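As a sanity check on the PAGE_SIZE-based rewrites in this file: with 4K pages the new expressions reduce to exactly the constants they replace. A compile-time sketch follows, assuming PAGE_SHIFT expands to a plain integer and sizeof(struct node_footer) == 24 (4 + 4 + 4 + 8 + 4, __packed); NR_DENTRY_IN_BLOCK is redefined just below.

/* Worked out for PAGE_SIZE == 4096:
 *   CP_CHKSUM_OFFSET       = 4096 - 4                        = 4092
 *   F2FS_ORPHANS_PER_BLOCK = (4096 - 4 * 4) / 4              = 1020
 *   DEF_ADDRS_PER_INODE    = (4096 - 360 - 20 - 24) / 4      =  923
 *   DEF_ADDRS_PER_BLOCK    = (4096 - 24) / 4                 = 1018
 *   NR_DENTRY_IN_BLOCK     = (8 * 4096) / ((11 + 8) * 8 + 1) =  214
 * i.e. the historical 4KB constants.
 */
static inline void f2fs_layout_asserts(void)
{
	BUILD_BUG_ON(sizeof(struct node_footer) != 24);
#if PAGE_SHIFT == 12
	BUILD_BUG_ON(CP_CHKSUM_OFFSET != 4092);
	BUILD_BUG_ON(F2FS_ORPHANS_PER_BLOCK != 1020);
	BUILD_BUG_ON(DEF_ADDRS_PER_INODE != 923);
	BUILD_BUG_ON(DEF_ADDRS_PER_BLOCK != 1018);
#endif
}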
*/ -#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */ + +/* the number of dentry in a block */ +#define NR_DENTRY_IN_BLOCK ((BITS_PER_BYTE * F2FS_BLKSIZE) / \ + ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1)) #define SIZE_OF_DIR_ENTRY 11 /* by byte */ #define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \ BITS_PER_BYTE) -#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \ +#define SIZE_OF_RESERVED (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \ F2FS_SLOT_LEN) * \ NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP)) #define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */ @@ -577,7 +590,7 @@ struct f2fs_dir_entry { __u8 file_type; /* file type */ } __packed; -/* 4KB-sized directory entry block */ +/* Block-sized directory entry block */ struct f2fs_dentry_block { /* validity bitmap for directory entries in each block */ __u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP]; diff --git a/include/linux/fb.h b/include/linux/fb.h index c145764582..94e2c44c65 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -591,8 +591,6 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, /* fbmem.c */ extern int register_framebuffer(struct fb_info *fb_info); extern void unregister_framebuffer(struct fb_info *fb_info); -extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); -extern int fb_show_logo(struct fb_info *fb_info, int rotate); extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, u32 height, u32 shift_high, u32 shift_low, u32 mod); @@ -603,9 +601,6 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var, extern int fb_get_options(const char *name, char **option); extern int fb_new_modelist(struct fb_info *info); -extern bool fb_center_logo; -extern int fb_logo_count; - static inline void lock_fb_info(struct fb_info *info) { mutex_lock(&info->lock); diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index e066816f35..bc4c3287a6 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -98,20 +98,9 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un return files_lookup_fd_raw(files, fd); } -static inline struct file *files_lookup_fd_rcu(struct files_struct *files, unsigned int fd) -{ - RCU_LOCKDEP_WARN(!rcu_read_lock_held(), - "suspicious rcu_dereference_check() usage"); - return files_lookup_fd_raw(files, fd); -} - -static inline struct file *lookup_fd_rcu(unsigned int fd) -{ - return files_lookup_fd_rcu(current->files, fd); -} - -struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd); -struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *fd); +struct file *lookup_fdget_rcu(unsigned int fd); +struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd); +struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd); struct task_struct; diff --git a/include/linux/filter.h b/include/linux/filter.h index 77db4263d6..a4953fafc8 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -117,21 +117,25 @@ struct ctl_table_header; /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ -#define BPF_ALU64_IMM(OP, DST, IMM) \ +#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ - .off = 0, \ + .off = OFF, \ .imm = IMM }) +#define BPF_ALU64_IMM(OP, DST, IMM) 
\ + BPF_ALU64_IMM_OFF(OP, DST, IMM, 0) -#define BPF_ALU32_IMM(OP, DST, IMM) \ +#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \ ((struct bpf_insn) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ - .off = 0, \ + .off = OFF, \ .imm = IMM }) +#define BPF_ALU32_IMM(OP, DST, IMM) \ + BPF_ALU32_IMM_OFF(OP, DST, IMM, 0) /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ @@ -143,6 +147,16 @@ struct ctl_table_header; .off = 0, \ .imm = LEN }) +/* Byte Swap, bswap16/32/64 */ + +#define BPF_BSWAP(DST, LEN) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = LEN }) + /* Short form of mov, dst_reg = src_reg */ #define BPF_MOV64_REG(DST, SRC) \ @@ -179,6 +193,24 @@ struct ctl_table_header; .off = 0, \ .imm = IMM }) +/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */ + +#define BPF_MOVSX64_REG(DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +#define BPF_MOVSX32_REG(DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + /* Special form of mov32, used for doing explicit zero extension on dst. */ #define BPF_ZEXT_REG(DST) \ ((struct bpf_insn) { \ @@ -263,6 +295,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = OFF, \ .imm = 0 }) +/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */ + +#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + /* Memory store, *(uint *) (dst_reg + off16) = src_reg */ #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ @@ -694,7 +736,7 @@ static inline void bpf_compute_and_save_data_end( cb->data_end = skb->data + skb_headlen(skb); } -/* Restore data saved by bpf_compute_data_pointers(). */ +/* Restore data saved by bpf_compute_and_save_data_end(). */ static inline void bpf_restore_data_end( struct sk_buff *skb, void *saved_data_end) { @@ -912,6 +954,8 @@ bool bpf_jit_needs_zext(void); bool bpf_jit_supports_subprog_tailcalls(void); bool bpf_jit_supports_kfunc_call(void); bool bpf_jit_supports_far_kfunc_call(void); +bool bpf_jit_supports_exceptions(void); +void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie); bool bpf_helper_changes_pkt_data(void *func); static inline bool bpf_dump_raw_ok(const struct cred *cred) @@ -981,12 +1025,6 @@ int xdp_do_redirect_frame(struct net_device *dev, struct bpf_prog *prog); void xdp_do_flush(void); -/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as - * it is no longer only flushing maps. Keep this define for compatibility - * until all drivers are updated - do not use xdp_do_flush_map() in new code! 
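The new instruction macros above compose like the existing ones; a small illustrative sequence (register choices and offsets are arbitrary):

static const struct bpf_insn demo_insns[] = {
	/* r2 = *(s16 *)(r1 + 0): sign-extending load (BPF_MEMSX) */
	BPF_LDX_MEMSX(BPF_H, BPF_REG_2, BPF_REG_1, 0),
	/* r3 = (s64)(s32)r2: movsx with off = 32 sign-extends the low 32 bits */
	BPF_MOVSX64_REG(BPF_REG_3, BPF_REG_2, 32),
	/* r3 = bswap64(r3): unconditional byte swap */
	BPF_BSWAP(BPF_REG_3, 64),
	/* r0 = 0; exit */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};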
- */ -#define xdp_do_flush_map xdp_do_flush - void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act); #ifdef CONFIG_INET @@ -1127,6 +1165,7 @@ const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, bool is_bpf_text_address(unsigned long addr); int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym); +struct bpf_prog *bpf_prog_ksym_find(unsigned long addr); static inline const char * bpf_address_lookup(unsigned long addr, unsigned long *size, @@ -1194,6 +1233,11 @@ static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value, return -ERANGE; } +static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr) +{ + return NULL; +} + static inline const char * bpf_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym) diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h index 95b0da2326..8eaf8922ab 100644 --- a/include/linux/firmware/meson/meson_sm.h +++ b/include/linux/firmware/meson/meson_sm.h @@ -19,7 +19,7 @@ enum { struct meson_sm_firmware; int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index, - u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); + s32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer, unsigned int b_size, unsigned int cmd_index, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4); diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h new file mode 100644 index 0000000000..5c28298a98 --- /dev/null +++ b/include/linux/firmware/qcom/qcom_qseecom.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Driver for Qualcomm Secure Execution Environment (SEE) interface (QSEECOM). + * Responsible for setting up and managing QSEECOM client devices. + * + * Copyright (C) 2023 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef __QCOM_QSEECOM_H +#define __QCOM_QSEECOM_H + +#include <linux/auxiliary_bus.h> +#include <linux/types.h> + +#include <linux/firmware/qcom/qcom_scm.h> + +/** + * struct qseecom_client - QSEECOM client device. + * @aux_dev: Underlying auxiliary device. + * @app_id: ID of the loaded application. + */ +struct qseecom_client { + struct auxiliary_device aux_dev; + u32 app_id; +}; + +/** + * qcom_qseecom_app_send() - Send to and receive data from a given QSEE app. + * @client: The QSEECOM client associated with the target app. + * @req: Request buffer sent to the app (must be DMA-mappable). + * @req_size: Size of the request buffer. + * @rsp: Response buffer, written to by the app (must be DMA-mappable). + * @rsp_size: Size of the response buffer. + * + * Sends a request to the QSEE app associated with the given client and reads + * back its response. The caller must provide two DMA memory regions, one for + * the request and one for the response, and fill out the @req region with the + * respective (app-specific) request data. The QSEE app reads this and returns + * its response in the @rsp region. + * + * Note: This is a convenience wrapper around qcom_scm_qseecom_app_send(). + * Clients should prefer to use this wrapper. + * + * Return: Zero on success, nonzero on failure. 
+ */ +static inline int qcom_qseecom_app_send(struct qseecom_client *client, void *req, size_t req_size, + void *rsp, size_t rsp_size) +{ + return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size); +} + +#endif /* __QCOM_QSEECOM_H */ diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h index 0c091a3f6d..ccaf288460 100644 --- a/include/linux/firmware/qcom/qcom_scm.h +++ b/include/linux/firmware/qcom/qcom_scm.h @@ -59,12 +59,12 @@ enum qcom_scm_ice_cipher { #define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE) #define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC) -extern bool qcom_scm_is_available(void); +bool qcom_scm_is_available(void); -extern int qcom_scm_set_cold_boot_addr(void *entry); -extern int qcom_scm_set_warm_boot_addr(void *entry); -extern void qcom_scm_cpu_power_down(u32 flags); -extern int qcom_scm_set_remote_state(u32 state, u32 id); +int qcom_scm_set_cold_boot_addr(void *entry); +int qcom_scm_set_warm_boot_addr(void *entry); +void qcom_scm_cpu_power_down(u32 flags); +int qcom_scm_set_remote_state(u32 state, u32 id); struct qcom_scm_pas_metadata { void *ptr; @@ -72,54 +72,69 @@ struct qcom_scm_pas_metadata { ssize_t size; }; -extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, - size_t size, - struct qcom_scm_pas_metadata *ctx); -extern void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx); -extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, - phys_addr_t size); -extern int qcom_scm_pas_auth_and_reset(u32 peripheral); -extern int qcom_scm_pas_shutdown(u32 peripheral); -extern bool qcom_scm_pas_supported(u32 peripheral); - -extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); -extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); - -extern bool qcom_scm_restore_sec_cfg_available(void); -extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); -extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); -extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); -extern int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size); -extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, - u32 cp_nonpixel_start, - u32 cp_nonpixel_size); -extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, - u64 *src, - const struct qcom_scm_vmperm *newvm, - unsigned int dest_cnt); - -extern bool qcom_scm_ocmem_lock_available(void); -extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, - u32 size, u32 mode); -extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, - u32 size); - -extern bool qcom_scm_ice_available(void); -extern int qcom_scm_ice_invalidate_key(u32 index); -extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, - enum qcom_scm_ice_cipher cipher, - u32 data_unit_size); - -extern bool qcom_scm_hdcp_available(void); -extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, - u32 *resp); - -extern int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt); -extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en); - -extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, - u64 limit_node, u32 node_id, u64 version); -extern int qcom_scm_lmh_profile_change(u32 profile_id); -extern bool qcom_scm_lmh_dcvsh_available(void); +int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size, + struct qcom_scm_pas_metadata *ctx); +void 
qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx); +int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size); +int qcom_scm_pas_auth_and_reset(u32 peripheral); +int qcom_scm_pas_shutdown(u32 peripheral); +bool qcom_scm_pas_supported(u32 peripheral); + +int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); +int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); + +bool qcom_scm_restore_sec_cfg_available(void); +int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); +int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); +int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); +int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size); +int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, + u32 cp_nonpixel_start, u32 cp_nonpixel_size); +int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, u64 *src, + const struct qcom_scm_vmperm *newvm, + unsigned int dest_cnt); + +bool qcom_scm_ocmem_lock_available(void); +int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, + u32 mode); +int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size); + +bool qcom_scm_ice_available(void); +int qcom_scm_ice_invalidate_key(u32 index); +int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, + enum qcom_scm_ice_cipher cipher, u32 data_unit_size); + +bool qcom_scm_hdcp_available(void); +int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); + +int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt); +int qcom_scm_qsmmu500_wait_safe_toggle(bool en); + +int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, + u64 limit_node, u32 node_id, u64 version); +int qcom_scm_lmh_profile_change(u32 profile_id); +bool qcom_scm_lmh_dcvsh_available(void); + +#ifdef CONFIG_QCOM_QSEECOM + +int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id); +int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp, + size_t rsp_size); + +#else /* CONFIG_QCOM_QSEECOM */ + +static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id) +{ + return -EINVAL; +} + +static inline int qcom_scm_qseecom_app_send(u32 app_id, void *req, + size_t req_size, void *rsp, + size_t rsp_size) +{ + return -EINVAL; +} + +#endif /* CONFIG_QCOM_QSEECOM */ #endif diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index e8b12ec8b0..d1ea389856 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -100,6 +100,18 @@ #define SD_ITAPDLY 0xFF180314 #define SD_OTAPDLYSEL 0xFF180318 +/** + * XPM_EVENT_ERROR_MASK_DDRMC_CR: Error event mask for DDRMC MC Correctable ECC Error. + */ +#define XPM_EVENT_ERROR_MASK_DDRMC_CR BIT(18) + +/** + * XPM_EVENT_ERROR_MASK_DDRMC_NCR: Error event mask for DDRMC MC Non-Correctable ECC Error. 
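Putting the QSEECOM pieces above together: a client typically fills a request buffer and exchanges it via the wrapper shown earlier. The command layout below is entirely made up, and error handling is trimmed.

static int my_qseecom_query(struct qseecom_client *client)
{
	struct { __le32 cmd_id; } *req;
	struct { __le32 status; } *rsp;
	int ret;

	/* Both buffers must be DMA-mappable, hence regular kmalloc memory. */
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	if (!req || !rsp) {
		ret = -ENOMEM;
		goto out;
	}

	req->cmd_id = cpu_to_le32(0x01);	/* hypothetical app command */
	ret = qcom_qseecom_app_send(client, req, sizeof(*req),
				    rsp, sizeof(*rsp));
	if (!ret && le32_to_cpu(rsp->status))
		ret = -EIO;
out:
	kfree(req);
	kfree(rsp);
	return ret;
}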
+ */ +#define XPM_EVENT_ERROR_MASK_DDRMC_NCR BIT(19) +#define XPM_EVENT_ERROR_MASK_NOC_NCR BIT(13) +#define XPM_EVENT_ERROR_MASK_NOC_CR BIT(12) + enum pm_api_cb_id { PM_INIT_SUSPEND_CB = 30, PM_ACKNOWLEDGE_CB = 31, diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index da51a83b28..79ef6ac4c0 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -93,13 +93,9 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) #if __has_builtin(__builtin_dynamic_object_size) #define POS __pass_dynamic_object_size(1) #define POS0 __pass_dynamic_object_size(0) -#define __struct_size(p) __builtin_dynamic_object_size(p, 0) -#define __member_size(p) __builtin_dynamic_object_size(p, 1) #else #define POS __pass_object_size(1) #define POS0 __pass_object_size(0) -#define __struct_size(p) __builtin_object_size(p, 0) -#define __member_size(p) __builtin_object_size(p, 1) #endif #define __compiletime_lessthan(bounds, length) ( \ @@ -643,7 +639,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, __q_size_field, #op), \ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \ __fortify_size, \ - "field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \ + "field \"" #p "\" at " FILE_LINE, \ __p_size_field); \ __underlying_##op(p, q, __fortify_size); \ }) diff --git a/include/linux/freelist.h b/include/linux/freelist.h deleted file mode 100644 index fc1842b964..0000000000 --- a/include/linux/freelist.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ -#ifndef FREELIST_H -#define FREELIST_H - -#include <linux/atomic.h> - -/* - * Copyright: cameron@moodycamel.com - * - * A simple CAS-based lock-free free list. Not the fastest thing in the world - * under heavy contention, but simple and correct (assuming nodes are never - * freed until after the free list is destroyed), and fairly speedy under low - * contention. - * - * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists - */ - -struct freelist_node { - atomic_t refs; - struct freelist_node *next; -}; - -struct freelist_head { - struct freelist_node *head; -}; - -#define REFS_ON_FREELIST 0x80000000 -#define REFS_MASK 0x7FFFFFFF - -static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list) -{ - /* - * Since the refcount is zero, and nobody can increase it once it's - * zero (except us, and we run only one copy of this method per node at - * a time, i.e. the single thread case), then we know we can safely - * change the next pointer of the node; however, once the refcount is - * back above zero, then other threads could increase it (happens under - * heavy contention, when the refcount goes to zero in between a load - * and a refcount increment of a node in try_get, then back up to - * something non-zero, then the refcount increment is done by the other - * thread) -- so if the CAS to add the node to the actual list fails, - * decrese the refcount and leave the add operation to the next thread - * who puts the refcount back to zero (which could be us, hence the - * loop). - */ - struct freelist_node *head = READ_ONCE(list->head); - - for (;;) { - WRITE_ONCE(node->next, head); - atomic_set_release(&node->refs, 1); - - if (!try_cmpxchg_release(&list->head, &head, node)) { - /* - * Hmm, the add failed, but we can only try again when - * the refcount goes back to zero. 
- */ - if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1) - continue; - } - return; - } -} - -static inline void freelist_add(struct freelist_node *node, struct freelist_head *list) -{ - /* - * We know that the should-be-on-freelist bit is 0 at this point, so - * it's safe to set it using a fetch_add. - */ - if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) { - /* - * Oh look! We were the last ones referencing this node, and we - * know we want to add it to the free list, so let's do it! - */ - __freelist_add(node, list); - } -} - -static inline struct freelist_node *freelist_try_get(struct freelist_head *list) -{ - struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head); - unsigned int refs; - - while (head) { - prev = head; - refs = atomic_read(&head->refs); - if ((refs & REFS_MASK) == 0 || - !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) { - head = smp_load_acquire(&list->head); - continue; - } - - /* - * Good, reference count has been incremented (it wasn't at - * zero), which means we can read the next and not worry about - * it changing between now and the time we do the CAS. - */ - next = READ_ONCE(head->next); - if (try_cmpxchg_acquire(&list->head, &head, next)) { - /* - * Yay, got the node. This means it was on the list, - * which means should-be-on-freelist must be false no - * matter the refcount (because nobody else knows it's - * been taken off yet, it can't have been put back on). - */ - WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST); - - /* - * Decrease refcount twice, once for our ref, and once - * for the list's ref. - */ - atomic_fetch_add(-2, &head->refs); - - return head; - } - - /* - * OK, the head must have changed on us, but we still need to decrement - * the refcount we increased. - */ - refs = atomic_fetch_add(-1, &prev->refs); - if (refs == REFS_ON_FREELIST + 1) - __freelist_add(prev, list); - } - - return NULL; -} - -#endif /* FREELIST_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index d08b97dacd..7f659c2679 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -67,7 +67,7 @@ struct swap_info_struct; struct seq_file; struct workqueue_struct; struct iov_iter; -struct fscrypt_info; +struct fscrypt_inode_info; struct fscrypt_operations; struct fsverity_info; struct fsverity_operations; @@ -352,6 +352,8 @@ enum rw_hint { * unrelated IO (like cache flushing, new IO generation, etc). */ #define IOCB_DIO_CALLER_COMP (1 << 22) +/* kiocb is a read or write operation submitted by fs/aio.c. */ +#define IOCB_AIO_RW (1 << 23) /* for use in trace events */ #define TRACE_IOCB_STRINGS \ @@ -454,7 +456,7 @@ extern const struct address_space_operations empty_aops; * It is also used to block modification of page cache contents through * memory mappings. * @gfp_mask: Memory allocation flags to use for allocating pages. - * @i_mmap_writable: Number of VM_SHARED mappings. + * @i_mmap_writable: Number of VM_SHARED, VM_MAYWRITE mappings. * @nr_thps: Number of THPs in the pagecache (non-shmem only). * @i_mmap: Tree of private and shared mappings. * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. @@ -557,7 +559,7 @@ static inline int mapping_mapped(struct address_space *mapping) /* * Might pages of this file have been modified in userspace? - * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap + * Note that i_mmap_writable counts all VM_SHARED, VM_MAYWRITE vmas: do_mmap * marks vma as VM_SHARED if it is shared, and the file was opened for * writing i.e. 
vma may be mprotected writable even if now readonly. * @@ -671,8 +673,8 @@ struct inode { }; dev_t i_rdev; loff_t i_size; - struct timespec64 i_atime; - struct timespec64 i_mtime; + struct timespec64 __i_atime; + struct timespec64 __i_mtime; struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */ spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ unsigned short i_bytes; @@ -738,7 +740,7 @@ struct inode { #endif #ifdef CONFIG_FS_ENCRYPTION - struct fscrypt_info *i_crypt_info; + struct fscrypt_inode_info *i_crypt_info; #endif #ifdef CONFIG_FS_VERITY @@ -1042,7 +1044,10 @@ static inline struct file *get_file(struct file *f) atomic_long_inc(&f->f_count); return f; } -#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count) + +struct file *get_file_rcu(struct file __rcu **f); +struct file *get_file_active(struct file **f); + #define file_count(x) atomic_long_read(&(x)->f_count) #define MAX_NON_LFS ((1UL<<31) - 1) @@ -1119,7 +1124,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_NOATIME BIT(10) /* Do not update access times. */ #define SB_NODIRATIME BIT(11) /* Do not update directory access times */ #define SB_SILENT BIT(15) -#define SB_POSIXACL BIT(16) /* VFS does not apply the umask */ +#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */ #define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */ #define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */ #define SB_I_VERSION BIT(23) /* Update inode I_version field */ @@ -1166,6 +1171,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */ #define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */ #define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */ +#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */ /* Possible states of 'frozen' field */ enum { @@ -1206,7 +1212,7 @@ struct super_block { #ifdef CONFIG_SECURITY void *s_security; #endif - const struct xattr_handler **s_xattr; + const struct xattr_handler * const *s_xattr; #ifdef CONFIG_FS_ENCRYPTION const struct fscrypt_operations *s_cop; struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */ @@ -1221,6 +1227,7 @@ struct super_block { struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ struct list_head s_mounts; /* list of mounts; _not_ for fs use */ struct block_device *s_bdev; + struct bdev_handle *s_bdev_handle; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; @@ -1265,7 +1272,7 @@ struct super_block { const struct dentry_operations *s_d_op; /* default d_op for dentries */ - struct shrinker s_shrink; /* per-sb shrinker handle */ + struct shrinker *s_shrink; /* per-sb shrinker handle */ /* Number of inodes with nlink == 0 but still referenced */ atomic_long_t s_remove_count; @@ -1513,23 +1520,23 @@ struct timespec64 inode_set_ctime_current(struct inode *inode); static inline time64_t inode_get_atime_sec(const struct inode *inode) { - return inode->i_atime.tv_sec; + return inode->__i_atime.tv_sec; } static inline long inode_get_atime_nsec(const struct inode *inode) { - return inode->i_atime.tv_nsec; + return inode->__i_atime.tv_nsec; } static inline struct timespec64 inode_get_atime(const struct inode *inode) { - return inode->i_atime; + return inode->__i_atime; } static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode, struct timespec64 ts) { - inode->i_atime = ts; + inode->__i_atime = ts; return ts; } @@ -1543,23 +1550,23 @@ static inline struct 
timespec64 inode_set_atime(struct inode *inode, static inline time64_t inode_get_mtime_sec(const struct inode *inode) { - return inode->i_mtime.tv_sec; + return inode->__i_mtime.tv_sec; } static inline long inode_get_mtime_nsec(const struct inode *inode) { - return inode->i_mtime.tv_nsec; + return inode->__i_mtime.tv_nsec; } static inline struct timespec64 inode_get_mtime(const struct inode *inode) { - return inode->i_mtime; + return inode->__i_mtime; } static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode, struct timespec64 ts) { - inode->i_mtime = ts; + inode->__i_mtime = ts; return ts; } @@ -2140,7 +2147,12 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) + +#ifdef CONFIG_FS_POSIX_ACL #define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL) +#else +#define IS_POSIXACL(inode) 0 +#endif #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) @@ -2468,7 +2480,7 @@ struct filename { }; static_assert(offsetof(struct filename, iname) % sizeof(long) == 0); -static inline struct mnt_idmap *file_mnt_idmap(struct file *file) +static inline struct mnt_idmap *file_mnt_idmap(const struct file *file) { return mnt_idmap(file->f_path.mnt); } @@ -2507,24 +2519,24 @@ struct file *dentry_open(const struct path *path, int flags, const struct cred *creds); struct file *dentry_create(const struct path *path, int flags, umode_t mode, const struct cred *cred); -struct file *backing_file_open(const struct path *path, int flags, +struct file *backing_file_open(const struct path *user_path, int flags, const struct path *real_path, const struct cred *cred); -struct path *backing_file_real_path(struct file *f); +struct path *backing_file_user_path(struct file *f); /* - * file_real_path - get the path corresponding to f_inode + * file_user_path - get the path to display for memory mapped file * - * When opening a backing file for a stackable filesystem (e.g., - * overlayfs) f_path may be on the stackable filesystem and f_inode on - * the underlying filesystem. When the path associated with f_inode is - * needed, this helper should be used instead of accessing f_path - * directly. -*/ -static inline const struct path *file_real_path(struct file *f) + * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file + * stored in ->vm_file is a backing file whose f_inode is on the underlying + * filesystem. When the mapped file path is displayed to user (e.g. via + * /proc/<pid>/maps), this helper should be used to get the path to display + * to the user, which is the path of the fd that user has requested to map. 
+ */ +static inline const struct path *file_user_path(struct file *f) { if (unlikely(f->f_mode & FMODE_BACKING)) - return backing_file_real_path(f); + return backing_file_user_path(f); return &f->f_path; } diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h index 010d39d0dc..2b1f74b240 100644 --- a/include/linux/fs_stack.h +++ b/include/linux/fs_stack.h @@ -16,14 +16,14 @@ extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src); static inline void fsstack_copy_attr_atime(struct inode *dest, const struct inode *src) { - dest->i_atime = src->i_atime; + inode_set_atime_to_ts(dest, inode_get_atime(src)); } static inline void fsstack_copy_attr_times(struct inode *dest, const struct inode *src) { - dest->i_atime = src->i_atime; - dest->i_mtime = src->i_mtime; + inode_set_atime_to_ts(dest, inode_get_atime(src)); + inode_set_mtime_to_ts(dest, inode_get_mtime(src)); inode_set_ctime_to_ts(dest, inode_get_ctime(src)); } diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index c895b12737..12f9e455d5 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -31,7 +31,7 @@ #define FSCRYPT_CONTENTS_ALIGNMENT 16 union fscrypt_policy; -struct fscrypt_info; +struct fscrypt_inode_info; struct fs_parameter; struct seq_file; @@ -59,26 +59,55 @@ struct fscrypt_name { #ifdef CONFIG_FS_ENCRYPTION -/* - * If set, the fscrypt bounce page pool won't be allocated (unless another - * filesystem needs it). Set this if the filesystem always uses its own bounce - * pages for writes and therefore won't need the fscrypt bounce page pool. - */ -#define FS_CFLG_OWN_PAGES (1U << 1) - /* Crypto operations for filesystems */ struct fscrypt_operations { - /* Set of optional flags; see above for allowed flags */ - unsigned int flags; + /* + * If set, then fs/crypto/ will allocate a global bounce page pool the + * first time an encryption key is set up for a file. The bounce page + * pool is required by the following functions: + * + * - fscrypt_encrypt_pagecache_blocks() + * - fscrypt_zeroout_range() for files not using inline crypto + * + * If the filesystem doesn't use those, it doesn't need to set this. + */ + unsigned int needs_bounce_pages : 1; /* - * If set, this is a filesystem-specific key description prefix that - * will be accepted for "logon" keys for v1 fscrypt policies, in - * addition to the generic prefix "fscrypt:". This functionality is - * deprecated, so new filesystems shouldn't set this field. + * If set, then fs/crypto/ will allow the use of encryption settings + * that assume inode numbers fit in 32 bits (i.e. + * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64}), provided that the other + * prerequisites for these settings are also met. This is only useful + * if the filesystem wants to support inline encryption hardware that is + * limited to 32-bit or 64-bit data unit numbers and where programming + * keyslots is very slow. */ - const char *key_prefix; + unsigned int has_32bit_inodes : 1; + + /* + * If set, then fs/crypto/ will allow users to select a crypto data unit + * size that is less than the filesystem block size. This is done via + * the log2_data_unit_size field of the fscrypt policy. This flag is + * not compatible with filesystems that encrypt variable-length blocks + * (i.e. blocks that aren't all equal to filesystem's block size), for + * example as a result of compression. It's also not compatible with + * the fscrypt_encrypt_block_inplace() and + * fscrypt_decrypt_block_inplace() functions. 
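To make the file_user_path() contract concrete, here is a sketch of a /proc-maps-style consumer; my_show_mapping and its fixed-size buffer are illustrative only.

static void my_show_mapping(struct vm_area_struct *vma)
{
	const struct path *path;
	char buf[256], *p;

	if (!vma->vm_file)
		return;

	/* For a backing file (FMODE_BACKING) this yields the path the user
	 * actually opened, not the underlying filesystem's path. */
	path = file_user_path(vma->vm_file);
	p = d_path(path, buf, sizeof(buf));
	if (!IS_ERR(p))
		pr_info("mapping backed by %s\n", p);
}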
+ */ + unsigned int supports_subblock_data_units : 1; + + /* + * This field exists only for backwards compatibility reasons and should + * only be set by the filesystems that are setting it already. It + * contains the filesystem-specific key description prefix that is + * accepted for "logon" keys for v1 fscrypt policies. This + * functionality is deprecated in favor of the generic prefix + * "fscrypt:", which itself is deprecated in favor of the filesystem + * keyring ioctls such as FS_IOC_ADD_ENCRYPTION_KEY. Filesystems that + * are newly adding fscrypt support should not set this field. + */ + const char *legacy_key_prefix; /* * Get the fscrypt context of the given inode. @@ -146,21 +175,6 @@ struct fscrypt_operations { bool (*has_stable_inodes)(struct super_block *sb); /* - * Get the number of bits that the filesystem uses to represent inode - * numbers and file logical block numbers. - * - * By default, both of these are assumed to be 64-bit. This function - * can be implemented to declare that either or both of these numbers is - * shorter, which may allow the use of the - * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of - * inline crypto hardware whose maximum DUN length is less than 64 bits - * (e.g., eMMC v5.2 spec compliant hardware). This function only needs - * to be implemented if support for one of these features is needed. - */ - void (*get_ino_and_lblk_bits)(struct super_block *sb, - int *ino_bits_ret, int *lblk_bits_ret); - - /* * Return an array of pointers to the block devices to which the * filesystem may write encrypted file contents, NULL if the filesystem * only has a single such block device, or an ERR_PTR() on error. @@ -178,7 +192,8 @@ struct fscrypt_operations { unsigned int *num_devs); }; -static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) +static inline struct fscrypt_inode_info * +fscrypt_get_inode_info(const struct inode *inode) { /* * Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info(). @@ -390,7 +405,8 @@ static inline void fscrypt_set_ops(struct super_block *sb, } #else /* !CONFIG_FS_ENCRYPTION */ -static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) +static inline struct fscrypt_inode_info * +fscrypt_get_inode_info(const struct inode *inode) { return NULL; } @@ -868,7 +884,7 @@ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) */ static inline bool fscrypt_has_encryption_key(const struct inode *inode) { - return fscrypt_get_info(inode) != NULL; + return fscrypt_get_inode_info(inode) != NULL; } /** diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index ed48e4f1e7..bcb6609b54 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -96,8 +96,7 @@ static inline int fsnotify_file(struct file *file, __u32 mask) if (file->f_mode & FMODE_NONOTIFY) return 0; - /* Overlayfs internal files have fake f_path */ - path = file_real_path(file); + path = &file->f_path; return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); } diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h new file mode 100644 index 0000000000..ca49947f0a --- /dev/null +++ b/include/linux/fw_table.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * fw_tables.h - Parsing support for ACPI and ACPI-like tables provided by + * platform or device firmware + * + * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2023 Intel Corp. 
+ */ +#ifndef _FW_TABLE_H_ +#define _FW_TABLE_H_ + +union acpi_subtable_headers; + +typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header, + const unsigned long end); + +typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header, + void *arg, const unsigned long end); + +struct acpi_subtable_proc { + int id; + acpi_tbl_entry_handler handler; + acpi_tbl_entry_handler_arg handler_arg; + void *arg; + int count; +}; + +union acpi_subtable_headers { + struct acpi_subtable_header common; + struct acpi_hmat_structure hmat; + struct acpi_prmt_module_header prmt; + struct acpi_cedt_header cedt; +}; + +int acpi_parse_entries_array(char *id, unsigned long table_size, + struct acpi_table_header *table_header, + struct acpi_subtable_proc *proc, + int proc_num, unsigned int max_entries); + +#endif diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 5700451b30..2a72f55d26 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -41,6 +41,8 @@ struct device; struct fwnode_handle { struct fwnode_handle *secondary; const struct fwnode_operations *ops; + + /* The below is used solely by device links, don't use otherwise */ struct device *dev; struct list_head suppliers; struct list_head consumers; diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h index f6cd0f909d..8474131647 100644 --- a/include/linux/generic-radix-tree.h +++ b/include/linux/generic-radix-tree.h @@ -117,6 +117,11 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size) #define __genradix_cast(_radix) (typeof((_radix)->type[0]) *) #define __genradix_obj_size(_radix) sizeof((_radix)->type[0]) +#define __genradix_objs_per_page(_radix) \ + (PAGE_SIZE / sizeof((_radix)->type[0])) +#define __genradix_page_remainder(_radix) \ + (PAGE_SIZE % sizeof((_radix)->type[0])) + #define __genradix_idx_to_offset(_radix, _idx) \ __idx_to_offset(_idx, __genradix_obj_size(_radix)) @@ -180,7 +185,25 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t); #define genradix_iter_peek(_iter, _radix) \ (__genradix_cast(_radix) \ __genradix_iter_peek(_iter, &(_radix)->tree, \ - PAGE_SIZE / __genradix_obj_size(_radix))) + __genradix_objs_per_page(_radix))) + +void *__genradix_iter_peek_prev(struct genradix_iter *, struct __genradix *, + size_t, size_t); + +/** + * genradix_iter_peek_prev - get first entry at or below iterator's current + * position + * @_iter: a genradix_iter + * @_radix: genradix being iterated over + * + * If no more entries exist at or below @_iter's current position, returns NULL + */ +#define genradix_iter_peek_prev(_iter, _radix) \ + (__genradix_cast(_radix) \ + __genradix_iter_peek_prev(_iter, &(_radix)->tree, \ + __genradix_objs_per_page(_radix), \ + __genradix_obj_size(_radix) + \ + __genradix_page_remainder(_radix))) static inline void __genradix_iter_advance(struct genradix_iter *iter, size_t obj_size) @@ -203,6 +226,25 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter, #define genradix_iter_advance(_iter, _radix) \ __genradix_iter_advance(_iter, __genradix_obj_size(_radix)) +static inline void __genradix_iter_rewind(struct genradix_iter *iter, + size_t obj_size) +{ + if (iter->offset == 0 || + iter->offset == SIZE_MAX) { + iter->offset = SIZE_MAX; + return; + } + + if ((iter->offset & (PAGE_SIZE - 1)) == 0) + iter->offset -= PAGE_SIZE % obj_size; + + iter->offset -= obj_size; + iter->pos--; +} + +#define genradix_iter_rewind(_iter, _radix) \ + __genradix_iter_rewind(_iter, 
__genradix_obj_size(_radix)) + #define genradix_for_each_from(_radix, _iter, _p, _start) \ for (_iter = genradix_iter_init(_radix, _start); \ (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \ @@ -220,6 +262,23 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter, #define genradix_for_each(_radix, _iter, _p) \ genradix_for_each_from(_radix, _iter, _p, 0) +#define genradix_last_pos(_radix) \ + (SIZE_MAX / PAGE_SIZE * __genradix_objs_per_page(_radix) - 1) + +/** + * genradix_for_each_reverse - iterate over entry in a genradix, reverse order + * @_radix: genradix to iterate over + * @_iter: a genradix_iter to track current position + * @_p: pointer to genradix entry type + * + * On every iteration, @_p will point to the current entry, and @_iter.pos + * will be the current entry's index. + */ +#define genradix_for_each_reverse(_radix, _iter, _p) \ + for (_iter = genradix_iter_init(_radix, genradix_last_pos(_radix));\ + (_p = genradix_iter_peek_prev(&_iter, _radix)) != NULL;\ + genradix_iter_rewind(&_iter, _radix)) + int __genradix_prealloc(struct __genradix *, size_t, gfp_t); /** diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 665f06675c..de292a0071 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -8,6 +8,7 @@ #include <linux/topology.h> struct vm_area_struct; +struct mempolicy; /* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) @@ -262,7 +263,9 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, #ifdef CONFIG_NUMA struct page *alloc_pages(gfp_t gfp, unsigned int order); -struct folio *folio_alloc(gfp_t gfp, unsigned order); +struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, + struct mempolicy *mpol, pgoff_t ilx, int nid); +struct folio *folio_alloc(gfp_t gfp, unsigned int order); struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, bool hugepage); #else @@ -270,6 +273,11 @@ static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order) { return alloc_pages_node(numa_node_id(), gfp_mask, order); } +static inline struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, + struct mempolicy *mpol, pgoff_t ilx, int nid) +{ + return alloc_pages(gfp, order); +} static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order) { return __folio_alloc_node(gfp, order, numa_node_id()); @@ -320,11 +328,13 @@ extern void page_frag_free(void *addr); #define free_page(addr) free_pages((addr), 0) void page_alloc_init_cpuhp(void); +int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(struct zone *zone); void drain_local_pages(struct zone *zone); void page_alloc_init_late(void); +void setup_pcp_cacheinfo(void); /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 1c4385a00f..db2dfbae8e 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -159,7 +159,6 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size, int gpiod_set_config(struct gpio_desc *desc, unsigned long config); int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce); -int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); void gpiod_toggle_active_low(struct gpio_desc *desc); int gpiod_is_active_low(const struct gpio_desc *desc); @@ -494,13 +493,6 @@ static inline 
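Usage of the new reverse iterator above mirrors genradix_for_each(); a sketch over a hypothetical genradix of sequence entries:

struct my_entry {
	u64 seq;
};

static GENRADIX(struct my_entry) my_entries;

static void my_walk_newest_first(void)
{
	struct genradix_iter iter;
	struct my_entry *e;

	/* Starts at genradix_last_pos() and rewinds toward index 0. */
	genradix_for_each_reverse(&my_entries, iter, e)
		pr_info("pos %zu seq %llu\n", iter.pos, e->seq);
}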
int gpiod_set_debounce(struct gpio_desc *desc, unsigned int deboun return -ENOSYS; } -static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) -{ - /* GPIO can never have been requested */ - WARN_ON(desc); - return -ENOSYS; -} - static inline void gpiod_toggle_active_low(struct gpio_desc *desc) { /* GPIO can never have been requested */ @@ -614,8 +606,6 @@ void acpi_dev_remove_driver_gpios(struct acpi_device *adev); int devm_acpi_dev_add_driver_gpios(struct device *dev, const struct acpi_gpio_mapping *gpios); -struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label); - #else /* CONFIG_GPIOLIB && CONFIG_ACPI */ #include <linux/err.h> @@ -633,12 +623,6 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, return -ENXIO; } -static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, - char *label) -{ - return ERR_PTR(-ENOSYS); -} - #endif /* CONFIG_GPIOLIB && CONFIG_ACPI */ diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index d6e38a5008..725e7eca99 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -3,6 +3,8 @@ #define __LINUX_GPIO_DRIVER_H #include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/err.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/irqhandler.h> @@ -529,8 +531,7 @@ struct gpio_chip { #endif /* CONFIG_OF_GPIO */ }; -extern const char *gpiochip_is_requested(struct gpio_chip *gc, - unsigned int offset); +const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned int offset); /** * for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range @@ -549,9 +550,9 @@ extern const char *gpiochip_is_requested(struct gpio_chip *gc, for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label) /* add/remove chips */ -extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, - struct lock_class_key *lock_key, - struct lock_class_key *request_key); +int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, + struct lock_class_key *lock_key, + struct lock_class_key *request_key); /** * gpiochip_add_data() - register a gpio_chip @@ -599,20 +600,24 @@ static inline int gpiochip_add(struct gpio_chip *gc) { return gpiochip_add_data(gc, NULL); } -extern void gpiochip_remove(struct gpio_chip *gc); -extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data, - struct lock_class_key *lock_key, - struct lock_class_key *request_key); - -extern struct gpio_chip *gpiochip_find(void *data, - int (*match)(struct gpio_chip *gc, void *data)); +void gpiochip_remove(struct gpio_chip *gc); +int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, + void *data, struct lock_class_key *lock_key, + struct lock_class_key *request_key); struct gpio_device *gpio_device_find(void *data, int (*match)(struct gpio_chip *gc, void *data)); +struct gpio_device *gpio_device_find_by_label(const char *label); +struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode); struct gpio_device *gpio_device_get(struct gpio_device *gdev); void gpio_device_put(struct gpio_device *gdev); +DEFINE_FREE(gpio_device_put, struct gpio_device *, + if (!IS_ERR_OR_NULL(_T)) gpio_device_put(_T)) + +struct device *gpio_device_to_device(struct gpio_device *gdev); + bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset); int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset); void 
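The DEFINE_FREE() hook above (together with the new <linux/cleanup.h> include) enables scope-based cleanup for the new gpio_device lookup helpers; a sketch with a made-up label:

static int my_find_bank_base(void)
{
	/* The reference is dropped automatically when gdev goes out of scope. */
	struct gpio_device *gdev __free(gpio_device_put) =
		gpio_device_find_by_label("my-gpio-bank");

	if (!gdev)
		return -ENODEV;

	return gpio_device_get_base(gdev);
}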
gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset); @@ -764,14 +769,23 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc, enum gpiod_flags dflags); void gpiochip_free_own_desc(struct gpio_desc *desc); +struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum); +struct gpio_desc * +gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum); + +struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev); + #ifdef CONFIG_GPIOLIB /* lock/unlock as IRQ */ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset); void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset); - struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); +struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc); + +/* struct gpio_device getters */ +int gpio_device_get_base(struct gpio_device *gdev); #else /* CONFIG_GPIOLIB */ @@ -786,6 +800,18 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) return ERR_PTR(-ENODEV); } +static inline int gpio_device_get_base(struct gpio_device *gdev) +{ + WARN_ON(1); + return -ENODEV; +} + +static inline struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc) +{ + WARN_ON(1); + return ERR_PTR(-ENODEV); +} + static inline int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset) { diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h new file mode 100644 index 0000000000..86ea7c63a0 --- /dev/null +++ b/include/linux/habanalabs/cpucp_if.h @@ -0,0 +1,1417 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2020-2023 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef CPUCP_IF_H +#define CPUCP_IF_H + +#include <linux/types.h> +#include <linux/if_ether.h> + +#include "hl_boot_if.h" + +#define NUM_HBM_PSEUDO_CH 2 +#define NUM_HBM_CH_PER_DEV 8 +#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT 0 +#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK 0x00000001 +#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_SHIFT 1 +#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK 0x00000002 +#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_SHIFT 2 +#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK 0x00000004 +#define CPUCP_PKT_HBM_ECC_INFO_DERR_SHIFT 3 +#define CPUCP_PKT_HBM_ECC_INFO_DERR_MASK 0x00000008 +#define CPUCP_PKT_HBM_ECC_INFO_SERR_SHIFT 4 +#define CPUCP_PKT_HBM_ECC_INFO_SERR_MASK 0x00000010 +#define CPUCP_PKT_HBM_ECC_INFO_TYPE_SHIFT 5 +#define CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK 0x00000020 +#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT 6 +#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK 0x000007C0 + +#define PLL_MAP_MAX_BITS 128 +#define PLL_MAP_LEN (PLL_MAP_MAX_BITS / 8) + +enum eq_event_id { + EQ_EVENT_NIC_STS_REQUEST = 0, + EQ_EVENT_PWR_MODE_0, + EQ_EVENT_PWR_MODE_1, + EQ_EVENT_PWR_MODE_2, + EQ_EVENT_PWR_MODE_3, + EQ_EVENT_PWR_BRK_ENTRY, + EQ_EVENT_PWR_BRK_EXIT, + EQ_EVENT_HEARTBEAT, +}; + +/* + * info of the pkt queue pointers in the first async occurrence + */ +struct cpucp_pkt_sync_err { + __le32 pi; + __le32 ci; +}; + +struct hl_eq_hbm_ecc_data { + /* SERR counter */ + __le32 sec_cnt; + /* DERR counter */ + __le32 dec_cnt; + /* Supplemental Information according to the mask bits */ + __le32 hbm_ecc_info; + /* Address in hbm where the ecc happened */ + __le32 first_addr; + /* SERR continuous address counter */ + __le32 sec_cont_cnt; + __le32 pad; +}; + +/* + * EVENT QUEUE + */ + +struct hl_eq_header { + __le32 reserved; + __le32 ctl; +}; + +struct hl_eq_ecc_data { + __le64 ecc_address; + __le64 ecc_syndrom; + __u8 memory_wrapper_idx; + __u8 is_critical; + 
__le16 block_id; + __u8 pad[4]; +}; + +enum hl_sm_sei_cause { + SM_SEI_SO_OVERFLOW, + SM_SEI_LBW_4B_UNALIGNED, + SM_SEI_AXI_RESPONSE_ERR +}; + +struct hl_eq_sm_sei_data { + __le32 sei_log; + /* enum hl_sm_sei_cause */ + __u8 sei_cause; + __u8 pad[3]; +}; + +enum hl_fw_alive_severity { + FW_ALIVE_SEVERITY_MINOR, + FW_ALIVE_SEVERITY_CRITICAL +}; + +struct hl_eq_fw_alive { + __le64 uptime_seconds; + __le32 process_id; + __le32 thread_id; + /* enum hl_fw_alive_severity */ + __u8 severity; + __u8 pad[7]; +}; + +struct hl_eq_intr_cause { + __le64 intr_cause_data; +}; + +struct hl_eq_pcie_drain_ind_data { + struct hl_eq_intr_cause intr_cause; + __le64 drain_wr_addr_lbw; + __le64 drain_rd_addr_lbw; + __le64 drain_wr_addr_hbw; + __le64 drain_rd_addr_hbw; +}; + +struct hl_eq_razwi_lbw_info_regs { + __le32 rr_aw_razwi_reg; + __le32 rr_aw_razwi_id_reg; + __le32 rr_ar_razwi_reg; + __le32 rr_ar_razwi_id_reg; +}; + +struct hl_eq_razwi_hbw_info_regs { + __le32 rr_aw_razwi_hi_reg; + __le32 rr_aw_razwi_lo_reg; + __le32 rr_aw_razwi_id_reg; + __le32 rr_ar_razwi_hi_reg; + __le32 rr_ar_razwi_lo_reg; + __le32 rr_ar_razwi_id_reg; +}; + +/* razwi_happened masks */ +#define RAZWI_HAPPENED_HBW 0x1 +#define RAZWI_HAPPENED_LBW 0x2 +#define RAZWI_HAPPENED_AW 0x4 +#define RAZWI_HAPPENED_AR 0x8 + +struct hl_eq_razwi_info { + __le32 razwi_happened_mask; + union { + struct hl_eq_razwi_lbw_info_regs lbw; + struct hl_eq_razwi_hbw_info_regs hbw; + }; + __le32 pad; +}; + +struct hl_eq_razwi_with_intr_cause { + struct hl_eq_razwi_info razwi_info; + struct hl_eq_intr_cause intr_cause; +}; + +#define HBM_CA_ERR_CMD_LIFO_LEN 8 +#define HBM_RD_ERR_DATA_LIFO_LEN 8 +#define HBM_WR_PAR_CMD_LIFO_LEN 11 + +enum hl_hbm_sei_cause { + /* Command/address parity error event is split into 2 events due to + * size limitation: ODD suffix for odd HBM CK_t cycles and EVEN suffix + * for even HBM CK_t cycles + */ + HBM_SEI_CMD_PARITY_EVEN, + HBM_SEI_CMD_PARITY_ODD, + /* Read errors can be reflected as a combination of SERR/DERR/parity + * errors. Therefore, we define one event for all read error types. + * LKD will perform further processing. 
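+ * As a rough illustration (hypothetical consumer code, not part of
+ * this interface), such processing could start from the per-type
+ * counters carried in hl_hbm_sei_header below, e.g.:
+ *
+ *	if (hdr->ecc_serr_cnt)
+ *		account_serr(hdr->ecc_serr_cnt);
+ *	if (hdr->ecc_derr_cnt)
+ *		account_derr(hdr->ecc_derr_cnt);
+ *	if (hdr->read_par_cnt)
+ *		account_rd_parity(hdr->read_par_cnt);
+ *
+ * where the account_*() helpers are placeholders.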
+ */ + HBM_SEI_READ_ERR, + HBM_SEI_WRITE_DATA_PARITY_ERR, + HBM_SEI_CATTRIP, + HBM_SEI_MEM_BIST_FAIL, + HBM_SEI_DFI, + HBM_SEI_INV_TEMP_READ_OUT, + HBM_SEI_BIST_FAIL, +}; + +/* Masks for parsing hl_hbm_sei_header fields */ +#define HBM_ECC_SERR_CNTR_MASK 0xFF +#define HBM_ECC_DERR_CNTR_MASK 0xFF00 +#define HBM_RD_PARITY_CNTR_MASK 0xFF0000 + +/* HBM index and MC index are known by the event_id */ +struct hl_hbm_sei_header { + union { + /* relevant only in case of HBM read error */ + struct { + __u8 ecc_serr_cnt; + __u8 ecc_derr_cnt; + __u8 read_par_cnt; + __u8 reserved; + }; + /* All other cases */ + __le32 cnt; + }; + __u8 sei_cause; /* enum hl_hbm_sei_cause */ + __u8 mc_channel; /* range: 0-3 */ + __u8 mc_pseudo_channel; /* range: 0-7 */ + __u8 is_critical; +}; + +#define HBM_RD_ADDR_SID_SHIFT 0 +#define HBM_RD_ADDR_SID_MASK 0x1 +#define HBM_RD_ADDR_BG_SHIFT 1 +#define HBM_RD_ADDR_BG_MASK 0x6 +#define HBM_RD_ADDR_BA_SHIFT 3 +#define HBM_RD_ADDR_BA_MASK 0x18 +#define HBM_RD_ADDR_COL_SHIFT 5 +#define HBM_RD_ADDR_COL_MASK 0x7E0 +#define HBM_RD_ADDR_ROW_SHIFT 11 +#define HBM_RD_ADDR_ROW_MASK 0x3FFF800 + +struct hbm_rd_addr { + union { + /* bit fields are only for FW use */ + struct { + u32 dbg_rd_err_addr_sid:1; + u32 dbg_rd_err_addr_bg:2; + u32 dbg_rd_err_addr_ba:2; + u32 dbg_rd_err_addr_col:6; + u32 dbg_rd_err_addr_row:15; + u32 reserved:6; + }; + __le32 rd_addr_val; + }; +}; + +#define HBM_RD_ERR_BEAT_SHIFT 2 +/* dbg_rd_err_misc fields: */ +/* Read parity is calculated per DW on every beat */ +#define HBM_RD_ERR_PAR_ERR_BEAT0_SHIFT 0 +#define HBM_RD_ERR_PAR_ERR_BEAT0_MASK 0x3 +#define HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT 8 +#define HBM_RD_ERR_PAR_DATA_BEAT0_MASK 0x300 +/* ECC is calculated per PC on every beat */ +#define HBM_RD_ERR_SERR_BEAT0_SHIFT 16 +#define HBM_RD_ERR_SERR_BEAT0_MASK 0x10000 +#define HBM_RD_ERR_DERR_BEAT0_SHIFT 24 +#define HBM_RD_ERR_DERR_BEAT0_MASK 0x100000 + +struct hl_eq_hbm_sei_read_err_intr_info { + /* DFI_RD_ERR_REP_ADDR */ + struct hbm_rd_addr dbg_rd_err_addr; + /* DFI_RD_ERR_REP_ERR */ + union { + struct { + /* bit fields are only for FW use */ + u32 dbg_rd_err_par:8; + u32 dbg_rd_err_par_data:8; + u32 dbg_rd_err_serr:4; + u32 dbg_rd_err_derr:4; + u32 reserved:8; + }; + __le32 dbg_rd_err_misc; + }; + /* DFI_RD_ERR_REP_DM */ + __le32 dbg_rd_err_dm; + /* DFI_RD_ERR_REP_SYNDROME */ + __le32 dbg_rd_err_syndrome; + /* DFI_RD_ERR_REP_DATA */ + __le32 dbg_rd_err_data[HBM_RD_ERR_DATA_LIFO_LEN]; +}; + +struct hl_eq_hbm_sei_ca_par_intr_info { + /* 14 LSBs */ + __le16 dbg_row[HBM_CA_ERR_CMD_LIFO_LEN]; + /* 18 LSBs */ + __le32 dbg_col[HBM_CA_ERR_CMD_LIFO_LEN]; +}; + +#define WR_PAR_LAST_CMD_COL_SHIFT 0 +#define WR_PAR_LAST_CMD_COL_MASK 0x3F +#define WR_PAR_LAST_CMD_BG_SHIFT 6 +#define WR_PAR_LAST_CMD_BG_MASK 0xC0 +#define WR_PAR_LAST_CMD_BA_SHIFT 8 +#define WR_PAR_LAST_CMD_BA_MASK 0x300 +#define WR_PAR_LAST_CMD_SID_SHIFT 10 +#define WR_PAR_LAST_CMD_SID_MASK 0x400 + +/* Row address isn't latched */ +struct hbm_sei_wr_cmd_address { + /* DFI_DERR_LAST_CMD */ + union { + struct { + /* bit fields are only for FW use */ + u32 col:6; + u32 bg:2; + u32 ba:2; + u32 sid:1; + u32 reserved:21; + }; + __le32 dbg_wr_cmd_addr; + }; +}; + +struct hl_eq_hbm_sei_wr_par_intr_info { + /* entry 0: WR command address from the 1st cycle prior to the error + * entry 1: WR command address from the 2nd cycle prior to the error + * and so on... 
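+ *
+ * As an illustration only (hypothetical consumer code), one LIFO entry
+ * can be decoded with the WR_PAR_LAST_CMD_* masks defined above:
+ *
+ *	u32 v = le32_to_cpu(info->dbg_last_wr_cmds[i].dbg_wr_cmd_addr);
+ *	u32 col = (v & WR_PAR_LAST_CMD_COL_MASK) >> WR_PAR_LAST_CMD_COL_SHIFT;
+ *	u32 bg = (v & WR_PAR_LAST_CMD_BG_MASK) >> WR_PAR_LAST_CMD_BG_SHIFT;
+ *	u32 ba = (v & WR_PAR_LAST_CMD_BA_MASK) >> WR_PAR_LAST_CMD_BA_SHIFT;
+ *	u32 sid = (v & WR_PAR_LAST_CMD_SID_MASK) >> WR_PAR_LAST_CMD_SID_SHIFT;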
+ */ + struct hbm_sei_wr_cmd_address dbg_last_wr_cmds[HBM_WR_PAR_CMD_LIFO_LEN]; + /* derr[0:1] - 1st HBM cycle DERR output + * derr[2:3] - 2nd HBM cycle DERR output + */ + __u8 dbg_derr; + /* extend to reach 8B */ + __u8 pad[3]; +}; + +/* + * this struct represents the following sei causes: + * command parity, ECC double error, ECC single error, dfi error, cattrip, + * temperature read-out, read parity error and write parity error. + * some only use the header while some have extra data. + */ +struct hl_eq_hbm_sei_data { + struct hl_hbm_sei_header hdr; + union { + struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_even_info; + struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_odd_info; + struct hl_eq_hbm_sei_read_err_intr_info read_err_info; + struct hl_eq_hbm_sei_wr_par_intr_info wr_parity_info; + }; +}; + +/* Engine/farm arc interrupt type */ +enum hl_engine_arc_interrupt_type { + /* Qman/farm ARC DCCM QUEUE FULL interrupt type */ + ENGINE_ARC_DCCM_QUEUE_FULL_IRQ = 1 +}; + +/* Data structure specifies details of payload of DCCM QUEUE FULL interrupt */ +struct hl_engine_arc_dccm_queue_full_irq { + /* Queue index value which caused DCCM QUEUE FULL */ + __le32 queue_index; + __le32 pad; +}; + +/* Data structure specifies details of QM/FARM ARC interrupt */ +struct hl_eq_engine_arc_intr_data { + /* ARC engine id e.g. DCORE0_TPC0_QM_ARC, DCORE0_TCP1_QM_ARC */ + __le32 engine_id; + __le32 intr_type; /* enum hl_engine_arc_interrupt_type */ + /* More info related to the interrupt e.g. queue index + * incase of DCCM_QUEUE_FULL interrupt. + */ + __le64 payload; + __le64 pad[5]; +}; + +#define ADDR_DEC_ADDRESS_COUNT_MAX 4 + +/* Data structure specifies details of ADDR_DEC interrupt */ +struct hl_eq_addr_dec_intr_data { + struct hl_eq_intr_cause intr_cause; + __le64 addr[ADDR_DEC_ADDRESS_COUNT_MAX]; + __u8 addr_cnt; + __u8 pad[7]; +}; + +struct hl_eq_entry { + struct hl_eq_header hdr; + union { + __le64 data_placeholder; + struct hl_eq_ecc_data ecc_data; + struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Obsolete */ + struct hl_eq_sm_sei_data sm_sei_data; + struct cpucp_pkt_sync_err pkt_sync_err; + struct hl_eq_fw_alive fw_alive; + struct hl_eq_intr_cause intr_cause; + struct hl_eq_pcie_drain_ind_data pcie_drain_ind_data; + struct hl_eq_razwi_info razwi_info; + struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause; + struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */ + struct hl_eq_engine_arc_intr_data arc_data; + struct hl_eq_addr_dec_intr_data addr_dec; + __le64 data[7]; + }; +}; + +#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry) + +#define EQ_CTL_READY_SHIFT 31 +#define EQ_CTL_READY_MASK 0x80000000 + +#define EQ_CTL_EVENT_TYPE_SHIFT 16 +#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000 + +#define EQ_CTL_INDEX_SHIFT 0 +#define EQ_CTL_INDEX_MASK 0x0000FFFF + +enum pq_init_status { + PQ_INIT_STATUS_NA = 0, + PQ_INIT_STATUS_READY_FOR_CP, + PQ_INIT_STATUS_READY_FOR_HOST, + PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI, + PQ_INIT_STATUS_LEN_NOT_POWER_OF_TWO_ERR, + PQ_INIT_STATUS_ILLEGAL_Q_ADDR_ERR +}; + +/* + * CpuCP Primary Queue Packets + * + * During normal operation, the host's kernel driver needs to send various + * messages to CpuCP, usually either to SET some value into a H/W periphery or + * to GET the current value of some H/W periphery. For example, SET the + * frequency of MME/TPC and GET the value of the thermal sensor. + * + * These messages can be initiated either by the User application or by the + * host's driver itself, e.g. power management code. 
In either case, the + * communication from the host's driver to CpuCP will *always* be in + * synchronous mode, meaning that the host will send a single message and poll + * until the message was acknowledged and the results are ready (if results are + * needed). + * + * This means that only a single message can be sent at a time and the host's + * driver must wait for its result before sending the next message. Having said + * that, because these are control messages which are sent at a relatively low + * frequency, this limitation seems acceptable. It's important to note that + * in case of multiple devices, messages to different devices *can* be sent + * at the same time. + * + * The message, inputs/outputs (if relevant) and fence object will be located + * on the device DDR at an address that will be determined by the host's driver. + * During device initialization phase, the host will pass to CpuCP that address. + * Most of the message types will contain inputs/outputs inside the message + * itself. The common part of each message will contain the opcode of the + * message (its type) and a field representing a fence object. + * + * When the host's driver wishes to send a message to CPU CP, it will write the + * message contents to the device DDR, clear the fence object and then write to + * the PSOC_ARC1_AUX_SW_INTR, to issue interrupt 121 to ARC Management CPU. + * + * Upon receiving the interrupt (#121), CpuCP will read the message from the + * DDR. In case the message is a SET operation, CpuCP will first perform the + * operation and then write to the fence object on the device DDR. In case the + * message is a GET operation, CpuCP will first fill the results section on the + * device DDR and then write to the fence object. If an error occurred, CpuCP + * will fill the rc field with the right error code. + * + * In the meantime, the host's driver will poll on the fence object. Once the + * host sees that the fence object is signaled, it will read the results from + * the device DDR (if relevant) and resume the code execution in the host's + * driver. + * + * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8 + * so the value being put by the host's driver matches the value read by CpuCP. + * + * Non-QMAN packets should be limited to values 1 through (2^8 - 1) + * + * Detailed description: + * + * CPUCP_PACKET_DISABLE_PCI_ACCESS - + * After receiving this packet the embedded CPU must NOT issue PCI + * transactions (read/write) towards the Host CPU. This also includes + * sending MSI-X interrupts. + * This packet is usually sent before the device is moved to D3Hot state. + * + * CPUCP_PACKET_ENABLE_PCI_ACCESS - + * After receiving this packet the embedded CPU is allowed to issue PCI + * transactions towards the Host CPU, including sending MSI-X interrupts. + * This packet is usually sent after the device is moved to D0 state. + * + * CPUCP_PACKET_TEMPERATURE_GET - + * Fetch the current temperature / Max / Max Hyst / Critical / + * Critical Hyst of a specified thermal sensor. The packet's + * arguments specify the desired sensor and the field to get. + * + * CPUCP_PACKET_VOLTAGE_GET - + * Fetch the voltage / Max / Min of a specified sensor. The packet's + * arguments specify the sensor and type. + * + * CPUCP_PACKET_CURRENT_GET - + * Fetch the current / Max / Min of a specified sensor. The packet's + * arguments specify the sensor and type. + * + * CPUCP_PACKET_FAN_SPEED_GET - + * Fetch the speed / Max / Min of a specified fan. 
The packet's + * arguments specify the sensor and type. + * + * CPUCP_PACKET_PWM_GET - + * Fetch the pwm value / mode of a specified pwm. The packet's + * arguments specify the sensor and type. + * + * CPUCP_PACKET_PWM_SET - + * Set the pwm value / mode of a specified pwm. The packet's + * arguments specify the sensor, type and value. + * + * CPUCP_PACKET_FREQUENCY_SET - + * Set the frequency of a specified PLL. The packet's arguments specify + * the PLL and the desired frequency. The actual frequency in the device + * might differ from the requested frequency. + * + * CPUCP_PACKET_FREQUENCY_GET - + * Fetch the frequency of a specified PLL. The packet's arguments specify + * the PLL. + * + * CPUCP_PACKET_LED_SET - + * Set the state of a specified led. The packet's arguments + * specify the led and the desired state. + * + * CPUCP_PACKET_I2C_WR - + * Write 32-bit value to I2C device. The packet's arguments specify the + * I2C bus, address and value. + * + * CPUCP_PACKET_I2C_RD - + * Read 32-bit value from I2C device. The packet's arguments specify the + * I2C bus and address. + * + * CPUCP_PACKET_INFO_GET - + * Fetch information from the device as specified in the packet's + * structure. The host's driver passes the max size it allows the CpuCP to + * write to the structure, to prevent data corruption in case of + * mismatched driver/FW versions. + * + * CPUCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed + * + * CPUCP_PACKET_UNMASK_RAZWI_IRQ - + * Unmask the given IRQ. The IRQ number is specified in the value field. + * The packet is sent after receiving an interrupt and printing its + * relevant information. + * + * CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY - + * Unmask the given IRQs. The IRQs numbers are specified in an array right + * after the cpucp_packet structure, where its first element is the array + * length. The packet is sent after a soft reset was done in order to + * handle any interrupts that were sent during the reset process. + * + * CPUCP_PACKET_TEST - + * Test packet for CpuCP connectivity. The CPU will put the fence value + * in the result field. + * + * CPUCP_PACKET_FREQUENCY_CURR_GET - + * Fetch the current frequency of a specified PLL. The packet's arguments + * specify the PLL. + * + * CPUCP_PACKET_MAX_POWER_GET - + * Fetch the maximal power of the device. + * + * CPUCP_PACKET_MAX_POWER_SET - + * Set the maximal power of the device. The packet's arguments specify + * the power. + * + * CPUCP_PACKET_EEPROM_DATA_GET - + * Get EEPROM data from the CpuCP kernel. The buffer is specified in the + * addr field. The CPU will put the returned data size in the result + * field. In addition, the host's driver passes the max size it allows the + * CpuCP to write to the structure, to prevent data corruption in case of + * mismatched driver/FW versions. + * + * CPUCP_PACKET_NIC_INFO_GET - + * Fetch information from the device regarding the NIC. the host's driver + * passes the max size it allows the CpuCP to write to the structure, to + * prevent data corruption in case of mismatched driver/FW versions. + * + * CPUCP_PACKET_TEMPERATURE_SET - + * Set the value of the offset property of a specified thermal sensor. + * The packet's arguments specify the desired sensor and the field to + * set. + * + * CPUCP_PACKET_VOLTAGE_SET - + * Trigger the reset_history property of a specified voltage sensor. + * The packet's arguments specify the desired sensor and the field to + * set. + * + * CPUCP_PACKET_CURRENT_SET - + * Trigger the reset_history property of a specified current sensor. 
+ * The packet's arguments specify the desired sensor and the field to + * set. + * + * CPUCP_PACKET_PCIE_THROUGHPUT_GET - + * Get throughput of PCIe. + * The packet's arguments specify the transaction direction (TX/RX). + * The window measurement is 10[msec], and the return value is in KB/sec. + * + * CPUCP_PACKET_PCIE_REPLAY_CNT_GET + * Replay count measures number of "replay" events, which is basically the + * number of retries done by PCIe. + * + * CPUCP_PACKET_TOTAL_ENERGY_GET - + * Total Energy is measurement of energy from the time FW Linux + * is loaded. It is calculated by multiplying the average power + * by time (passed from armcp start). The units are in millijoules. + * + * CPUCP_PACKET_PLL_INFO_GET - + * Fetch frequencies of PLL from the required PLL IP. + * The packet's arguments specify the device PLL type + * Pll type is the PLL from device pll_index enum. + * The result is composed of 4 outputs, each is 16-bit + * frequency in MHz. + * + * CPUCP_PACKET_POWER_GET - + * Fetch the present power consumption of the device (Current * Voltage). + * + * CPUCP_PACKET_NIC_PFC_SET - + * Enable/Disable the NIC PFC feature. The packet's arguments specify the + * NIC port, relevant lanes to configure and one bit indication for + * enable/disable. + * + * CPUCP_PACKET_NIC_FAULT_GET - + * Fetch the current indication for local/remote faults from the NIC MAC. + * The result is 32-bit value of the relevant register. + * + * CPUCP_PACKET_NIC_LPBK_SET - + * Enable/Disable the MAC loopback feature. The packet's arguments specify + * the NIC port, relevant lanes to configure and one bit indication for + * enable/disable. + * + * CPUCP_PACKET_NIC_MAC_INIT - + * Configure the NIC MAC channels. The packet's arguments specify the + * NIC port and the speed. + * + * CPUCP_PACKET_MSI_INFO_SET - + * Set the index number for each supported msi type going from + * host to device. + * + * CPUCP_PACKET_NIC_XPCS91_REGS_GET - + * Fetch the un/correctable counters values from the NIC MAC. + * + * CPUCP_PACKET_NIC_STAT_REGS_GET - + * Fetch various NIC MAC counters from the NIC STAT. + * + * CPUCP_PACKET_NIC_STAT_REGS_CLR - + * Clear the various NIC MAC counters in the NIC STAT. + * + * CPUCP_PACKET_NIC_STAT_REGS_ALL_GET - + * Fetch all NIC MAC counters from the NIC STAT. + * + * CPUCP_PACKET_IS_IDLE_CHECK - + * Check if the device is IDLE in regard to the DMA/compute engines + * and QMANs. The f/w will return a bitmask where each bit represents + * a different engine or QMAN according to enum cpucp_idle_mask. + * The bit will be 1 if the engine is NOT idle. + * + * CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET - + * Fetch all HBM replaced-rows and pending-to-be-replaced rows data. + * + * CPUCP_PACKET_HBM_PENDING_ROWS_STATUS - + * Fetch status of HBM rows pending replacement and need a reboot to + * be replaced. + * + * CPUCP_PACKET_POWER_SET - + * Resets power history of device to 0. + * + * CPUCP_PACKET_ENGINE_CORE_ASID_SET - + * Packet to perform engine core ASID configuration. + * + * CPUCP_PACKET_SEC_ATTEST_GET - + * Get the attestation data that is collected during various stages of the + * boot sequence. The attestation data is also hashed with some unique + * number (nonce) provided by the host to prevent replay attacks. + * Public key and certificate are also provided as part of the FW response. + * + * CPUCP_PACKET_MONITOR_DUMP_GET - + * Get monitors registers dump from the CpuCP kernel. + * The CPU will put the registers dump in a buffer allocated by the driver, + * whose address is passed via the CpuCP packet. 
In addition, the host's driver + * passes the max size it allows the CpuCP to write to the structure, to prevent + * data corruption in case of mismatched driver/FW versions. + * Obsolete. + * + * CPUCP_PACKET_GENERIC_PASSTHROUGH - + * Generic opcode for all firmware info that is only passed to host + * through the LKD, without getting parsed there. + * + * CPUCP_PACKET_ACTIVE_STATUS_SET - + * LKD sends FW indication whether device is free or in use, this indication is reported + * also to the BMC. + * + * CPUCP_PACKET_SOFT_RESET - + * Packet to perform soft-reset. + * + * CPUCP_PACKET_INTS_REGISTER - + * Packet to inform FW that queues have been established and LKD is ready to receive + * EQ events. + */ + +enum cpucp_packet_id { + CPUCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */ + CPUCP_PACKET_ENABLE_PCI_ACCESS, /* internal */ + CPUCP_PACKET_TEMPERATURE_GET, /* sysfs */ + CPUCP_PACKET_VOLTAGE_GET, /* sysfs */ + CPUCP_PACKET_CURRENT_GET, /* sysfs */ + CPUCP_PACKET_FAN_SPEED_GET, /* sysfs */ + CPUCP_PACKET_PWM_GET, /* sysfs */ + CPUCP_PACKET_PWM_SET, /* sysfs */ + CPUCP_PACKET_FREQUENCY_SET, /* sysfs */ + CPUCP_PACKET_FREQUENCY_GET, /* sysfs */ + CPUCP_PACKET_LED_SET, /* debugfs */ + CPUCP_PACKET_I2C_WR, /* debugfs */ + CPUCP_PACKET_I2C_RD, /* debugfs */ + CPUCP_PACKET_INFO_GET, /* IOCTL */ + CPUCP_PACKET_FLASH_PROGRAM_REMOVED, + CPUCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */ + CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */ + CPUCP_PACKET_TEST, /* internal */ + CPUCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */ + CPUCP_PACKET_MAX_POWER_GET, /* sysfs */ + CPUCP_PACKET_MAX_POWER_SET, /* sysfs */ + CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */ + CPUCP_PACKET_NIC_INFO_GET, /* internal */ + CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */ + CPUCP_PACKET_VOLTAGE_SET, /* sysfs */ + CPUCP_PACKET_CURRENT_SET, /* sysfs */ + CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */ + CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */ + CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */ + CPUCP_PACKET_PLL_INFO_GET, /* internal */ + CPUCP_PACKET_NIC_STATUS, /* internal */ + CPUCP_PACKET_POWER_GET, /* internal */ + CPUCP_PACKET_NIC_PFC_SET, /* internal */ + CPUCP_PACKET_NIC_FAULT_GET, /* internal */ + CPUCP_PACKET_NIC_LPBK_SET, /* internal */ + CPUCP_PACKET_NIC_MAC_CFG, /* internal */ + CPUCP_PACKET_MSI_INFO_SET, /* internal */ + CPUCP_PACKET_NIC_XPCS91_REGS_GET, /* internal */ + CPUCP_PACKET_NIC_STAT_REGS_GET, /* internal */ + CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */ + CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */ + CPUCP_PACKET_IS_IDLE_CHECK, /* internal */ + CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET,/* internal */ + CPUCP_PACKET_HBM_PENDING_ROWS_STATUS, /* internal */ + CPUCP_PACKET_POWER_SET, /* internal */ + CPUCP_PACKET_RESERVED, /* not used */ + CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */ + CPUCP_PACKET_RESERVED2, /* not used */ + CPUCP_PACKET_SEC_ATTEST_GET, /* internal */ + CPUCP_PACKET_RESERVED3, /* not used */ + CPUCP_PACKET_RESERVED4, /* not used */ + CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */ + CPUCP_PACKET_RESERVED5, /* not used */ + CPUCP_PACKET_RESERVED6, /* not used */ + CPUCP_PACKET_RESERVED7, /* not used */ + CPUCP_PACKET_GENERIC_PASSTHROUGH, /* IOCTL */ + CPUCP_PACKET_RESERVED8, /* not used */ + CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */ + CPUCP_PACKET_RESERVED9, /* not used */ + CPUCP_PACKET_RESERVED10, /* not used */ + CPUCP_PACKET_RESERVED11, /* not used */ + CPUCP_PACKET_RESERVED12, /* internal */ + CPUCP_PACKET_RESERVED13, /* internal */ + CPUCP_PACKET_SOFT_RESET, /* internal */ + 
CPUCP_PACKET_INTS_REGISTER, /* internal */ + CPUCP_PACKET_ID_MAX /* must be last */ +}; + +#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5 + +#define CPUCP_PKT_CTL_RC_SHIFT 12 +#define CPUCP_PKT_CTL_RC_MASK 0x0000F000 + +#define CPUCP_PKT_CTL_OPCODE_SHIFT 16 +#define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000 + +#define CPUCP_PKT_RES_PLL_OUT0_SHIFT 0 +#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFFull +#define CPUCP_PKT_RES_PLL_OUT1_SHIFT 16 +#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000ull +#define CPUCP_PKT_RES_PLL_OUT2_SHIFT 32 +#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000ull +#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48 +#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull + +#define CPUCP_PKT_RES_EEPROM_OUT0_SHIFT 0 +#define CPUCP_PKT_RES_EEPROM_OUT0_MASK 0x000000000000FFFFull +#define CPUCP_PKT_RES_EEPROM_OUT1_SHIFT 16 +#define CPUCP_PKT_RES_EEPROM_OUT1_MASK 0x0000000000FF0000ull + +#define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0 +#define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull +#define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1 +#define CPUCP_PKT_VAL_PFC_IN2_MASK 0x000000000000001Eull + +#define CPUCP_PKT_VAL_LPBK_IN1_SHIFT 0 +#define CPUCP_PKT_VAL_LPBK_IN1_MASK 0x0000000000000001ull +#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1 +#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull + +#define CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT 0 +#define CPUCP_PKT_VAL_MAC_CNT_IN1_MASK 0x0000000000000001ull +#define CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT 1 +#define CPUCP_PKT_VAL_MAC_CNT_IN2_MASK 0x00000000FFFFFFFEull + +/* heartbeat status bits */ +#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0 +#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001 + +struct cpucp_packet { + union { + __le64 value; /* For SET packets */ + __le64 result; /* For GET packets */ + __le64 addr; /* For PQ */ + }; + + __le32 ctl; + + __le32 fence; /* Signal to host that message is completed */ + + union { + struct {/* For temperature/current/voltage/fan/pwm get/set */ + __le16 sensor_index; + __le16 type; + }; + + struct { /* For I2C read/write */ + __u8 i2c_bus; + __u8 i2c_addr; + __u8 i2c_reg; + /* + * In legacy implementations, i2c_len was not present, + * was unused and just added as pad. + * So if i2c_len is 0, it is treated as legacy + * and r/w 1 Byte, else if i2c_len is specified, + * it's treated as new multibyte r/w support. + */ + __u8 i2c_len; + }; + + struct {/* For PLL info fetch */ + __le16 pll_type; + /* TODO pll_reg is kept temporarily before removal */ + __le16 pll_reg; + }; + + /* For any general request */ + __le32 index; + + /* For frequency get/set */ + __le32 pll_index; + + /* For led set */ + __le32 led_index; + + /* For get CpuCP info/EEPROM data/NIC info */ + __le32 data_max_size; + + /* + * For any general status bitmask. Shall be used whenever the + * result cannot be used to hold general purpose data. 
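+ *
+ * As an illustrative sketch only (using the heartbeat status bits
+ * defined above, not an authoritative flow), a heartbeat response
+ * could be checked with:
+ *
+ *	bool eq_fault = le32_to_cpu(pkt->status_mask) &
+ *			CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK;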
+ */ + __le32 status_mask; + + /* random, used once number, for security packets */ + __le32 nonce; + }; + + union { + /* For NIC requests */ + __le32 port_index; + + /* For Generic packet sub index */ + __le32 pkt_subidx; + }; +}; + +struct cpucp_unmask_irq_arr_packet { + struct cpucp_packet cpucp_pkt; + __le32 length; + __le32 irqs[]; +}; + +struct cpucp_nic_status_packet { + struct cpucp_packet cpucp_pkt; + __le32 length; + __le32 data[]; +}; + +struct cpucp_array_data_packet { + struct cpucp_packet cpucp_pkt; + __le32 length; + __le32 data[]; +}; + +enum cpucp_led_index { + CPUCP_LED0_INDEX = 0, + CPUCP_LED1_INDEX, + CPUCP_LED2_INDEX, + CPUCP_LED_MAX_INDEX = CPUCP_LED2_INDEX +}; + +/* + * enum cpucp_packet_rc - Error return code + * @cpucp_packet_success -> in case of success. + * @cpucp_packet_invalid -> this is to support first generation platforms. + * @cpucp_packet_fault -> in case of processing error like failing to + * get device binding or semaphore etc. + * @cpucp_packet_invalid_pkt -> when cpucp packet is un-supported. + * @cpucp_packet_invalid_params -> when checking parameter like length of buffer + * or attribute value etc. + * @cpucp_packet_rc_max -> It indicates size of enum so should be at last. + */ +enum cpucp_packet_rc { + cpucp_packet_success, + cpucp_packet_invalid, + cpucp_packet_fault, + cpucp_packet_invalid_pkt, + cpucp_packet_invalid_params, + cpucp_packet_rc_max +}; + +/* + * cpucp_temp_type should adhere to hwmon_temp_attributes + * defined in Linux kernel hwmon.h file + */ +enum cpucp_temp_type { + cpucp_temp_input, + cpucp_temp_min = 4, + cpucp_temp_min_hyst, + cpucp_temp_max = 6, + cpucp_temp_max_hyst, + cpucp_temp_crit, + cpucp_temp_crit_hyst, + cpucp_temp_offset = 19, + cpucp_temp_lowest = 21, + cpucp_temp_highest = 22, + cpucp_temp_reset_history = 23, + cpucp_temp_warn = 24, + cpucp_temp_max_crit = 25, + cpucp_temp_max_warn = 26, +}; + +enum cpucp_in_attributes { + cpucp_in_input, + cpucp_in_min, + cpucp_in_max, + cpucp_in_lowest = 6, + cpucp_in_highest = 7, + cpucp_in_reset_history, + cpucp_in_intr_alarm_a, + cpucp_in_intr_alarm_b, +}; + +enum cpucp_curr_attributes { + cpucp_curr_input, + cpucp_curr_min, + cpucp_curr_max, + cpucp_curr_lowest = 6, + cpucp_curr_highest = 7, + cpucp_curr_reset_history +}; + +enum cpucp_fan_attributes { + cpucp_fan_input, + cpucp_fan_min = 2, + cpucp_fan_max +}; + +enum cpucp_pwm_attributes { + cpucp_pwm_input, + cpucp_pwm_enable +}; + +enum cpucp_pcie_throughput_attributes { + cpucp_pcie_throughput_tx, + cpucp_pcie_throughput_rx +}; + +/* TODO temporary kept before removal */ +enum cpucp_pll_reg_attributes { + cpucp_pll_nr_reg, + cpucp_pll_nf_reg, + cpucp_pll_od_reg, + cpucp_pll_div_factor_reg, + cpucp_pll_div_sel_reg +}; + +/* TODO temporary kept before removal */ +enum cpucp_pll_type_attributes { + cpucp_pll_cpu, + cpucp_pll_pci, +}; + +/* + * cpucp_power_type aligns with hwmon_power_attributes + * defined in Linux kernel hwmon.h file + */ +enum cpucp_power_type { + CPUCP_POWER_INPUT = 8, + CPUCP_POWER_INPUT_HIGHEST = 9, + CPUCP_POWER_RESET_INPUT_HISTORY = 11 +}; + +/* + * MSI type enumeration table for all ASICs and future SW versions. + * For future ASIC-LKD compatibility, we can only add new enumerations. + * at the end of the table (before CPUCP_NUM_OF_MSI_TYPES). + * Changing the order of entries or removing entries is not allowed. 
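+ *
+ * For example, a hypothetical new interrupt source would have to be
+ * appended right before the terminator, leaving all existing values
+ * unchanged:
+ *
+ *	CPUCP_EVENT_QUEUE_ERR_MSI_TYPE,
+ *	CPUCP_NEW_MSI_TYPE, (hypothetical addition)
+ *	CPUCP_NUM_OF_MSI_TYPES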
+ */ +enum cpucp_msi_type { + CPUCP_EVENT_QUEUE_MSI_TYPE, + CPUCP_NIC_PORT1_MSI_TYPE, + CPUCP_NIC_PORT3_MSI_TYPE, + CPUCP_NIC_PORT5_MSI_TYPE, + CPUCP_NIC_PORT7_MSI_TYPE, + CPUCP_NIC_PORT9_MSI_TYPE, + CPUCP_EVENT_QUEUE_ERR_MSI_TYPE, + CPUCP_NUM_OF_MSI_TYPES +}; + +/* + * PLL enumeration table used for all ASICs and future SW versions. + * For future ASIC-LKD compatibility, we can only add new enumerations. + * at the end of the table. + * Changing the order of entries or removing entries is not allowed. + */ +enum pll_index { + CPU_PLL = 0, + PCI_PLL = 1, + NIC_PLL = 2, + DMA_PLL = 3, + MESH_PLL = 4, + MME_PLL = 5, + TPC_PLL = 6, + IF_PLL = 7, + SRAM_PLL = 8, + NS_PLL = 9, + HBM_PLL = 10, + MSS_PLL = 11, + DDR_PLL = 12, + VID_PLL = 13, + BANK_PLL = 14, + MMU_PLL = 15, + IC_PLL = 16, + MC_PLL = 17, + EMMC_PLL = 18, + D2D_PLL = 19, + CS_PLL = 20, + C2C_PLL = 21, + NCH_PLL = 22, + C2M_PLL = 23, + PLL_MAX +}; + +enum rl_index { + TPC_RL = 0, + MME_RL, + EDMA_RL, +}; + +enum pvt_index { + PVT_SW, + PVT_SE, + PVT_NW, + PVT_NE +}; + +/* Event Queue Packets */ + +struct eq_generic_event { + __le64 data[7]; +}; + +/* + * CpuCP info + */ + +#define CARD_NAME_MAX_LEN 16 +#define CPUCP_MAX_SENSORS 128 +#define CPUCP_MAX_NICS 128 +#define CPUCP_LANES_PER_NIC 4 +#define CPUCP_NIC_QSFP_EEPROM_MAX_LEN 1024 +#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC) +#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64) +#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64) +#define CPUCP_HBM_ROW_REPLACE_MAX 32 + +struct cpucp_sensor { + __le32 type; + __le32 flags; +}; + +/** + * struct cpucp_card_types - ASIC card type. + * @cpucp_card_type_pci: PCI card. + * @cpucp_card_type_pmc: PCI Mezzanine Card. + */ +enum cpucp_card_types { + cpucp_card_type_pci, + cpucp_card_type_pmc +}; + +#define CPUCP_SEC_CONF_ENABLED_SHIFT 0 +#define CPUCP_SEC_CONF_ENABLED_MASK 0x00000001 + +#define CPUCP_SEC_CONF_FLASH_WP_SHIFT 1 +#define CPUCP_SEC_CONF_FLASH_WP_MASK 0x00000002 + +#define CPUCP_SEC_CONF_EEPROM_WP_SHIFT 2 +#define CPUCP_SEC_CONF_EEPROM_WP_MASK 0x00000004 + +/** + * struct cpucp_security_info - Security information. + * @config: configuration bit field + * @keys_num: number of stored keys + * @revoked_keys: revoked keys bit field + * @min_svn: minimal security version + */ +struct cpucp_security_info { + __u8 config; + __u8 keys_num; + __u8 revoked_keys; + __u8 min_svn; +}; + +/** + * struct cpucp_info - Info from CpuCP that is necessary to the host's driver + * @sensors: available sensors description. + * @kernel_version: CpuCP linux kernel version. + * @reserved: reserved field. + * @card_type: card configuration type. + * @card_location: in a server, each card has different connections topology + * depending on its location (relevant for PMC card type) + * @cpld_version: CPLD programmed F/W version. + * @infineon_version: Infineon main DC-DC version. + * @fuse_version: silicon production FUSE information. + * @thermal_version: thermald S/W version. + * @cpucp_version: CpuCP S/W version. + * @infineon_second_stage_version: Infineon 2nd stage DC-DC version. + * @dram_size: available DRAM size. 
+ * @card_name: card name that will be displayed in HWMON subsystem on the host + * @tpc_binning_mask: TPC binning mask, 1 bit per TPC instance + * (0 = functional, 1 = binned) + * @decoder_binning_mask: Decoder binning mask, 1 bit per decoder instance + * (0 = functional, 1 = binned), maximum 1 per dcore + * @sram_binning: Categorize SRAM functionality + * (0 = fully functional, 1 = lower-half is not functional, + * 2 = upper-half is not functional) + * @sec_info: security information + * @pll_map: Bit map of supported PLLs for current ASIC version. + * @mme_binning_mask: MME binning mask, + * bits [0:6] <==> dcore0 mme fma + * bits [7:13] <==> dcore1 mme fma + * bits [14:20] <==> dcore0 mme ima + * bits [21:27] <==> dcore1 mme ima + * For each group, if the 6th bit is set then first 5 bits + * represent the col's idx [0-31], otherwise these bits are + * ignored, and col idx 32 is binned. 7th bit is don't care. + * @dram_binning_mask: DRAM binning mask, 1 bit per dram instance + * (0 = functional 1 = binned) + * @memory_repair_flag: eFuse flag indicating memory repair + * @edma_binning_mask: EDMA binning mask, 1 bit per EDMA instance + * (0 = functional 1 = binned) + * @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance + * (0 = functional 1 = binned) + * @interposer_version: Interposer version programmed in eFuse + * @substrate_version: Substrate version programmed in eFuse + * @eq_health_check_supported: eq health check feature supported in FW. + * @fw_hbm_region_size: Size in bytes of FW reserved region in HBM. + * @fw_os_version: Firmware OS Version + */ +struct cpucp_info { + struct cpucp_sensor sensors[CPUCP_MAX_SENSORS]; + __u8 kernel_version[VERSION_MAX_LEN]; + __le32 reserved; + __le32 card_type; + __le32 card_location; + __le32 cpld_version; + __le32 infineon_version; + __u8 fuse_version[VERSION_MAX_LEN]; + __u8 thermal_version[VERSION_MAX_LEN]; + __u8 cpucp_version[VERSION_MAX_LEN]; + __le32 infineon_second_stage_version; + __le64 dram_size; + char card_name[CARD_NAME_MAX_LEN]; + __le64 tpc_binning_mask; + __le64 decoder_binning_mask; + __u8 sram_binning; + __u8 dram_binning_mask; + __u8 memory_repair_flag; + __u8 edma_binning_mask; + __u8 xbar_binning_mask; + __u8 interposer_version; + __u8 substrate_version; + __u8 eq_health_check_supported; + struct cpucp_security_info sec_info; + __le32 fw_hbm_region_size; + __u8 pll_map[PLL_MAP_LEN]; + __le64 mme_binning_mask; + __u8 fw_os_version[VERSION_MAX_LEN]; +}; + +struct cpucp_mac_addr { + __u8 mac_addr[ETH_ALEN]; +}; + +enum cpucp_serdes_type { + TYPE_1_SERDES_TYPE, + TYPE_2_SERDES_TYPE, + HLS1_SERDES_TYPE, + HLS1H_SERDES_TYPE, + HLS2_SERDES_TYPE, + HLS2_TYPE_1_SERDES_TYPE, + MAX_NUM_SERDES_TYPE, /* number of types */ + UNKNOWN_SERDES_TYPE = 0xFFFF /* serdes_type is u16 */ +}; + +struct cpucp_nic_info { + struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS]; + __le64 link_mask[CPUCP_NIC_MASK_ARR_LEN]; + __le64 pol_tx_mask[CPUCP_NIC_POLARITY_ARR_LEN]; + __le64 pol_rx_mask[CPUCP_NIC_POLARITY_ARR_LEN]; + __le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN]; + __u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN]; + __le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN]; + __le16 serdes_type; /* enum cpucp_serdes_type */ + __le16 tx_swap_map[CPUCP_MAX_NICS]; + __u8 reserved[6]; +}; + +#define PAGE_DISCARD_MAX 64 + +struct page_discard_info { + __u8 num_entries; + __u8 reserved[7]; + __le32 mmu_page_idx[PAGE_DISCARD_MAX]; +}; + +/* + * struct frac_val - fracture value represented by "integer.frac". 
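+ * (Illustrative note for consumers: the whole 32-bit value can be read
+ * as le32_to_cpu(f->val), or the two halves as le16_to_cpu(f->integer)
+ * and le16_to_cpu(f->frac); the scaling of the fractional part is
+ * defined by the firmware.)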
+ * @integer: the integer part of the fracture value; + * @frac: the fracture part of the fracture value. + */ +struct frac_val { + union { + struct { + __le16 integer; + __le16 frac; + }; + __le32 val; + }; +}; + +/* + * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp". + * @integer: the integer part of the SER value; + * @exp: the exponent part of the SER value. + */ +struct ser_val { + __le16 integer; + __le16 exp; +}; + +/* + * struct cpucp_nic_status - describes the status of a NIC port. + * @port: NIC port index. + * @bad_format_cnt: e.g. CRC. + * @responder_out_of_sequence_psn_cnt: e.g NAK. + * @high_ber_reinit_cnt: link reinit due to high BER. + * @correctable_err_cnt: e.g. bit-flip. + * @uncorrectable_err_cnt: e.g. MAC errors. + * @retraining_cnt: re-training counter. + * @up: is port up. + * @pcs_link: has PCS link. + * @phy_ready: is PHY ready. + * @auto_neg: is Autoneg enabled. + * @timeout_retransmission_cnt: timeout retransmission events. + * @high_ber_cnt: high ber events. + * @pre_fec_ser: pre FEC SER value. + * @post_fec_ser: post FEC SER value. + * @throughput: measured throughput. + * @latency: measured latency. + */ +struct cpucp_nic_status { + __le32 port; + __le32 bad_format_cnt; + __le32 responder_out_of_sequence_psn_cnt; + __le32 high_ber_reinit; + __le32 correctable_err_cnt; + __le32 uncorrectable_err_cnt; + __le32 retraining_cnt; + __u8 up; + __u8 pcs_link; + __u8 phy_ready; + __u8 auto_neg; + __le32 timeout_retransmission_cnt; + __le32 high_ber_cnt; + struct ser_val pre_fec_ser; + struct ser_val post_fec_ser; + struct frac_val bandwidth; + struct frac_val lat; +}; + +enum cpucp_hbm_row_replace_cause { + REPLACE_CAUSE_DOUBLE_ECC_ERR, + REPLACE_CAUSE_MULTI_SINGLE_ECC_ERR, +}; + +struct cpucp_hbm_row_info { + __u8 hbm_idx; + __u8 pc; + __u8 sid; + __u8 bank_idx; + __le16 row_addr; + __u8 replaced_row_cause; /* enum cpucp_hbm_row_replace_cause */ + __u8 pad; +}; + +struct cpucp_hbm_row_replaced_rows_info { + __le16 num_replaced_rows; + __u8 pad[6]; + struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX]; +}; + +enum cpu_reset_status { + CPU_RST_STATUS_NA = 0, + CPU_RST_STATUS_SOFT_RST_DONE = 1, +}; + +#define SEC_PCR_DATA_BUF_SZ 256 +#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */ +#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */ +#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */ +#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */ + +/* + * struct cpucp_sec_attest_info - attestation report of the boot + * @pcr_data: raw values of the PCR registers + * @pcr_num_reg: number of PCR registers in the pcr_data array + * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes) + * @nonce: number only used once. random number provided by host. this also + * passed to the quote command as a qualifying data. 
+ * @pcr_quote_len: length of the attestation quote data (bytes) + * @pcr_quote: attestation report data structure + * @quote_sig_len: length of the attestation report signature (bytes) + * @quote_sig: signature structure of the attestation report + * @pub_data_len: length of the public data (bytes) + * @public_data: public key for the signed attestation + * (outPublic + name + qualifiedName) + * @certificate_len: length of the certificate (bytes) + * @certificate: certificate for the attestation signing key + */ +struct cpucp_sec_attest_info { + __u8 pcr_data[SEC_PCR_DATA_BUF_SZ]; + __u8 pcr_num_reg; + __u8 pcr_reg_len; + __le16 pad0; + __le32 nonce; + __le16 pcr_quote_len; + __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ]; + __u8 quote_sig_len; + __u8 quote_sig[SEC_SIGNATURE_BUF_SZ]; + __le16 pub_data_len; + __u8 public_data[SEC_PUB_DATA_BUF_SZ]; + __le16 certificate_len; + __u8 certificate[SEC_CERTIFICATE_BUF_SZ]; +}; + +/* + * struct cpucp_dev_info_signed - device information signed by a secured device + * @info: device information structure as defined above + * @nonce: number only used once. Random number provided by host. This number is + * hashed and signed along with the device information. + * @info_sig_len: length of the attestation signature (bytes) + * @info_sig: signature of the info + nonce data. + * @pub_data_len: length of the public data (bytes) + * @public_data: public key info signed info data + * (outPublic + name + qualifiedName) + * @certificate_len: length of the certificate (bytes) + * @certificate: certificate for the signing key + */ +struct cpucp_dev_info_signed { + struct cpucp_info info; /* assumed to be 64bit aligned */ + __le32 nonce; + __le32 pad0; + __u8 info_sig_len; + __u8 info_sig[SEC_SIGNATURE_BUF_SZ]; + __le16 pub_data_len; + __u8 public_data[SEC_PUB_DATA_BUF_SZ]; + __le16 certificate_len; + __u8 certificate[SEC_CERTIFICATE_BUF_SZ]; +}; + +#define DCORE_MON_REGS_SZ 512 +/* + * struct dcore_monitor_regs_data - DCORE monitor regs data. + * the structure follows sync manager block layout. Obsolete. + * @mon_pay_addrl: array of payload address low bits. + * @mon_pay_addrh: array of payload address high bits. + * @mon_pay_data: array of payload data. + * @mon_arm: array of monitor arm. + * @mon_status: array of monitor status. + */ +struct dcore_monitor_regs_data { + __le32 mon_pay_addrl[DCORE_MON_REGS_SZ]; + __le32 mon_pay_addrh[DCORE_MON_REGS_SZ]; + __le32 mon_pay_data[DCORE_MON_REGS_SZ]; + __le32 mon_arm[DCORE_MON_REGS_SZ]; + __le32 mon_status[DCORE_MON_REGS_SZ]; +}; + +/* contains SM data for each SYNC_MNGR (Obsolete) */ +struct cpucp_monitor_dump { + struct dcore_monitor_regs_data sync_mngr_w_s; + struct dcore_monitor_regs_data sync_mngr_e_s; + struct dcore_monitor_regs_data sync_mngr_w_n; + struct dcore_monitor_regs_data sync_mngr_e_n; +}; + +/* + * The type of the generic request (and other input arguments) will be fetched from the user by reading + * the "pkt_subidx" field in struct cpucp_packet. + * + * HL_PASSTHROUGH_VERSIONS - Fetch all firmware versions. + */ +enum hl_passthrough_type { + HL_PASSTHROUGH_VERSIONS, +}; + +#endif /* CPUCP_IF_H */ diff --git a/include/linux/habanalabs/hl_boot_if.h b/include/linux/habanalabs/hl_boot_if.h new file mode 100644 index 0000000000..93366d5621 --- /dev/null +++ b/include/linux/habanalabs/hl_boot_if.h @@ -0,0 +1,792 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2018-2020 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef HL_BOOT_IF_H +#define HL_BOOT_IF_H + +#define LKD_HARD_RESET_MAGIC 0xED7BD694 /* deprecated - do not use */ +#define HL_POWER9_HOST_MAGIC 0x1DA30009 + +#define BOOT_FIT_SRAM_OFFSET 0x200000 + +#define VERSION_MAX_LEN 128 + +enum cpu_boot_err { + CPU_BOOT_ERR_DRAM_INIT_FAIL = 0, + CPU_BOOT_ERR_FIT_CORRUPTED = 1, + CPU_BOOT_ERR_TS_INIT_FAIL = 2, + CPU_BOOT_ERR_DRAM_SKIPPED = 3, + CPU_BOOT_ERR_BMC_WAIT_SKIPPED = 4, + CPU_BOOT_ERR_NIC_DATA_NOT_RDY = 5, + CPU_BOOT_ERR_NIC_FW_FAIL = 6, + CPU_BOOT_ERR_SECURITY_NOT_RDY = 7, + CPU_BOOT_ERR_SECURITY_FAIL = 8, + CPU_BOOT_ERR_EFUSE_FAIL = 9, + CPU_BOOT_ERR_PRI_IMG_VER_FAIL = 10, + CPU_BOOT_ERR_SEC_IMG_VER_FAIL = 11, + CPU_BOOT_ERR_PLL_FAIL = 12, + CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13, + CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18, + CPU_BOOT_ERR_BINNING_FAIL = 19, + CPU_BOOT_ERR_TPM_FAIL = 20, + CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21, + CPU_BOOT_ERR_EEPROM_FAIL = 22, + CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL = 23, + CPU_BOOT_ERR_ENABLED = 31, + CPU_BOOT_ERR_SCND_EN = 63, + CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */ +}; + +/* + * Mask for fatal failures + * This mask contains all possible fatal failures, and dynamic code + * will clear the non-relevant ones. + */ +#define CPU_BOOT_ERR_FATAL_MASK \ + ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \ + (1 << CPU_BOOT_ERR_PLL_FAIL) | \ + (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \ + (1 << CPU_BOOT_ERR_BINNING_FAIL) | \ + (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \ + (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \ + (1 << CPU_BOOT_ERR_EEPROM_FAIL)) + +/* + * CPU error bits in BOOT_ERROR registers + * + * CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed. + * DRAM is not reliable to use. + * + * CPU_BOOT_ERR0_FIT_CORRUPTED FIT data integrity verification of the + * image provided by the host has failed. + * + * CPU_BOOT_ERR0_TS_INIT_FAIL Thermal Sensor initialization failed. + * Boot continues as usual, but keep in + * mind this is a warning. + * + * CPU_BOOT_ERR0_DRAM_SKIPPED DRAM initialization has been skipped. + * Skipping DRAM initialization has been + * requested (e.g. strap, command, etc.) + * and FW skipped the DRAM initialization. + * Host can initialize the DRAM. + * + * CPU_BOOT_ERR0_BMC_WAIT_SKIPPED Waiting for BMC data will be skipped. + * Meaning the BMC data might not be + * available until reset. + * + * CPU_BOOT_ERR0_NIC_DATA_NOT_RDY NIC data from BMC is not ready. + * BMC has not provided the NIC data yet. + * Once provided this bit will be cleared. + * + * CPU_BOOT_ERR0_NIC_FW_FAIL NIC FW loading failed. + * The NIC FW loading and initialization + * failed. This means NICs are not usable. + * + * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been + * started, but is not ready yet - chip + * cannot be accessed. + * + * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed. + * The tasks are security init (root of + * trust), boot authentication (chain of + * trust), data packets authentication. + * + * CPU_BOOT_ERR0_EFUSE_FAIL Reading from eFuse failed. + * The PCI device ID might be wrong. + * + * CPU_BOOT_ERR0_PRI_IMG_VER_FAIL Verification of primary image failed. + * It means that ppboot checksum + * verification for the preboot primary + * image has failed to match expected + * checksum. Trying to program image again + * might solve this. + * + * CPU_BOOT_ERR0_SEC_IMG_VER_FAIL Verification of secondary image failed. + * It means that ppboot checksum + * verification for the preboot secondary + * image has failed to match expected + * checksum. 
Trying to program image again + * might solve this. + * + * CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one + * of the PLLs remains in REF_CLK + * + * CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support + * should be contacted. + * + * CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR Critical error was detected during + * the execution of ppboot or preboot. + * for example: stack overflow. + * + * CPU_BOOT_ERR0_BINNING_FAIL Binning settings failed, meaning + * malfunctioning components might still be + * in use. + * + * CPU_BOOT_ERR0_TPM_FAIL TPM verification flow failed. + * + * CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL Failed to set threshold for tmperature + * sensor. + * + * CPU_BOOT_ERR_EEPROM_FAIL Failed reading EEPROM data. Defaults + * are used. + * + * CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL Failed scrubbing the Engines/ARCFarm + * memories. Boot disabled until reset. + * + * CPU_BOOT_ERR0_ENABLED Error registers enabled. + * This is a main indication that the + * running FW populates the error + * registers. Meaning the error bits are + * not garbage, but actual error statuses. + */ +#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) +#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << CPU_BOOT_ERR_FIT_CORRUPTED) +#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << CPU_BOOT_ERR_TS_INIT_FAIL) +#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << CPU_BOOT_ERR_DRAM_SKIPPED) +#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << CPU_BOOT_ERR_BMC_WAIT_SKIPPED) +#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << CPU_BOOT_ERR_NIC_DATA_NOT_RDY) +#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << CPU_BOOT_ERR_NIC_FW_FAIL) +#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << CPU_BOOT_ERR_SECURITY_NOT_RDY) +#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << CPU_BOOT_ERR_SECURITY_FAIL) +#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << CPU_BOOT_ERR_EFUSE_FAIL) +#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << CPU_BOOT_ERR_PRI_IMG_VER_FAIL) +#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << CPU_BOOT_ERR_SEC_IMG_VER_FAIL) +#define CPU_BOOT_ERR0_PLL_FAIL (1 << CPU_BOOT_ERR_PLL_FAIL) +#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) +#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR) +#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL) +#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL) +#define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL) +#define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL) +#define CPU_BOOT_ERR0_ENG_ARC_MEM_SCRUB_FAIL (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) +#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED) +#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED) + +enum cpu_boot_dev_sts { + CPU_BOOT_DEV_STS_SECURITY_EN = 0, + CPU_BOOT_DEV_STS_DEBUG_EN = 1, + CPU_BOOT_DEV_STS_WATCHDOG_EN = 2, + CPU_BOOT_DEV_STS_DRAM_INIT_EN = 3, + CPU_BOOT_DEV_STS_BMC_WAIT_EN = 4, + CPU_BOOT_DEV_STS_E2E_CRED_EN = 5, + CPU_BOOT_DEV_STS_HBM_CRED_EN = 6, + CPU_BOOT_DEV_STS_RL_EN = 7, + CPU_BOOT_DEV_STS_SRAM_SCR_EN = 8, + CPU_BOOT_DEV_STS_DRAM_SCR_EN = 9, + CPU_BOOT_DEV_STS_FW_HARD_RST_EN = 10, + CPU_BOOT_DEV_STS_PLL_INFO_EN = 11, + CPU_BOOT_DEV_STS_SP_SRAM_EN = 12, + CPU_BOOT_DEV_STS_CLK_GATE_EN = 13, + CPU_BOOT_DEV_STS_HBM_ECC_EN = 14, + CPU_BOOT_DEV_STS_PKT_PI_ACK_EN = 15, + CPU_BOOT_DEV_STS_FW_LD_COM_EN = 16, + CPU_BOOT_DEV_STS_FW_IATU_CONF_EN = 17, + CPU_BOOT_DEV_STS_FW_NIC_MAC_EN = 18, + CPU_BOOT_DEV_STS_DYN_PLL_EN = 19, + CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN = 20, + CPU_BOOT_DEV_STS_EQ_INDEX_EN = 21, + 
CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN = 22, + CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN = 23, + CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24, + CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25, + CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26, + CPU_BOOT_DEV_STS_ENABLED = 31, + CPU_BOOT_DEV_STS_SCND_EN = 63, + CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */ +}; + +/* + * BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers + * + * CPU_BOOT_DEV_STS0_SECURITY_EN Security is Enabled. + * This is an indication for security + * enabled in FW, which means that + * all conditions for security are met: + * device is indicated as security enabled, + * registers are protected, and device + * uses keys for image verification. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_DEBUG_EN Debug is enabled. + * Enabled when JTAG or DEBUG is enabled + * in FW. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_WATCHDOG_EN Watchdog is enabled. + * Watchdog is enabled in FW. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_DRAM_INIT_EN DRAM initialization is enabled. + * DRAM initialization has been done in FW. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_BMC_WAIT_EN Waiting for BMC data enabled. + * If set, it means that during boot, + * FW waited for BMC data. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_E2E_CRED_EN E2E credits initialized. + * FW initialized E2E credits. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_HBM_CRED_EN HBM credits initialized. + * FW initialized HBM credits. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_RL_EN Rate limiter initialized. + * FW initialized rate limiter. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_SRAM_SCR_EN SRAM scrambler enabled. + * FW initialized SRAM scrambler. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_DRAM_SCR_EN DRAM scrambler enabled. + * FW initialized DRAM scrambler. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_FW_HARD_RST_EN FW hard reset procedure is enabled. + * FW has the hard reset procedure + * implemented. This means that FW will + * perform hard reset procedure on + * receiving the halt-machine event. + * Initialized in: preboot, u-boot, linux + * + * CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_SP_SRAM_EN SP SRAM is initialized and available + * for use. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled. + * FW initialized Clock Gating. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_HBM_ECC_EN HBM ECC handling Enabled. + * FW handles HBM ECC indications. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN Packets ack value used in the armcpd + * is set to the PI counter. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_FW_LD_COM_EN Flexible FW loading communication + * protocol is enabled. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN FW iATU configuration is enabled. + * This bit if set, means the iATU has been + * configured and is ready for use. + * Initialized in: ppboot + * + * CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN NIC MAC channels init is done by FW and + * any access to them is done via the FW. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_DYN_PLL_EN Dynamic PLL configuration is enabled. + * FW sends to host a bitmap of supported + * PLLs. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN GIC access permission only from + * previleged entity. FW sets this status + * bit for host. 
If this bit is set then + * GIC can not be accessed from host. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_EQ_INDEX_EN Event Queue (EQ) index is a running + * index for each new event sent to host. + * This is used as a method in host to + * identify that the waiting event in + * queue is actually a new event which + * was not served before. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to + * prevent IRQs overriding each other. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN + * NIC STAT and XPCS91 access is restricted + * and is done via FW only. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN + * NIC STAT get all is supported. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN + * F/W checks if the device is idle by reading defined set + * of registers. It returns a bitmask of all the engines, + * where a bit is set if the engine is not idle. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_MAP_HWMON_EN + * If set, means f/w supports proprietary + * HWMON enum mapping to cpucp enums. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled. + * This is a main indication that the + * running FW populates the device status + * register. Meaning the device status + * bits are not garbage, but actual + * statuses. + * Initialized in: preboot + * + */ +#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << CPU_BOOT_DEV_STS_SECURITY_EN) +#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << CPU_BOOT_DEV_STS_DEBUG_EN) +#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << CPU_BOOT_DEV_STS_WATCHDOG_EN) +#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << CPU_BOOT_DEV_STS_DRAM_INIT_EN) +#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << CPU_BOOT_DEV_STS_BMC_WAIT_EN) +#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << CPU_BOOT_DEV_STS_E2E_CRED_EN) +#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << CPU_BOOT_DEV_STS_HBM_CRED_EN) +#define CPU_BOOT_DEV_STS0_RL_EN (1 << CPU_BOOT_DEV_STS_RL_EN) +#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_SRAM_SCR_EN) +#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_DRAM_SCR_EN) +#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << CPU_BOOT_DEV_STS_FW_HARD_RST_EN) +#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << CPU_BOOT_DEV_STS_PLL_INFO_EN) +#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << CPU_BOOT_DEV_STS_SP_SRAM_EN) +#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << CPU_BOOT_DEV_STS_CLK_GATE_EN) +#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << CPU_BOOT_DEV_STS_HBM_ECC_EN) +#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << CPU_BOOT_DEV_STS_PKT_PI_ACK_EN) +#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << CPU_BOOT_DEV_STS_FW_LD_COM_EN) +#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << CPU_BOOT_DEV_STS_FW_IATU_CONF_EN) +#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_MAC_EN) +#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << CPU_BOOT_DEV_STS_DYN_PLL_EN) +#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN) +#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << CPU_BOOT_DEV_STS_EQ_INDEX_EN) +#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN) +#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN) +#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN) +#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN) +#define CPU_BOOT_DEV_STS0_MAP_HWMON_EN (1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN) +#define 
CPU_BOOT_DEV_STS0_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED) +#define CPU_BOOT_DEV_STS1_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED) + +enum cpu_boot_status { + CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */ + CPU_BOOT_STATUS_IN_WFE = 1, + CPU_BOOT_STATUS_DRAM_RDY = 2, + CPU_BOOT_STATUS_SRAM_AVAIL = 3, + CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */ + CPU_BOOT_STATUS_IN_PREBOOT = 5, + CPU_BOOT_STATUS_IN_SPL, /* deprecated - not reported */ + CPU_BOOT_STATUS_IN_UBOOT = 7, + CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */ + CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */ + /* U-Boot console prompt activated, commands are not processed */ + CPU_BOOT_STATUS_UBOOT_NOT_READY = 10, + /* Finished NICs init, reported after DRAM and NICs */ + CPU_BOOT_STATUS_NIC_FW_RDY = 11, + CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */ + CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */ + CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */ + /* Last boot loader progress status, ready to receive commands */ + CPU_BOOT_STATUS_READY_TO_BOOT = 15, + /* Internal Boot finished, ready for boot-fit */ + CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16, + /* Internal Security has been initialized, device can be accessed */ + CPU_BOOT_STATUS_SECURITY_READY = 17, + /* FW component is preparing to shutdown and communication with host is not available */ + CPU_BOOT_STATUS_FW_SHUTDOWN_PREP = 18, +}; + +enum kmd_msg { + KMD_MSG_NA = 0, + KMD_MSG_GOTO_WFE, + KMD_MSG_FIT_RDY, + KMD_MSG_SKIP_BMC, + RESERVED, + KMD_MSG_RST_DEV, + KMD_MSG_LAST +}; + +enum cpu_msg_status { + CPU_MSG_CLR = 0, + CPU_MSG_OK, + CPU_MSG_ERR, +}; + +/* communication registers mapping - consider ABI when changing */ +struct cpu_dyn_regs { + __le32 cpu_pq_base_addr_low; + __le32 cpu_pq_base_addr_high; + __le32 cpu_pq_length; + __le32 cpu_pq_init_status; + __le32 cpu_eq_base_addr_low; + __le32 cpu_eq_base_addr_high; + __le32 cpu_eq_length; + __le32 cpu_eq_ci; + __le32 cpu_cq_base_addr_low; + __le32 cpu_cq_base_addr_high; + __le32 cpu_cq_length; + __le32 cpu_pf_pq_pi; + __le32 cpu_boot_dev_sts0; + __le32 cpu_boot_dev_sts1; + __le32 cpu_boot_err0; + __le32 cpu_boot_err1; + __le32 cpu_boot_status; + __le32 fw_upd_sts; + __le32 fw_upd_cmd; + __le32 fw_upd_pending_sts; + __le32 fuse_ver_offset; + __le32 preboot_ver_offset; + __le32 uboot_ver_offset; + __le32 hw_state; + __le32 kmd_msg_to_cpu; + __le32 cpu_cmd_status_to_host; + __le32 gic_host_pi_upd_irq; + __le32 gic_tpc_qm_irq_ctrl; + __le32 gic_mme_qm_irq_ctrl; + __le32 gic_dma_qm_irq_ctrl; + __le32 gic_nic_qm_irq_ctrl; + __le32 gic_dma_core_irq_ctrl; + __le32 gic_host_halt_irq; + __le32 gic_host_ints_irq; + __le32 gic_host_soft_rst_irq; + __le32 gic_rot_qm_irq_ctrl; + __le32 cpu_rst_status; + __le32 eng_arc_irq_ctrl; + __le32 reserved1[20]; /* reserve for future use */ +}; + +/* TODO: remove the desc magic after the code is updated to use message */ +/* HCDM - Habana Communications Descriptor Magic */ +#define HL_COMMS_DESC_MAGIC 0x4843444D +#define HL_COMMS_DESC_VER 3 + +/* HCMv - Habana Communications Message + header version */ +#define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00 +#define HL_COMMS_MSG_MAGIC_MASK 0xFFFFFF00 +#define HL_COMMS_MSG_MAGIC_VER_MASK 0xFF + +#define HL_COMMS_MSG_MAGIC_VER(ver) (HL_COMMS_MSG_MAGIC_VALUE | \ + ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK)) +#define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC +#define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1) +#define HL_COMMS_MSG_MAGIC_V2 
HL_COMMS_MSG_MAGIC_VER(2) +#define HL_COMMS_MSG_MAGIC_V3 HL_COMMS_MSG_MAGIC_VER(3) + +#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V3 + +#define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \ + (((magic) & HL_COMMS_MSG_MAGIC_MASK) == \ + HL_COMMS_MSG_MAGIC_VALUE) + +#define HL_COMMS_MSG_MAGIC_VALIDATE_VERSION(magic, ver) \ + (((magic) & HL_COMMS_MSG_MAGIC_VER_MASK) >= \ + ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK)) + +#define HL_COMMS_MSG_MAGIC_VALIDATE(magic, ver) \ + (HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC((magic)) && \ + HL_COMMS_MSG_MAGIC_VALIDATE_VERSION((magic), (ver))) + +enum comms_msg_type { + HL_COMMS_DESC_TYPE = 0, + HL_COMMS_RESET_CAUSE_TYPE = 1, + HL_COMMS_FW_CFG_SKIP_TYPE = 2, + HL_COMMS_BINNING_CONF_TYPE = 3, +}; + +/* + * Binning information shared between LKD and FW + * @tpc_mask_l - TPC binning information lower 64 bit + * @dec_mask - Decoder binning information + * @dram_mask - DRAM binning information + * @edma_mask - EDMA binning information + * @mme_mask_l - MME binning information lower 32 + * @mme_mask_h - MME binning information upper 32 + * @rot_mask - Rotator binning information + * @xbar_mask - xBAR binning information + * @reserved - reserved field for future binning info w/o ABI change + * @tpc_mask_h - TPC binning information upper 64 bit + * @nic_mask - NIC binning information + */ +struct lkd_fw_binning_info { + __le64 tpc_mask_l; + __le32 dec_mask; + __le32 dram_mask; + __le32 edma_mask; + __le32 mme_mask_l; + __le32 mme_mask_h; + __le32 rot_mask; + __le32 xbar_mask; + __le32 reserved0; + __le64 tpc_mask_h; + __le64 nic_mask; + __le32 reserved1[8]; +}; + +/* TODO: remove this struct after the code is updated to use message */ +/* this is the comms descriptor header - meta data */ +struct comms_desc_header { + __le32 magic; /* magic for validation */ + __le32 crc32; /* CRC32 of the descriptor w/o header */ + __le16 size; /* size of the descriptor w/o header */ + __u8 version; /* descriptor version */ + __u8 reserved[5]; /* pad to 64 bit */ +}; + +/* this is the comms message header - meta data */ +struct comms_msg_header { + __le32 magic; /* magic for validation */ + __le32 crc32; /* CRC32 of the message w/o header */ + __le16 size; /* size of the message w/o header */ + __u8 version; /* message payload version */ + __u8 type; /* message type */ + __u8 reserved[4]; /* pad to 64 bit */ +}; + +enum lkd_fw_ascii_msg_lvls { + LKD_FW_ASCII_MSG_ERR = 0, + LKD_FW_ASCII_MSG_WRN = 1, + LKD_FW_ASCII_MSG_INF = 2, + LKD_FW_ASCII_MSG_DBG = 3, +}; + +#define LKD_FW_ASCII_MSG_MAX_LEN 128 +#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */ + +struct lkd_fw_ascii_msg { + __u8 valid; + __u8 msg_lvl; + __u8 reserved[6]; + char msg[LKD_FW_ASCII_MSG_MAX_LEN]; +}; + +/* this is the main FW descriptor - consider ABI when changing */ +struct lkd_fw_comms_desc { + struct comms_desc_header header; + struct cpu_dyn_regs cpu_dyn_regs; + char fuse_ver[VERSION_MAX_LEN]; + char cur_fw_ver[VERSION_MAX_LEN]; + /* can be used for 1 more version w/o ABI change */ + char reserved0[VERSION_MAX_LEN]; + __le64 img_addr; /* address for next FW component load */ + struct lkd_fw_binning_info binning_info; + struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX]; + __le32 rsvd_mem_size_mb; /* reserved memory size [MB] for FW/SVE */ + char reserved1[4]; +}; + +enum comms_reset_cause { + HL_RESET_CAUSE_UNKNOWN = 0, + HL_RESET_CAUSE_HEARTBEAT = 1, + HL_RESET_CAUSE_TDR = 2, +}; + +/* TODO: remove define after struct name is aligned on all projects */ +#define lkd_msg_comms lkd_fw_comms_msg + +/* this 
is the comms message descriptor */ +struct lkd_fw_comms_msg { + struct comms_msg_header header; + /* union for future expansions of new messages */ + union { + struct { + struct cpu_dyn_regs cpu_dyn_regs; + char fuse_ver[VERSION_MAX_LEN]; + char cur_fw_ver[VERSION_MAX_LEN]; + /* can be used for 1 more version w/o ABI change */ + char reserved0[VERSION_MAX_LEN]; + /* address for next FW component load */ + __le64 img_addr; + struct lkd_fw_binning_info binning_info; + struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX]; + /* reserved memory size [MB] for FW/SVE */ + __le32 rsvd_mem_size_mb; + char reserved1[4]; + }; + struct { + __u8 reset_cause; + }; + struct { + __u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */ + }; + struct lkd_fw_binning_info binning_conf; + }; +}; + +/* + * LKD commands: + * + * COMMS_NOOP Used to clear the command register and no actual + * command is sent. + * + * COMMS_CLR_STS Clear status command - FW should clear the + * status register. Used for synchronization + * between the commands as part of the race free + * protocol. + * + * COMMS_RST_STATE Reset the current communication state which is + * kept by FW for proper responses. + * Should be used at the beginning of the + * communication cycle to clean any leftovers from + * previous communication attempts. + * + * COMMS_PREP_DESC Prepare descriptor for setting up the + * communication and other dynamic data: + * struct lkd_fw_comms_desc. + * This command has a parameter stating the next FW + * component size, so the FW can actually prepare a + * space for it and in the status response provide + * the descriptor offset. The offset of the next FW + * data component is a part of the descriptor + * structure. + * + * COMMS_DATA_RDY The FW data has been uploaded and is ready for + * validation. + * + * COMMS_EXEC Execute the next FW component. + * + * COMMS_RST_DEV Reset the device. + * + * COMMS_GOTO_WFE Execute WFE command. Allowed only on non-secure + * devices. + * + * COMMS_SKIP_BMC Perform actions required for BMC-less servers. + * Do not wait for BMC response. + * + * COMMS_PREP_DESC_ELBI Same as COMMS_PREP_DESC only that the memory + * space is allocated in an ELBI-access-only + * address range. + * + */ +enum comms_cmd { + COMMS_NOOP = 0, + COMMS_CLR_STS = 1, + COMMS_RST_STATE = 2, + COMMS_PREP_DESC = 3, + COMMS_DATA_RDY = 4, + COMMS_EXEC = 5, + COMMS_RST_DEV = 6, + COMMS_GOTO_WFE = 7, + COMMS_SKIP_BMC = 8, + COMMS_PREP_DESC_ELBI = 10, + COMMS_INVLD_LAST +}; + +#define COMMS_COMMAND_SIZE_SHIFT 0 +#define COMMS_COMMAND_SIZE_MASK 0x1FFFFFF +#define COMMS_COMMAND_CMD_SHIFT 27 +#define COMMS_COMMAND_CMD_MASK 0xF8000000 + +/* + * LKD command to FW register structure + * @size - FW component size + * @cmd - command from enum comms_cmd + */ +struct comms_command { + union { /* bit fields are only for FW use */ + struct { + u32 size :25; /* 32MB max. */ + u32 reserved :2; + enum comms_cmd cmd :5; /* 32 commands */ + }; + __le32 val; + }; +}; + +/* + * FW status + * + * COMMS_STS_NOOP Used to clear the status register and no actual + * status is provided. + * + * COMMS_STS_ACK Command has been received and recognized. + * + * COMMS_STS_OK Command execution has finished successfully. + * + * COMMS_STS_ERR Command execution was unsuccessful and resulted + * in error. + * + * COMMS_STS_VALID_ERR FW validation has failed. + * + * COMMS_STS_TIMEOUT_ERR Command execution has timed out.
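 + * + * Illustrative sequence (inferred from the command descriptions above, + * not a normative protocol specification): the host driver issues + * COMMS_RST_STATE once, then for each FW component sends COMMS_PREP_DESC + * with the component size, uploads the image at the returned offset, + * sends COMMS_DATA_RDY and finally COMMS_EXEC, polling this status + * register for COMMS_STS_OK and clearing it with COMMS_CLR_STS between + * steps.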
+ */ +enum comms_sts { + COMMS_STS_NOOP = 0, + COMMS_STS_ACK = 1, + COMMS_STS_OK = 2, + COMMS_STS_ERR = 3, + COMMS_STS_VALID_ERR = 4, + COMMS_STS_TIMEOUT_ERR = 5, + COMMS_STS_INVLD_LAST +}; + +/* RAM types for FW components loading - defines the base address */ +enum comms_ram_types { + COMMS_SRAM = 0, + COMMS_DRAM = 1, +}; + +#define COMMS_STATUS_OFFSET_SHIFT 0 +#define COMMS_STATUS_OFFSET_MASK 0x03FFFFFF +#define COMMS_STATUS_OFFSET_ALIGN_SHIFT 2 +#define COMMS_STATUS_RAM_TYPE_SHIFT 26 +#define COMMS_STATUS_RAM_TYPE_MASK 0x0C000000 +#define COMMS_STATUS_STATUS_SHIFT 28 +#define COMMS_STATUS_STATUS_MASK 0xF0000000 + +/* + * FW status to LKD register structure + * @offset - an offset from the base of the ram_type shifted right by + * 2 bits (always aligned to 32 bits). + * Allows a maximum addressable offset of 256MB from RAM base. + * Example: for real offset in RAM of 0x800000 (8MB), the value + * in offset field is (0x800000 >> 2) = 0x200000. + * @ram_type - the RAM type that should be used for offset from + * enum comms_ram_types + * @status - status from enum comms_sts + */ +struct comms_status { + union { /* bit fields are only for FW use */ + struct { + u32 offset :26; + enum comms_ram_types ram_type :2; + enum comms_sts status :4; /* 16 statuses */ + }; + __le32 val; + }; +}; + +#define NAME_MAX_LEN 32 /* bytes */ +struct hl_module_data { + __u8 name[NAME_MAX_LEN]; + __u8 version[VERSION_MAX_LEN]; +}; + +/** + * struct hl_component_versions - versions associated with hl component. + * @struct_size: size of the whole struct (including dynamic size of modules). + * @modules_offset: offset of the modules field in this struct. + * @component: version of the component itself. + * @fw_os: Firmware OS Version. + * @comp_name: Name of the component. + * @modules_counter: number of set bits in modules_mask. + * @reserved: reserved for future use. + * @modules: versions of the component's modules. Elaborated explanation in + * struct cpucp_versions. + */ +struct hl_component_versions { + __le16 struct_size; + __le16 modules_offset; + __u8 component[VERSION_MAX_LEN]; + __u8 fw_os[VERSION_MAX_LEN]; + __u8 comp_name[NAME_MAX_LEN]; + __u8 modules_counter; + __u8 reserved[3]; + struct hl_module_data modules[]; +}; + +/* Max size of the FW versions FIT */ +#define HL_FW_VERSIONS_FIT_SIZE 4096 + +#endif /* HL_BOOT_IF_H */ diff --git a/include/linux/hid.h b/include/linux/hid.h index 3b08a29572..bf43f3ff66 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -836,11 +836,11 @@ struct hid_driver { void (*feature_mapping)(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage); -#ifdef CONFIG_PM + int (*suspend)(struct hid_device *hdev, pm_message_t message); int (*resume)(struct hid_device *hdev); int (*reset_resume)(struct hid_device *hdev); -#endif + /* private: */ struct device_driver driver; }; diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 75607d4ba2..be20cff4ba 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -551,10 +551,24 @@ static inline void folio_zero_range(struct folio *folio, zero_user_segments(&folio->page, start, start + length, 0, 0); } -static inline void unmap_and_put_page(struct page *page, void *addr) +/** + * folio_release_kmap - Unmap a folio and drop a refcount. + * @folio: The folio to release. + * @addr: The address previously returned by a call to kmap_local_folio(). + * + * It is common, e.g. in directory handling, to kmap a folio.
This function + * unmaps the folio and drops the refcount that was being held to keep the + * folio alive while we accessed it. + */ +static inline void folio_release_kmap(struct folio *folio, void *addr) { kunmap_local(addr); - put_page(page); + folio_put(folio); +} + +static inline void unmap_and_put_page(struct page *page, void *addr) +{ + folio_release_kmap(page_folio(page), addr); } #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 5c4b3a6805..4c5611d99c 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -308,6 +308,18 @@ struct qm_err_isolate { struct list_head qm_hw_errs; }; +struct qm_rsv_buf { + struct qm_sqc *sqc; + struct qm_cqc *cqc; + struct qm_eqc *eqc; + struct qm_aeqc *aeqc; + dma_addr_t sqc_dma; + dma_addr_t cqc_dma; + dma_addr_t eqc_dma; + dma_addr_t aeqc_dma; + struct qm_dma qcdma; +}; + struct hisi_qm { enum qm_hw_ver ver; enum qm_fun_type fun_type; @@ -340,6 +352,7 @@ struct hisi_qm { dma_addr_t cqc_dma; dma_addr_t eqe_dma; dma_addr_t aeqe_dma; + struct qm_rsv_buf xqc_buf; struct hisi_qm_status status; const struct hisi_qm_err_ini *err_ini; @@ -495,6 +508,20 @@ static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list) mutex_init(&qm_list->lock); } +static inline void hisi_qm_add_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list) +{ + mutex_lock(&qm_list->lock); + list_add_tail(&qm->list, &qm_list->list); + mutex_unlock(&qm_list->lock); +} + +static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list) +{ + mutex_lock(&qm_list->lock); + list_del(&qm->list); + mutex_unlock(&qm_list->lock); +} + int hisi_qm_init(struct hisi_qm *qm); void hisi_qm_uninit(struct hisi_qm *qm); int hisi_qm_start(struct hisi_qm *qm); @@ -540,8 +567,8 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num); void hisi_qm_dev_shutdown(struct pci_dev *pdev); void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list); -int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list); -void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list); +int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard); +void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard); int hisi_qm_resume(struct device *dev); int hisi_qm_suspend(struct device *dev); void hisi_qm_pm_uninit(struct hisi_qm *qm); diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index f2044d5a65..254d4a8981 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -197,6 +197,7 @@ enum hrtimer_base_type { * @max_hang_time: Maximum time spent in hrtimer_interrupt * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are * expired + * @online: CPU is online from an hrtimers point of view * @timer_waiters: A hrtimer_cancel() invocation waits for the timer * callback to finish. 
* @expires_next: absolute time of the next event, is required for remote @@ -219,7 +220,8 @@ struct hrtimer_cpu_base { unsigned int hres_active : 1, in_hrtirq : 1, hang_detected : 1, - softirq_activated : 1; + softirq_activated : 1, + online : 1; #ifdef CONFIG_HIGH_RES_TIMERS unsigned int nr_events; unsigned short nr_retries; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 31b2927ada..236ec7b63c 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -30,7 +30,7 @@ void free_huge_folio(struct folio *folio); #ifdef CONFIG_HUGETLB_PAGE -#include <linux/mempolicy.h> +#include <linux/pagemap.h> #include <linux/shm.h> #include <asm/tlbflush.h> @@ -280,6 +280,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long cp_flags); bool is_hugetlb_entry_migration(pte_t pte); +bool is_hugetlb_entry_hwpoisoned(pte_t pte); void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); #else /* !CONFIG_HUGETLB_PAGE */ @@ -544,7 +545,6 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) } struct hugetlbfs_inode_info { - struct shared_policy policy; struct inode vfs_inode; unsigned int seals; }; @@ -748,8 +748,6 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask); -struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, - unsigned long address); int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, pgoff_t idx); void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, @@ -844,6 +842,12 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h) return huge_page_size(h) / 512; } +static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, + struct address_space *mapping, pgoff_t idx) +{ + return filemap_lock_folio(mapping, idx << huge_page_order(h)); +} + #include <asm/hugetlb.h> #ifndef is_hugepage_only_range @@ -1040,6 +1044,12 @@ static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio return NULL; } +static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, + struct address_space *mapping, pgoff_t idx) +{ + return NULL; +} + static inline int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) { @@ -1060,13 +1070,6 @@ alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, return NULL; } -static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h, - struct vm_area_struct *vma, - unsigned long address) -{ - return NULL; -} - static inline int __alloc_bootmem_huge_page(struct hstate *h) { return 0; diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 3d82d91f49..e5d64b8b59 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -22,13 +22,6 @@ struct resv_map; struct file_region; #ifdef CONFIG_CGROUP_HUGETLB -/* - * Minimum page order trackable by hugetlb cgroup. - * At least 3 pages are necessary for all the tracking information. - * The second tail page contains all of the hugetlb-specific fields. 
- */ -#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__NR_USED_SUBPAGE) - enum hugetlb_memory_event { HUGETLB_MAX, HUGETLB_NR_MEMORY_EVENTS, @@ -68,8 +61,6 @@ static inline struct hugetlb_cgroup * __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd) { VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); - if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER) - return NULL; if (rsvd) return folio->_hugetlb_cgroup_rsvd; else @@ -91,8 +82,6 @@ static inline void __set_hugetlb_cgroup(struct folio *folio, struct hugetlb_cgroup *h_cg, bool rsvd) { VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); - if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER) - return; if (rsvd) folio->_hugetlb_cgroup_rsvd = h_cg; else diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h index 90fa83464f..84ed77c049 100644 --- a/include/linux/i3c/device.h +++ b/include/linux/i3c/device.h @@ -96,7 +96,7 @@ enum i3c_dcr { /** * struct i3c_device_info - I3C device information - * @pid: Provisional ID + * @pid: Provisioned ID * @bcr: Bus Characteristic Register * @dcr: Device Characteristic Register * @static_addr: static/I2C address diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index 0b52da4f23..24c1863b86 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -24,6 +24,12 @@ struct i2c_client; +/* notifier actions. notifier call data is the struct i3c_bus */ +enum { + I3C_NOTIFY_BUS_ADD, + I3C_NOTIFY_BUS_REMOVE, +}; + struct i3c_master_controller; struct i3c_bus; struct i3c_device; @@ -129,6 +135,7 @@ struct i3c_ibi_slot { * rejected by the master * @num_slots: number of IBI slots reserved for this device * @enabled: reflect the IBI status + * @wq: workqueue used to execute IBI handlers. * @handler: IBI handler specified at i3c_device_request_ibi() call time. This * handler will be called from the controller workqueue, and as such * is allowed to sleep (though it is recommended to process the IBI @@ -151,6 +158,7 @@ struct i3c_device_ibi_info { unsigned int max_payload_len; unsigned int num_slots; unsigned int enabled; + struct workqueue_struct *wq; void (*handler)(struct i3c_device *dev, const struct i3c_ibi_payload *payload); }; @@ -166,7 +174,7 @@ struct i3c_device_ibi_info { * assigned a dynamic address by the master. Will be used during * bus initialization to assign it a specific dynamic address * before starting DAA (Dynamic Address Assignment) - * @pid: I3C Provisional ID exposed by the device. This is a unique identifier + * @pid: I3C Provisioned ID exposed by the device. This is a unique identifier * that may be used to attach boardinfo to i3c_dev_desc when the device * does not have a static address * @of_node: optional DT node in case the device has been described in the DT @@ -469,7 +477,7 @@ struct i3c_master_controller_ops { * @boardinfo.i2c: list of I2C boardinfo objects * @boardinfo: board-level information attached to devices connected on the bus * @bus: I3C bus exposed by this master - * @wq: workqueue used to execute IBI handlers. Can also be used by master + * @wq: workqueue which can be used by master * drivers if they need to postpone operations that need to take place * in a thread context. 
Typical examples are Hot Join processing which + * requires taking the bus lock in maintenance, which in turn, can only @@ -652,4 +660,9 @@ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot); struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev); +void i3c_for_each_bus_locked(int (*fn)(struct i3c_bus *bus, void *data), + void *data); +int i3c_register_notifier(struct notifier_block *nb); +int i3c_unregister_notifier(struct notifier_block *nb); + #endif /* I3C_MASTER_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 2fa186258e..2b5e500bf0 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -307,6 +307,13 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_TRIGGER_TYPE_BQRP 0x6 #define IEEE80211_TRIGGER_TYPE_NFRP 0x7 +/* UL-bandwidth within common_info of trigger frame */ +#define IEEE80211_TRIGGER_ULBW_MASK 0xc0000 +#define IEEE80211_TRIGGER_ULBW_20MHZ 0x0 +#define IEEE80211_TRIGGER_ULBW_40MHZ 0x1 +#define IEEE80211_TRIGGER_ULBW_80MHZ 0x2 +#define IEEE80211_TRIGGER_ULBW_160_80P80MHZ 0x3 + struct ieee80211_hdr { __le16 frame_control; __le16 duration_id; @@ -951,17 +958,24 @@ struct ieee80211_wide_bw_chansw_ie { * @dtim_count: DTIM Count * @dtim_period: DTIM Period * @bitmap_ctrl: Bitmap Control + * @required_octet: "Syntactic sugar" to force the struct size to the + * minimum valid size when carried in a non-S1G PPDU + * @virtual_map: Partial Virtual Bitmap * * This structure represents the payload of the "TIM element" as - * described in IEEE Std 802.11-2020 section 9.4.2.5. + * described in IEEE Std 802.11-2020 section 9.4.2.5. Note that this + * definition is only applicable when the element is carried in a + * non-S1G PPDU. When the TIM is carried in an S1G PPDU, the Bitmap + * Control and Partial Virtual Bitmap may not be present. */ struct ieee80211_tim_ie { u8 dtim_count; u8 dtim_period; u8 bitmap_ctrl; - /* variable size: 1 - 251 bytes */ - u8 virtual_map[1]; + union { + u8 required_octet; + DECLARE_FLEX_ARRAY(u8, virtual_map); + }; } __packed; /** @@ -1239,6 +1253,30 @@ struct ieee80211_twt_setup { u8 params[]; } __packed; +#define IEEE80211_TTLM_MAX_CNT 2 +#define IEEE80211_TTLM_CONTROL_DIRECTION 0x03 +#define IEEE80211_TTLM_CONTROL_DEF_LINK_MAP 0x04 +#define IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT 0x08 +#define IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT 0x10 +#define IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE 0x20 + +#define IEEE80211_TTLM_DIRECTION_DOWN 0 +#define IEEE80211_TTLM_DIRECTION_UP 1 +#define IEEE80211_TTLM_DIRECTION_BOTH 2 + +/** + * struct ieee80211_ttlm_elem - TID-To-Link Mapping element + * + * Defined in section 9.4.2.314 in P802.11be_D4 + * + * @control: the first part of the control field + * @optional: the second part of the control field + */ +struct ieee80211_ttlm_elem { + u8 control; + u8 optional[]; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -1674,6 +1712,8 @@ struct ieee80211_mcs_info { #define IEEE80211_HT_MCS_TX_MAX_STREAMS 4 #define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10 +#define IEEE80211_HT_MCS_CHAINS(mcs) ((mcs) == 32 ?
1 : (1 + ((mcs) >> 3))) + /* * 802.11n D5.0 20.3.5 / 20.6 says: * - indices 0 to 7 and 32 are single spatial stream @@ -3134,6 +3174,28 @@ ieee80211_eht_oper_size_ok(const u8 *data, u8 len) return len >= needed; } +#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1) + +struct ieee80211_bandwidth_indication { + u8 params; + struct ieee80211_eht_operation_info info; +} __packed; + +static inline bool +ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len) +{ + const struct ieee80211_bandwidth_indication *bwi = (const void *)data; + + if (len < sizeof(*bwi)) + return false; + + if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT && + len < sizeof(*bwi) + 2) + return false; + + return true; +} + #define LISTEN_INT_USF GENMASK(15, 14) #define LISTEN_INT_UI GENMASK(13, 0) @@ -3591,6 +3653,8 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_EHT_OPERATION = 106, WLAN_EID_EXT_EHT_MULTI_LINK = 107, WLAN_EID_EXT_EHT_CAPABILITY = 108, + WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109, + WLAN_EID_EXT_BANDWIDTH_INDICATION = 135, }; /* Action category code */ @@ -4458,12 +4522,11 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, /** * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) * @skb: the skb containing the frame, length will not be checked - * @hdr_size: the size of the ieee80211_hdr that starts at skb->data * * This function assumes the frame is a data frame, and that the network header * is in the correct place. */ -static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) +static inline int ieee80211_get_tdls_action(struct sk_buff *skb) { if (!skb_is_nonlinear(skb) && skb->len > (skb_network_offset(skb) + 2)) { @@ -5157,6 +5220,39 @@ static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data, fixed + prof->sta_info_len - 1 <= len; } +static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len) +{ + const struct ieee80211_ttlm_elem *t2l = (const void *)data; + u8 control, fixed = sizeof(*t2l), elem_len = 0; + + if (len < fixed) + return false; + + control = t2l->control; + + if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT) + elem_len += 2; + if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT) + elem_len += 3; + + if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) { + u8 bm_size; + + elem_len += 1; + if (len < fixed + elem_len) + return false; + + if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE) + bm_size = 1; + else + bm_size = 2; + + elem_len += hweight8(t2l->optional[0]) * bm_size; + } + + return len >= fixed + elem_len; +} + #define for_each_mle_subelement(_elem, _data, _len) \ if (ieee80211_mle_size_ok(_data, _len)) \ for_each_element(_elem, \ diff --git a/include/linux/igmp.h b/include/linux/igmp.h index ebf4349a53..5171231f70 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -39,7 +39,7 @@ struct ip_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct rcu_head rcu; - __be32 sl_addr[]; + __be32 sl_addr[] __counted_by(sl_max); }; #define IP_SFBLOCK 10 /* allocate this many at once */ diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 7852f6c9a7..719cf9cc6e 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -8,6 +8,8 @@ #ifndef __AD_SIGMA_DELTA_H__ #define __AD_SIGMA_DELTA_H__ +#include <linux/iio/iio.h> + enum ad_sigma_delta_mode { AD_SD_MODE_CONTINUOUS = 0, AD_SD_MODE_SINGLE = 1, @@ -99,7 +101,7 @@ struct ad_sigma_delta { * 'rx_buf' is up to 32 bits 
per sample + 64 bit timestamp, * rounded to 16 bytes to take into account padding. */ - uint8_t tx_buf[4] ____cacheline_aligned; + uint8_t tx_buf[4] __aligned(IIO_DMA_MINALIGN); uint8_t rx_buf[16] __aligned(8); }; diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 607c3a89a6..f9ae5cdd88 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -258,9 +258,9 @@ struct st_sensor_data { bool hw_irq_trigger; s64 hw_timestamp; - char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned; - struct mutex odr_lock; + + char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN); }; #ifdef CONFIG_IIO_BUFFER diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 202e55b0a2..d0ce3b7110 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -427,6 +427,8 @@ struct iio_trigger; /* forward declaration */ * @write_event_config: set if the event is enabled. * @read_event_value: read a configuration value associated with the event. * @write_event_value: write a configuration value for the event. + * @read_event_label: function to request label name for a specified label, + * for better event identification. * @validate_trigger: function to validate the trigger when the * current trigger gets changed. * @update_scan_mode: function to configure device and scan buffer when @@ -511,6 +513,12 @@ struct iio_info { enum iio_event_direction dir, enum iio_event_info info, int val, int val2); + int (*read_event_label)(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + enum iio_event_type type, + enum iio_event_direction dir, + char *label); + int (*validate_trigger)(struct iio_dev *indio_dev, struct iio_trigger *trig); int (*update_scan_mode)(struct iio_dev *indio_dev, @@ -556,7 +564,9 @@ struct iio_buffer_setup_ops { * and owner * @buffer: [DRIVER] any buffer present * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux - * @available_scan_masks: [DRIVER] optional array of allowed bitmasks + * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the + * array in order of preference, the most preferred + * masks first. 
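+ * For example (illustrative), a device that can capture either a + * single channel or all channels at once would list each + * single-channel mask before the all-channel mask, so the core + * picks the first (most preferred) mask that covers a request.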
* @masklength: [INTERN] the length of the mask established from * channels * @active_scan_mask: [INTERN] union of all scan masks requested by buffers diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index dc9ea299e0..8898966bc0 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -11,6 +11,7 @@ #include <linux/spi/spi.h> #include <linux/interrupt.h> +#include <linux/iio/iio.h> #include <linux/iio/types.h> #define ADIS_WRITE_REG(reg) ((0x80 | (reg))) @@ -131,7 +132,7 @@ struct adis { unsigned long irq_flag; void *buffer; - u8 tx[10] ____cacheline_aligned; + u8 tx[10] __aligned(IIO_DMA_MINALIGN); u8 rx[4]; }; diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h index eff1e6b259..0f7fe7b522 100644 --- a/include/linux/iio/sw_device.h +++ b/include/linux/iio/sw_device.h @@ -51,9 +51,6 @@ void iio_unregister_sw_device_type(struct iio_sw_device_type *dt); struct iio_sw_device *iio_sw_device_create(const char *, const char *); void iio_sw_device_destroy(struct iio_sw_device *); -int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt); -void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt); - static inline void iio_swd_group_init_type_name(struct iio_sw_device *d, const char *name, diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h index 47de2443e9..bc77f88df3 100644 --- a/include/linux/iio/sw_trigger.h +++ b/include/linux/iio/sw_trigger.h @@ -51,9 +51,6 @@ void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt); struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *); void iio_sw_trigger_destroy(struct iio_sw_trigger *); -int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt); -void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt); - static inline void iio_swt_group_init_type_name(struct iio_sw_trigger *t, const char *name, diff --git a/include/linux/init.h b/include/linux/init.h index 266c3e1640..01b52c9c75 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -89,9 +89,6 @@ __latent_entropy #define __meminitdata __section(".meminit.data") #define __meminitconst __section(".meminit.rodata") -#define __memexit __section(".memexit.text") __exitused __cold notrace -#define __memexitdata __section(".memexit.data") -#define __memexitconst __section(".memexit.rodata") /* For assembly routines */ #define __HEAD .section ".head.text","ax" diff --git a/include/linux/input.h b/include/linux/input.h index 49790c1bd2..de6503c0ed 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -562,7 +562,7 @@ struct ff_device { int max_effects; struct ff_effect *effects; - struct file *effect_owners[]; + struct file *effect_owners[] __counted_by(max_effects); }; int input_ff_create(struct input_dev *dev, unsigned int max_effects); diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h index 3b8580bd33..2cf89a538b 100644 --- a/include/linux/input/mt.h +++ b/include/linux/input/mt.h @@ -47,7 +47,7 @@ struct input_mt { unsigned int flags; unsigned int frame; int *red; - struct input_mt_slot slots[]; + struct input_mt_slot slots[] __counted_by(num_slots); }; static inline void input_mt_set_value(struct input_mt_slot *slot, diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index 04d937ad4d..ee07393445 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -6,6 +6,12 @@ #ifndef _INTEL_TPMI_H_ #define _INTEL_TPMI_H_ +#include <linux/bitfield.h> + 
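+/* + * The TPMI version byte packs a 5-bit minor revision and a 3-bit major + * revision; the helpers below extract them with FIELD_GET(). Worked + * example (illustrative only): a version byte of 0x45 decodes as + * major 2 (bits 7:5 = 0b010) and minor 5 (bits 4:0 = 0b00101). + */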
+#define TPMI_VERSION_INVALID 0xff +#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val) +#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val) + /** * struct intel_tpmi_plat_info - Platform information for a TPMI device instance * @package_id: CPU Package id diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 4a1dc88ddb..76121c2bb4 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -566,7 +566,7 @@ enum * * _ RCU: * 1) rcutree_migrate_callbacks() migrates the queue. - * 2) rcu_report_dead() reports the final quiescent states. + * 2) rcutree_report_cpu_dead() reports the final quiescent states. * * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue * diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 1b7a44b356..25142a0e2f 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -166,6 +166,10 @@ struct io_pgtable_ops { struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); + int (*read_and_clear_dirty)(struct io_pgtable_ops *ops, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); }; /** diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 106cdc55ff..aefb73eeee 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -20,8 +20,16 @@ enum io_uring_cmd_flags { IO_URING_F_SQE128 = (1 << 8), IO_URING_F_CQE32 = (1 << 9), IO_URING_F_IOPOLL = (1 << 10), + + /* set when uring wants to cancel a previously issued command */ + IO_URING_F_CANCEL = (1 << 11), + IO_URING_F_COMPAT = (1 << 12), }; +/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */ +#define IORING_URING_CMD_CANCELABLE (1U << 30) +#define IORING_URING_CMD_POLLED (1U << 31) + struct io_uring_cmd { struct file *file; const struct io_uring_sqe *sqe; @@ -82,6 +90,9 @@ static inline void io_uring_free(struct task_struct *tsk) __io_uring_free(tsk); } int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags); +void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, + unsigned int issue_flags); +struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd); #else static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, struct iov_iter *iter, void *ioucmd) @@ -122,6 +133,14 @@ static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd, { return -EOPNOTSUPP; } +static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ +} +static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd) +{ + return NULL; +} #endif #endif diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 5fd664fb71..239a4f6880 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -265,6 +265,12 @@ struct io_ring_ctx { */ struct io_wq_work_list iopoll_list; bool poll_multi_queue; + + /* + * Any cancelable uring_cmd is added to this list in + * ->uring_cmd() by io_uring_cmd_insert_cancelable() + */ + struct hlist_head cancelable_uring_cmd; } ____cacheline_aligned_in_smp; struct { @@ -313,6 +319,13 @@ struct io_ring_ctx { struct list_head cq_overflow_list; struct io_hash_table cancel_table; + struct hlist_head waitid_list; + +#ifdef CONFIG_FUTEX + struct hlist_head futex_list; + struct io_alloc_cache futex_cache; +#endif + const struct cred *sq_creds; /* cred used for __io_sq_thread() */ struct io_sq_data *sq_data; /* if using sq thread polling */ @@ -345,8 
+358,6 @@ struct io_ring_ctx { struct wait_queue_head rsrc_quiesce_wq; unsigned rsrc_quiesce; - struct list_head io_buffers_pages; - #if defined(CONFIG_UNIX) struct socket *ring_sock; #endif @@ -423,6 +434,7 @@ enum { /* keep async read/write and isreg together and in order */ REQ_F_SUPPORT_NOWAIT_BIT, REQ_F_ISREG_BIT, + REQ_F_POLL_NO_LAZY_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -490,6 +502,8 @@ enum { REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT), /* hashed into ->cancel_hash_locked, protected by ->uring_lock */ REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT), + /* don't use lazy poll wake for this request */ + REQ_F_POLL_NO_LAZY = BIT(REQ_F_POLL_NO_LAZY_BIT), }; typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 0225cf7445..81553770e4 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -13,6 +13,7 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/of.h> +#include <linux/iova_bitmap.h> #include <uapi/linux/iommu.h> #define IOMMU_READ (1 << 0) @@ -37,6 +38,7 @@ struct bus_type; struct device; struct iommu_domain; struct iommu_domain_ops; +struct iommu_dirty_ops; struct notifier_block; struct iommu_sva; struct iommu_fault_event; @@ -64,6 +66,10 @@ struct iommu_domain_geometry { #define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */ #define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */ +#define __IOMMU_DOMAIN_PLATFORM (1U << 5) + +#define __IOMMU_DOMAIN_NESTED (1U << 6) /* User-managed address space nested + on a stage-2 translation */ #define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ /* @@ -81,6 +87,8 @@ struct iommu_domain_geometry { * invalidation. * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses * represented by mm_struct's. + * IOMMU_DOMAIN_PLATFORM - Legacy domain for drivers that do their own + * dma_api stuff. Do not use in new drivers. */ #define IOMMU_DOMAIN_BLOCKED (0U) #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) @@ -91,10 +99,14 @@ struct iommu_domain_geometry { __IOMMU_DOMAIN_DMA_API | \ __IOMMU_DOMAIN_DMA_FQ) #define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA) +#define IOMMU_DOMAIN_PLATFORM (__IOMMU_DOMAIN_PLATFORM) +#define IOMMU_DOMAIN_NESTED (__IOMMU_DOMAIN_NESTED) struct iommu_domain { unsigned type; const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ struct iommu_domain_geometry geometry; struct iommu_dma_cookie *iova_cookie; @@ -133,6 +145,7 @@ enum iommu_cap { * usefully support the non-strict DMA flush queue. */ IOMMU_CAP_DEFERRED_FLUSH, + IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */ }; /* These are the possible reserved region types */ @@ -228,20 +241,115 @@ struct iommu_iotlb_gather { }; /** + * struct iommu_dirty_bitmap - Dirty IOVA bitmap state + * @bitmap: IOVA bitmap + * @gather: Range information for a pending IOTLB flush + */ +struct iommu_dirty_bitmap { + struct iova_bitmap *bitmap; + struct iommu_iotlb_gather *gather; +}; + +/* Read but do not clear any dirty bits */ +#define IOMMU_DIRTY_NO_CLEAR (1 << 0) + +/** + * struct iommu_dirty_ops - domain specific dirty tracking operations + * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain + * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled + * into a bitmap, with a bit represented as a page. 
+ * Reads the dirty PTE bits and clears them from the IO + * pagetables. + */ +struct iommu_dirty_ops { + int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled); + int (*read_and_clear_dirty)(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); +}; + +/** + * struct iommu_user_data - iommu driver specific user space data info + * @type: The data type of the user buffer + * @uptr: Pointer to the user buffer for copy_from_user() + * @len: The length of the user buffer in bytes + * + * A user space data buffer is a uAPI structure defined in + * include/uapi/linux/iommufd.h. + * @type, @uptr and @len should be copied directly from an iommufd core uAPI struct. + */ +struct iommu_user_data { + unsigned int type; + void __user *uptr; + size_t len; +}; + +/** + * __iommu_copy_struct_from_user - Copy iommu driver specific user space data + * @dst_data: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @src_data: Pointer to a struct iommu_user_data for user space data info + * @data_type: The data type of the @dst_data. Must match with @src_data.type + * @data_len: Length of current user data structure, i.e. sizeof(struct _dst) + * @min_len: Initial length of user data structure for backward compatibility. + * This should be offsetofend using the last member in the user data + * struct that was initially added to include/uapi/linux/iommufd.h + */ +static inline int __iommu_copy_struct_from_user( + void *dst_data, const struct iommu_user_data *src_data, + unsigned int data_type, size_t data_len, size_t min_len) +{ + if (WARN_ON(!dst_data || !src_data)) + return -EINVAL; + if (src_data->type != data_type) + return -EINVAL; + if (src_data->len < min_len || data_len < src_data->len) + return -EINVAL; + return copy_struct_from_user(dst_data, data_len, src_data->uptr, + src_data->len); +} + +/** + * iommu_copy_struct_from_user - Copy iommu driver specific user space data + * @kdst: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @user_data: Pointer to a struct iommu_user_data for user space data info + * @data_type: The data type of the @kdst. Must match with @user_data->type + * @min_last: The last member of the data structure @kdst points to in the + * initial version. + * Return 0 for success, otherwise -error. + */ +#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \ + __iommu_copy_struct_from_user(kdst, user_data, data_type, \ + sizeof(*kdst), \ + offsetofend(typeof(*kdst), min_last)) + +/** * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @hw_info: report iommu hardware information. The data buffer returned by this * op is allocated in the iommu driver and freed by the caller after * use. The information type is one of enum iommu_hw_info_type defined * in include/uapi/linux/iommufd.h. - * @domain_alloc: allocate iommu domain + * @domain_alloc: allocate and return an iommu domain on success, otherwise + * NULL is returned. The domain is not fully initialized until + * iommu_domain_alloc() returns to its caller. + * @domain_alloc_user: Allocate an iommu domain corresponding to the input + * parameters as defined in include/uapi/linux/iommufd.h. + * Unlike @domain_alloc, it is called only by IOMMUFD and + * must fully initialize the new domain before return.
+ * Upon success, if the @user_data is valid and the @parent + * points to a kernel-managed domain, the new domain must be + * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be + * NULL while the @user_data can be optionally provided, the + * new domain must support __IOMMU_DOMAIN_PAGING. + * Upon failure, ERR_PTR must be returned. + * @domain_alloc_paging: Allocate an iommu_domain that can be used for + * UNMANAGED, DMA, and DMA_FQ domain types. * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU * group and attached to the groups domain - * @set_platform_dma_ops: Returning control back to the platform DMA ops. This op - * is to support old IOMMU drivers, new drivers should use - * default domains, and the common IOMMU DMA ops. * @device_group: find iommu group for a particular device * @get_resv_regions: Request list of reserved regions for a device * @of_xlate: add OF master IDs to iommu grouping @@ -260,6 +368,13 @@ struct iommu_iotlb_gather { * will be blocked by the hardware. * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops + * @identity_domain: An always available, always attachable identity + * translation. + * @blocked_domain: An always available, always attachable blocking + * translation. + * @default_domain: If not NULL this will always be set as the default domain. + * This should be an IDENTITY/BLOCKED/PLATFORM domain. + * Do not use in new drivers. */ struct iommu_ops { bool (*capable)(struct device *dev, enum iommu_cap); @@ -267,11 +382,14 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); + struct iommu_domain *(*domain_alloc_user)( + struct device *dev, u32 flags, struct iommu_domain *parent, + const struct iommu_user_data *user_data); + struct iommu_domain *(*domain_alloc_paging)(struct device *dev); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); void (*probe_finalize)(struct device *dev); - void (*set_platform_dma_ops)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); /* Request/Free a list of reserved regions for a device */ @@ -294,6 +412,9 @@ struct iommu_ops { const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; + struct iommu_domain *identity_domain; + struct iommu_domain *blocked_domain; + struct iommu_domain *default_domain; }; /** @@ -312,10 +433,8 @@ struct iommu_ops { * * ENODEV - device specific errors, not able to be attached * * <others> - treated as ENODEV by the caller. Use is discouraged * @set_dev_pasid: set an iommu domain to a pasid of device - * @map: map a physically contiguous memory region to an iommu domain * @map_pages: map a physically contiguous set of pages of the same size to * an iommu domain. 
- * @unmap: unmap a physically contiguous memory region from an iommu domain * @unmap_pages: unmap a number of pages of the same size from an iommu domain * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain * @iotlb_sync_map: Sync mappings created recently using @map to the hardware @@ -334,20 +453,16 @@ struct iommu_domain_ops { int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); - int (*map)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp); int (*map_pages)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped); - size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *iotlb_gather); size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); - void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, - size_t size); + int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, + size_t size); void (*iotlb_sync)(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather); @@ -368,6 +483,7 @@ struct iommu_domain_ops { * @list: Used by the iommu-core to keep a list of registered iommus * @ops: iommu-ops for talking to this iommu * @dev: struct device for sysfs handling + * @singleton_group: Used internally for drivers that have only one group * @max_pasids: number of supported PASIDs */ struct iommu_device { @@ -375,6 +491,7 @@ struct iommu_device { const struct iommu_ops *ops; struct fwnode_handle *fwnode; struct device *dev; + struct iommu_group *singleton_group; u32 max_pasids; }; @@ -418,6 +535,7 @@ struct iommu_fault_param { * @attach_deferred: the dma domain attachment is deferred * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs * @require_direct: device requires IOMMU_RESV_DIRECT regions + * @shadow_on_flush: IOTLB flushes are used to sync shadow tables * * TODO: migrate other per device data pointers under iommu_dev_data, e.g. 
* struct iommu_group *iommu_group; @@ -433,6 +551,7 @@ struct dev_iommu { u32 attach_deferred:1; u32 pci_32bit_workaround:1; u32 require_direct:1; + u32 shadow_on_flush:1; }; int iommu_device_register(struct iommu_device *iommu, @@ -632,12 +751,35 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return gather && gather->queued; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ + if (gather) + iommu_iotlb_gather_init(gather); + + dirty->bitmap = bitmap; + dirty->gather = gather; +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ + if (dirty->bitmap) + iova_bitmap_set(dirty->bitmap, iova, length); + + if (dirty->gather) + iommu_iotlb_gather_add_range(dirty->gather, iova, length); +} + /* PCI device grouping function */ extern struct iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ extern struct iommu_group *generic_device_group(struct device *dev); /* FSL-MC device grouping function */ struct iommu_group *fsl_mc_device_group(struct device *dev); +extern struct iommu_group *generic_single_device_group(struct device *dev); /** * struct iommu_fwspec - per-device IOMMU instance data @@ -738,6 +880,8 @@ struct iommu_fwspec {}; struct iommu_device {}; struct iommu_fault_param {}; struct iommu_iotlb_gather {}; +struct iommu_dirty_bitmap {}; +struct iommu_dirty_ops {}; static inline bool iommu_present(const struct bus_type *bus) { @@ -970,6 +1114,18 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return false; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } @@ -1110,7 +1266,7 @@ static inline void iommu_free_global_pasid(ioasid_t pasid) {} * Creates a mapping at @iova for the buffer described by a scatterlist * stored in the given sg_table object in the provided IOMMU domain. 
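 * * With the ssize_t return type below, a caller can check for a negative * errno as well as compare the returned byte count against the total * size of the buffer described by @sgt (illustrative note).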
*/ -static inline size_t iommu_map_sgtable(struct iommu_domain *domain, +static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain, unsigned long iova, struct sg_table *sgt, int prot) { return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot, @@ -1190,6 +1346,12 @@ static inline bool mm_valid_pasid(struct mm_struct *mm) { return mm->pasid != IOMMU_PASID_INVALID; } + +static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm) +{ + return mm->pasid; +} + void mm_pasid_drop(struct mm_struct *mm); struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm); @@ -1212,6 +1374,12 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle) } static inline void mm_pasid_init(struct mm_struct *mm) {} static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; } + +static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm) +{ + return IOMMU_PASID_INVALID; +} + static inline void mm_pasid_drop(struct mm_struct *mm) {} #endif /* CONFIG_IOMMU_SVA */ diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 25d768d489..14f5cfabbb 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -229,7 +229,7 @@ static inline unsigned long resource_ext_type(const struct resource *res) return res->flags & IORESOURCE_EXT_TYPE_BITS; } /* True iff r1 completely contains r2 */ -static inline bool resource_contains(struct resource *r1, struct resource *r2) +static inline bool resource_contains(const struct resource *r1, const struct resource *r2) { if (resource_type(r1) != resource_type(r2)) return false; @@ -239,13 +239,13 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2) } /* True if any part of r1 overlaps r2 */ -static inline bool resource_overlaps(struct resource *r1, struct resource *r2) +static inline bool resource_overlaps(const struct resource *r1, const struct resource *r2) { return r1->start <= r2->end && r1->end >= r2->start; } -static inline bool -resource_intersection(struct resource *r1, struct resource *r2, struct resource *r) +static inline bool resource_intersection(const struct resource *r1, const struct resource *r2, + struct resource *r) { if (!resource_overlaps(r1, r2)) return false; @@ -254,8 +254,8 @@ resource_intersection(struct resource *r1, struct resource *r2, struct resource return true; } -static inline bool -resource_union(struct resource *r1, struct resource *r2, struct resource *r) +static inline bool resource_union(const struct resource *r1, const struct resource *r2, + struct resource *r) { if (!resource_overlaps(r1, r2)) return false; diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h index cb71aa616b..1b06d074ad 100644 --- a/include/linux/iosys-map.h +++ b/include/linux/iosys-map.h @@ -426,7 +426,7 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * iosys_map_rd_field - Read a member from a struct in the iosys_map * * @map__: The iosys_map structure - * @struct_offset__: Offset from the beggining of the map, where the struct + * @struct_offset__: Offset from the beginning of the map, where the struct * is located * @struct_type__: The struct describing the layout of the mapping * @field__: Member of the struct to read @@ -494,7 +494,7 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * iosys_map_wr_field - Write to a member of a struct in the iosys_map * * @map__: The iosys_map structure - * @struct_offset__: Offset from the beggining of the map, where the struct + * @struct_offset__: Offset from the 
beginning of the map, where the struct * is located * @struct_type__: The struct describing the layout of the mapping * @field__: Member of the struct to read diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h new file mode 100644 index 0000000000..270454a670 --- /dev/null +++ b/include/linux/iov_iter.h @@ -0,0 +1,274 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* I/O iterator iteration building functions. + * + * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_IOV_ITER_H +#define _LINUX_IOV_ITER_H + +#include <linux/uio.h> +#include <linux/bvec.h> + +typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len, + void *priv, void *priv2); +typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len, + void *priv, void *priv2); + +/* + * Handle ITER_UBUF. + */ +static __always_inline +size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_ustep_f step) +{ + void __user *base = iter->ubuf; + size_t progress = 0, remain; + + remain = step(base + iter->iov_offset, 0, len, priv, priv2); + progress = len - remain; + iter->iov_offset += progress; + iter->count -= progress; + return progress; +} + +/* + * Handle ITER_IOVEC. + */ +static __always_inline +size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_ustep_f step) +{ + const struct iovec *p = iter->__iov; + size_t progress = 0, skip = iter->iov_offset; + + do { + size_t remain, consumed; + size_t part = min(len, p->iov_len - skip); + + if (likely(part)) { + remain = step(p->iov_base + skip, progress, part, priv, priv2); + consumed = part - remain; + progress += consumed; + skip += consumed; + len -= consumed; + if (skip < p->iov_len) + break; + } + p++; + skip = 0; + } while (len); + + iter->nr_segs -= p - iter->__iov; + iter->__iov = p; + iter->iov_offset = skip; + iter->count -= progress; + return progress; +} + +/* + * Handle ITER_KVEC. + */ +static __always_inline +size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_step_f step) +{ + const struct kvec *p = iter->kvec; + size_t progress = 0, skip = iter->iov_offset; + + do { + size_t remain, consumed; + size_t part = min(len, p->iov_len - skip); + + if (likely(part)) { + remain = step(p->iov_base + skip, progress, part, priv, priv2); + consumed = part - remain; + progress += consumed; + skip += consumed; + len -= consumed; + if (skip < p->iov_len) + break; + } + p++; + skip = 0; + } while (len); + + iter->nr_segs -= p - iter->kvec; + iter->kvec = p; + iter->iov_offset = skip; + iter->count -= progress; + return progress; +} + +/* + * Handle ITER_BVEC. 
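All of the iterate_*() helpers in this new header share one step-function contract: the step is handed a mapped (or user) address, the running progress count and a segment length, and returns how many bytes it did not consume. A minimal editorial sketch of a matching pair of steps that copy into a flat kernel buffer passed via @priv (memcpy_step()/memcpy_ustep() are illustrative names, used with iterate_and_advance() defined further down):

/* Kernel-address step: memcpy cannot fail, so everything is consumed. */
static size_t memcpy_step(void *iter_base, size_t progress, size_t len,
			  void *priv, void *priv2)
{
	memcpy(priv + progress, iter_base, len);
	return 0;
}

/* User-address step: copy_from_user() returns the bytes left uncopied,
 * which is exactly the return convention the iterators expect. */
static size_t memcpy_ustep(void __user *iter_base, size_t progress, size_t len,
			   void *priv, void *priv2)
{
	return copy_from_user(priv + progress, iter_base, len);
}

A caller would then pull bytes out of an iterator with something like: copied = iterate_and_advance(iter, want, buf, memcpy_ustep, memcpy_step);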
+ */ +static __always_inline +size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_step_f step) +{ + const struct bio_vec *p = iter->bvec; + size_t progress = 0, skip = iter->iov_offset; + + do { + size_t remain, consumed; + size_t offset = p->bv_offset + skip, part; + void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE); + + part = min3(len, + (size_t)(p->bv_len - skip), + (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); + remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2); + kunmap_local(kaddr); + consumed = part - remain; + len -= consumed; + progress += consumed; + skip += consumed; + if (skip >= p->bv_len) { + skip = 0; + p++; + } + if (remain) + break; + } while (len); + + iter->nr_segs -= p - iter->bvec; + iter->bvec = p; + iter->iov_offset = skip; + iter->count -= progress; + return progress; +} + +/* + * Handle ITER_XARRAY. + */ +static __always_inline +size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_step_f step) +{ + struct folio *folio; + size_t progress = 0; + loff_t start = iter->xarray_start + iter->iov_offset; + pgoff_t index = start / PAGE_SIZE; + XA_STATE(xas, iter->xarray, index); + + rcu_read_lock(); + xas_for_each(&xas, folio, ULONG_MAX) { + size_t remain, consumed, offset, part, flen; + + if (xas_retry(&xas, folio)) + continue; + if (WARN_ON(xa_is_value(folio))) + break; + if (WARN_ON(folio_test_hugetlb(folio))) + break; + + offset = offset_in_folio(folio, start + progress); + flen = min(folio_size(folio) - offset, len); + + while (flen) { + void *base = kmap_local_folio(folio, offset); + + part = min_t(size_t, flen, + PAGE_SIZE - offset_in_page(offset)); + remain = step(base, progress, part, priv, priv2); + kunmap_local(base); + + consumed = part - remain; + progress += consumed; + len -= consumed; + + if (remain || len == 0) + goto out; + flen -= consumed; + offset += consumed; + } + } + +out: + rcu_read_unlock(); + iter->iov_offset += progress; + iter->count -= progress; + return progress; +} + +/* + * Handle ITER_DISCARD. + */ +static __always_inline +size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_step_f step) +{ + size_t progress = len; + + iter->count -= progress; + return progress; +} + +/** + * iterate_and_advance2 - Iterate over an iterator + * @iter: The iterator to iterate over. + * @len: The amount to iterate over. + * @priv: Data for the step functions. + * @priv2: More data for the step functions. + * @ustep: Function for UBUF/IOVEC iterators; given __user addresses. + * @step: Function for other iterators; given kernel addresses. + * + * Iterate over the next part of an iterator, up to the specified length. The + * buffer is presented in segments, which for kernel iteration are broken up by + * physical pages and mapped, with the mapped address being presented. + * + * Two step functions, @step and @ustep, must be provided, one for handling + * mapped kernel addresses and the other is given user addresses which have the + * potential to fault since no pinning is performed. + * + * The step functions are passed the address and length of the segment, @priv, + * @priv2 and the amount of data so far iterated over (which can, for example, + * be added to @priv to point to the right part of a second buffer). The step + * functions should return the amount of the segment they didn't process (ie. 0 + * indicates complete processing). + * + * This function returns the amount of data processed (ie.
0 means nothing was + processed and the value of @len means processed to completion). + */ +static __always_inline +size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv, + void *priv2, iov_ustep_f ustep, iov_step_f step) +{ + if (unlikely(iter->count < len)) + len = iter->count; + if (unlikely(!len)) + return 0; + + if (likely(iter_is_ubuf(iter))) + return iterate_ubuf(iter, len, priv, priv2, ustep); + if (likely(iter_is_iovec(iter))) + return iterate_iovec(iter, len, priv, priv2, ustep); + if (iov_iter_is_bvec(iter)) + return iterate_bvec(iter, len, priv, priv2, step); + if (iov_iter_is_kvec(iter)) + return iterate_kvec(iter, len, priv, priv2, step); + if (iov_iter_is_xarray(iter)) + return iterate_xarray(iter, len, priv, priv2, step); + return iterate_discard(iter, len, priv, priv2, step); +} + +/** + * iterate_and_advance - Iterate over an iterator + * @iter: The iterator to iterate over. + * @len: The amount to iterate over. + * @priv: Data for the step functions. + * @ustep: Function for UBUF/IOVEC iterators; given __user addresses. + * @step: Function for other iterators; given kernel addresses. + * + * As iterate_and_advance2(), but priv2 is always NULL. + */ +static __always_inline +size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv, + iov_ustep_f ustep, iov_step_f step) +{ + return iterate_and_advance2(iter, len, priv, NULL, ustep, step); +} + +#endif /* _LINUX_IOV_ITER_H */ diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h index c006cf0a25..1c338f5e5b 100644 --- a/include/linux/iova_bitmap.h +++ b/include/linux/iova_bitmap.h @@ -7,6 +7,7 @@ #define _IOVA_BITMAP_H_ #include <linux/types.h> +#include <linux/errno.h> struct iova_bitmap; @@ -14,6 +15,7 @@ typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap, unsigned long iova, size_t length, void *opaque); +#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, unsigned long page_size, u64 __user *data); @@ -22,5 +24,29 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, iova_bitmap_fn_t fn); void iova_bitmap_set(struct iova_bitmap *bitmap, unsigned long iova, size_t length); +#else +static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, + size_t length, + unsigned long page_size, + u64 __user *data) +{ + return NULL; +} + +static inline void iova_bitmap_free(struct iova_bitmap *bitmap) +{ +} + +static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, + iova_bitmap_fn_t fn) +{ + return -EOPNOTSUPP; +} + +static inline void iova_bitmap_set(struct iova_bitmap *bitmap, + unsigned long iova, size_t length) +{ +} +#endif #endif diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index af8a771a05..5e605e384a 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -82,6 +82,7 @@ struct ipv6_devconf { __u32 ioam6_id_wide; __u8 ioam6_enabled; __u8 ndisc_evict_nocarrier; + __u8 ra_honor_pio_life; struct ctl_table_header *sysctl_header; }; @@ -213,28 +214,9 @@ struct ipv6_pinfo { __be32 flow_label; __u32 frag_size; - /* - * Packed in 16bits. - * Omit one shift by putting the signed field at MSB. - */ -#if defined(__BIG_ENDIAN_BITFIELD) - __s16 hop_limit:9; - __u16 __unused_1:7; -#else - __u16 __unused_1:7; - __s16 hop_limit:9; -#endif + s16 hop_limit; + u8 mcast_hops; -#if defined(__BIG_ENDIAN_BITFIELD) - /* Packed in 16bits.
*/ - __s16 mcast_hops:9; - __u16 __unused_2:6, - mc_loop:1; -#else - __u16 mc_loop:1, - __unused_2:6; - __s16 mcast_hops:9; -#endif int ucast_oif; int mcast_oif; @@ -262,21 +244,11 @@ struct ipv6_pinfo { } rxopt; /* sockopt flags */ - __u16 recverr:1, - sndflow:1, - repflow:1, - pmtudisc:3, - padding:1, /* 1 bit hole */ - srcprefs:3, /* 001: prefer temporary address + __u8 srcprefs; /* 001: prefer temporary address * 010: prefer public address * 100: prefer care-of address */ - dontfrag:1, - autoflowlabel:1, - autoflowlabel_set:1, - mc_all:1, - recverr_rfc4884:1, - rtalert_isolate:1; + __u8 pmtudisc; __u8 min_hopcount; __u8 tclass; __be32 rcv_flowinfo; @@ -293,6 +265,18 @@ struct ipv6_pinfo { struct inet6_cork cork; }; +/* We currently use available bits from inet_sk(sk)->inet_flags, + * this could change in the future. + */ +#define inet6_test_bit(nr, sk) \ + test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) +#define inet6_set_bit(nr, sk) \ + set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) +#define inet6_clear_bit(nr, sk) \ + clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) +#define inet6_assign_bit(nr, sk, val) \ + assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val) + /* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */ struct raw6_sock { /* inet_sock has to be the first member of raw6_sock */ diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 8cd11a2232..136f2980cb 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -66,6 +66,9 @@ void irq_work_sync(struct irq_work *work); void irq_work_run(void); bool irq_work_needs_cpu(void); void irq_work_single(void *arg); + +void arch_irq_work_raise(void); + #else static inline bool irq_work_needs_cpu(void) { return false; } static inline void irq_work_run(void) { } diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 51c254b7fe..ee0a82c605 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -174,7 +174,7 @@ struct irq_domain { irq_hw_number_t hwirq_max; unsigned int revmap_size; struct radix_tree_root revmap_tree; - struct irq_data __rcu *revmap[]; + struct irq_data __rcu *revmap[] __counted_by(revmap_size); }; /* Irq domain flags */ diff --git a/include/linux/iversion.h b/include/linux/iversion.h index f174ff1b59..8f972eaca2 100644 --- a/include/linux/iversion.h +++ b/include/linux/iversion.h @@ -256,7 +256,7 @@ inode_peek_iversion(const struct inode *inode) * For filesystems without any sort of change attribute, the best we can * do is fake one up from the ctime: */ -static inline u64 time_to_chattr(struct timespec64 *t) +static inline u64 time_to_chattr(const struct timespec64 *t) { u64 chattr = t->tv_sec; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 0fc6c1f512..beb30719ee 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -886,7 +886,7 @@ struct journal_s * Journal head shrinker, reclaim buffer's journal head which * has been written back. 
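Referring back to the inet6_*_bit() helpers added in ipv6.h above: the removed ipv6_pinfo bitfields become atomic bitops on inet_sk(sk)->inet_flags, so flag updates no longer need the socket lock. A hedged sketch (it assumes the flag conversion provides an INET_FLAGS_RECVERR6 bit; the function name is illustrative):

static void example_set_recverr6(struct sock *sk, int val)
{
	/* Replaces the old np->recverr:1 bitfield write; the bitop is
	 * atomic, so no socket lock is required around it. */
	inet6_assign_bit(RECVERR6, sk, val != 0);
}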
*/ - struct shrinker j_shrinker; + struct shrinker *j_shrinker; /** * @j_checkpoint_jh_count: diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 71fa9a40fb..72cb693b07 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -285,8 +285,10 @@ static inline bool kasan_check_byte(const void *address) #if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) void kasan_unpoison_task_stack(struct task_struct *task); +asmlinkage void kasan_unpoison_task_stack_below(const void *watermark); #else static inline void kasan_unpoison_task_stack(struct task_struct *task) {} +static inline void kasan_unpoison_task_stack_below(const void *watermark) {} #endif #ifdef CONFIG_KASAN_GENERIC diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cee8fe87e9..d9ad21058e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -13,6 +13,7 @@ #include <linux/stdarg.h> #include <linux/align.h> +#include <linux/array_size.h> #include <linux/limits.h> #include <linux/linkage.h> #include <linux/stddef.h> @@ -50,12 +51,6 @@ #define READ 0 #define WRITE 1 -/** - * ARRAY_SIZE - get the number of elements in array @arr - * @arr: array to be sized - */ -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) - #define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL) #define u64_to_user_ptr(x) ( \ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 2a36f3218b..99aaa050cc 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -316,6 +316,7 @@ struct kernfs_ops { struct poll_table_struct *pt); int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma); + loff_t (*llseek)(struct kernfs_open_file *of, loff_t offset, int whence); }; /* diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 32c7807855..8227455192 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -22,10 +22,6 @@ #include <uapi/linux/kexec.h> #include <linux/verification.h> -/* Location of a reserved region to hold the crash kernel. 
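The kernfs_ops->llseek() hook added above lets an attribute override kernfs's default seek handling. A minimal editorial sketch (the fixed 4 KiB window and all names are assumptions; fixed_size_llseek() is the stock VFS helper):

static loff_t example_kernfs_llseek(struct kernfs_open_file *of,
				    loff_t offset, int whence)
{
	/* Clamp seek positions to a known file size instead of the
	 * default kernfs behaviour. */
	return fixed_size_llseek(of->file, offset, whence, SZ_4K);
}

static const struct kernfs_ops example_kf_ops = {
	/* .read/.write callbacks omitted */
	.llseek	= example_kernfs_llseek,
};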
- */ -extern struct resource crashk_res; -extern struct resource crashk_low_res; extern note_buf_t __percpu *crash_notes; #ifdef CONFIG_KEXEC_CORE diff --git a/include/linux/key.h b/include/linux/key.h index 938d7ecfb4..943a432da3 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -515,6 +515,7 @@ extern void key_init(void); #define key_init() do { } while(0) #define key_free_user_ns(ns) do { } while(0) #define key_remove_domain(d) do { } while(0) +#define key_lookup(k) NULL #endif /* CONFIG_KEYS */ #endif /* __KERNEL__ */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 8de5d51a0b..0ff44d6633 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -26,8 +26,7 @@ #include <linux/rcupdate.h> #include <linux/mutex.h> #include <linux/ftrace.h> -#include <linux/refcount.h> -#include <linux/freelist.h> +#include <linux/objpool.h> #include <linux/rethook.h> #include <asm/kprobes.h> @@ -141,7 +140,7 @@ static inline bool kprobe_ftrace(struct kprobe *p) */ struct kretprobe_holder { struct kretprobe __rcu *rp; - refcount_t ref; + struct objpool_head pool; }; struct kretprobe { @@ -154,7 +153,6 @@ struct kretprobe { #ifdef CONFIG_KRETPROBE_ON_RETHOOK struct rethook *rh; #else - struct freelist_head freelist; struct kretprobe_holder *rph; #endif }; @@ -165,10 +163,7 @@ struct kretprobe_instance { #ifdef CONFIG_KRETPROBE_ON_RETHOOK struct rethook_node node; #else - union { - struct freelist_node freelist; - struct rcu_head rcu; - }; + struct rcu_head rcu; struct llist_node llist; struct kretprobe_holder *rph; kprobe_opcode_t *ret_addr; @@ -445,6 +440,10 @@ int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, char *type, char *sym); + +int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); + #else /* !CONFIG_KPROBES: */ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h index 529974e22e..7fcf29a4e0 100644 --- a/include/linux/kstrtox.h +++ b/include/linux/kstrtox.h @@ -147,9 +147,4 @@ extern long simple_strtol(const char *,char **,unsigned int); extern unsigned long long simple_strtoull(const char *,char **,unsigned int); extern long long simple_strtoll(const char *,char **,unsigned int); -static inline int strtobool(const char *s, bool *res) -{ - return kstrtobool(s, res); -} - #endif /* _LINUX_KSTRTOX_H */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 2c30ade43b..b11f53c1ba 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -86,6 +86,7 @@ void free_kthread_struct(struct task_struct *k); void kthread_bind(struct task_struct *k, unsigned int cpu); void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); int kthread_stop(struct task_struct *k); +int kthread_stop_put(struct task_struct *k); bool kthread_should_stop(void); bool kthread_should_park(void); bool kthread_should_stop_or_park(void); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fb6c6109fd..4944136efa 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -664,7 +664,7 @@ struct kvm_irq_routing_table { * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. 
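Several structures in this merge gain __counted_by() annotations (irqdomain's revmap[] earlier, kvm's map[] in the hunk just below, libata's cpr[] after that). The annotation ties a flexible array to its length field for FORTIFY/UBSAN bounds checking, which constrains the allocation pattern: the counter must be assigned before the array is indexed. A generic sketch (struct and field names are illustrative):

struct example_table {
	u32 nr;					/* bound checked against accesses */
	struct hlist_head map[] __counted_by(nr);
};

static struct example_table *example_table_alloc(u32 n)
{
	struct example_table *t = kzalloc(struct_size(t, map, n), GFP_KERNEL);

	if (!t)
		return NULL;
	t->nr = n;	/* set the counter before touching map[] */
	return t;
}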
*/ - struct hlist_head map[]; + struct hlist_head map[] __counted_by(nr_rt_entries); }; #endif diff --git a/include/linux/libata.h b/include/linux/libata.h index 2a7d2af0ed..1dbb14dacc 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -656,7 +656,7 @@ struct ata_cpr { struct ata_cpr_log { u8 nr_cpr; - struct ata_cpr cpr[]; + struct ata_cpr cpr[] __counted_by(nr_cpr); }; struct ata_device { @@ -1561,6 +1561,11 @@ void ata_port_desc(struct ata_port *ap, const char *fmt, ...); extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, const char *name); #endif +static inline void ata_port_desc_misc(struct ata_port *ap, int irq) +{ + ata_port_desc(ap, "irq %d", irq); + ata_port_desc(ap, "lpm-pol %d", ap->target_lpm_policy); +} static inline bool ata_tag_internal(unsigned int tag) { @@ -1881,23 +1886,21 @@ static inline unsigned long ata_deadline(unsigned long from_jiffies, change in future hardware and specs, secondly 0xFF means 'no DMA' but is > UDMA_0. Dyma ddreigiau */ -static inline int ata_using_mwdma(struct ata_device *adev) +static inline bool ata_using_mwdma(struct ata_device *adev) { - if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4) - return 1; - return 0; + return adev->dma_mode >= XFER_MW_DMA_0 && + adev->dma_mode <= XFER_MW_DMA_4; } -static inline int ata_using_udma(struct ata_device *adev) +static inline bool ata_using_udma(struct ata_device *adev) { - if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7) - return 1; - return 0; + return adev->dma_mode >= XFER_UDMA_0 && + adev->dma_mode <= XFER_UDMA_7; } -static inline int ata_dma_enabled(struct ata_device *adev) +static inline bool ata_dma_enabled(struct ata_device *adev) { - return (adev->dma_mode == 0xFF ? 0 : 1); + return adev->dma_mode != 0xFF; } /************************************************************************** diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index 15e0e0209d..7303b4bc2c 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -43,15 +43,6 @@ static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) __set_bit(nr, addr); } -static inline void linkmode_set_bit_array(const int *array, int array_size, - unsigned long *addr) -{ - int i; - - for (i = 0; i < array_size; i++) - linkmode_set_bit(array[i], addr); -} - static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr) { __clear_bit(nr, addr); @@ -71,6 +62,15 @@ static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr) return test_bit(nr, addr); } +static inline void linkmode_set_bit_array(const int *array, int array_size, + unsigned long *addr) +{ + int i; + + for (i = 0; i < array_size; i++) + linkmode_set_bit(array[i], addr); +} + static inline int linkmode_equal(const unsigned long *src1, const unsigned long *src2) { diff --git a/include/linux/list.h b/include/linux/list.h index 164b4d0e9d..1837caedf7 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -687,6 +687,14 @@ static inline void list_splice_tail_init(struct list_head *list, for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next) /** + * list_for_each_reverse - iterate backwards over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. 
+ */ +#define list_for_each_reverse(pos, head) \ + for (pos = (head)->prev; pos != (head); pos = pos->prev) + +/** * list_for_each_rcu - Iterate over a list in an RCU-safe fashion * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. diff --git a/include/linux/llist.h b/include/linux/llist.h index 85bda2d02d..2c982ff747 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -74,6 +74,33 @@ static inline void init_llist_head(struct llist_head *list) } /** + * init_llist_node - initialize lock-less list node + * @node: the node to be initialised + * + * In cases where there is a need to test if a node is on + * a list or not, this initialises the node to clearly + * not be on any list. + */ +static inline void init_llist_node(struct llist_node *node) +{ + node->next = node; +} + +/** + * llist_on_list - test if a lock-less list node is on a list + * @node: the node to test + * + * When a node is on a list the ->next pointer will be NULL or + * some other node. It can never point to itself. We use that + * in init_llist_node() to record that a node is not on any list, + * and here to test whether it is on any list. + */ +static inline bool llist_on_list(const struct llist_node *node) +{ + return node->next != node; +} + +/** * llist_entry - get the struct of this entry * @ptr: the &struct llist_node pointer. * @type: the type of the struct this is embedded in. @@ -249,6 +276,25 @@ static inline struct llist_node *__llist_del_all(struct llist_head *head) extern struct llist_node *llist_del_first(struct llist_head *head); +/** + * llist_del_first_init - delete first entry from lock-less list and mark it as being off-list + * @head: the head of lock-less list to delete from. + * + * This behaves the same as llist_del_first() except that init_llist_node() is called + * on the returned node so that llist_on_list() will report false for the node.
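A short sketch of the intended init_llist_node()/llist_on_list() pattern (the container struct and queue are editorial assumptions; llist_del_first_init() is defined just below):

struct example_item {
	struct llist_node node;
};

static void example_queue_once(struct llist_head *q, struct example_item *item)
{
	/* A freshly initialised node reports "not on any list"... */
	init_llist_node(&item->node);

	/* ...so accidental double-queueing can be detected without
	 * carrying extra state in the container. Note the test and the
	 * add are not atomic together; this is single-owner usage. */
	if (!llist_on_list(&item->node))
		llist_add(&item->node, q);
}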
+ */ +static inline struct llist_node *llist_del_first_init(struct llist_head *head) +{ + struct llist_node *n = llist_del_first(head); + + if (n) + init_llist_node(n); + return n; +} + +extern bool llist_del_first_this(struct llist_head *head, + struct llist_node *this); + struct llist_node *llist_reverse_order(struct llist_node *head); #endif /* LLIST_H */ diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 0f016d69c9..9f565416d1 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -282,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, struct nlm_lock *, struct nlm_cookie *); __be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *); -void nlmsvc_retry_blocked(void); +void nlmsvc_retry_blocked(struct svc_rqst *rqstp); void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, nlm_host_match_fn_t match); void nlmsvc_grant_reply(struct nlm_cookie *, __be32); diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h index 54945aa824..babf4e3c28 100644 --- a/include/linux/logic_pio.h +++ b/include/linux/logic_pio.h @@ -39,9 +39,6 @@ struct logic_pio_host_ops { #ifdef CONFIG_INDIRECT_PIO u8 logic_inb(unsigned long addr); -void logic_outb(u8 value, unsigned long addr); -void logic_outw(u16 value, unsigned long addr); -void logic_outl(u32 value, unsigned long addr); u16 logic_inw(unsigned long addr); u32 logic_inl(unsigned long addr); void logic_outb(u8 value, unsigned long addr); diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 74c60f9446..ed3d517460 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -43,17 +43,17 @@ LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old, const kernel_cap_t *permitted) LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts) -LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb) +LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, const struct super_block *sb) LSM_HOOK(int, 0, quota_on, struct dentry *dentry) LSM_HOOK(int, 0, syslog, int type) LSM_HOOK(int, 0, settime, const struct timespec64 *ts, const struct timezone *tz) LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages) LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm) -LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file) +LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file) LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) -LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm) -LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm) +LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, const struct linux_binprm *bprm) +LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, const struct linux_binprm *bprm) LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference) LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, struct fs_context *src_sc) @@ -66,7 +66,7 @@ LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts) LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts) LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts) LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts) -LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb) +LSM_HOOK(int, 0, sb_kern_mount, 
const struct super_block *sb) LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb) LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry) LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path, @@ -311,9 +311,9 @@ LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname) LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname) LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how) LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb) -LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock, +LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock, sockptr_t optval, sockptr_t optlen, unsigned int len) -LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock, +LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock, struct sk_buff *skb, u32 *secid) LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority) LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk) diff --git a/include/linux/lwq.h b/include/linux/lwq.h new file mode 100644 index 0000000000..d081d5cf8e --- /dev/null +++ b/include/linux/lwq.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef LWQ_H +#define LWQ_H +/* + * Light-weight single-linked queue built from llist + * + * Entries can be enqueued from any context with no locking. + * Entries can be dequeued from process context with integrated locking. + * + * This is particularly suitable when work items are queued in + * BH or IRQ context, and where work items are handled one at a time + * by dedicated threads. + */ +#include <linux/container_of.h> +#include <linux/spinlock.h> +#include <linux/llist.h> + +struct lwq_node { + struct llist_node node; +}; + +struct lwq { + spinlock_t lock; + struct llist_node *ready; /* entries to be dequeued */ + struct llist_head new; /* entries being enqueued */ +}; + +/** + * lwq_init - initialise a lwq + * @q: the lwq object + */ +static inline void lwq_init(struct lwq *q) +{ + spin_lock_init(&q->lock); + q->ready = NULL; + init_llist_head(&q->new); +} + +/** + * lwq_empty - test if lwq contains any entry + * @q: the lwq object + * + * This empty test contains an acquire barrier so that if a wakeup + * is sent when lwq_dequeue returns true, it is safe to go to sleep after + * a test on lwq_empty(). + */ +static inline bool lwq_empty(struct lwq *q) +{ + /* acquire ensures ordering wrt lwq_enqueue() */ + return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new); +} + +struct llist_node *__lwq_dequeue(struct lwq *q); +/** + * lwq_dequeue - dequeue first (oldest) entry from lwq + * @q: the queue to dequeue from + * @type: the type of object to return + * @member: the member in the returned object which is an lwq_node. + * + * Remove a single object from the lwq and return it. This will take + * a spinlock and so must always be called in the same context, typically + * process context. + */ +#define lwq_dequeue(q, type, member) \ + ({ struct llist_node *_n = __lwq_dequeue(q); \ + _n ? container_of(_n, type, member.node) : NULL; }) + +struct llist_node *lwq_dequeue_all(struct lwq *q); + +/** + * lwq_for_each_safe - iterate over detached queue allowing deletion + * @_n: iterator variable + * @_t1: temporary struct llist_node ** + * @_t2: temporary struct llist_node * + * @_l: address of llist_node pointer from lwq_dequeue_all() + * @_member: member in _n where lwq_node is found. + * + * Iterate over members in a dequeued list.
If the iterator variable + * is set to NULL, the iterator removes that entry from the queue. + */ +#define lwq_for_each_safe(_n, _t1, _t2, _l, _member) \ + for (_t1 = (_l); \ + *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\ + _t2 = ((*_t1)->next), \ + true) \ + : false; \ + (_n) ? (_t1 = &(_n)->_member.node.next, 0) \ + : ((*(_t1) = (_t2)), 0)) + +/** + * lwq_enqueue - add a new item to the end of the queue + * @n - the lwq_node embedded in the item to be added + * @q - the lwq to append to. + * + * No locking is needed to append to the queue so this can + * be called from any context. + * Return %true if the list may have previously been empty. + */ +static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q) +{ + /* acquire ensures ordering wrt lwq_dequeue */ + return llist_add(&n->node, &q->new) && + smp_load_acquire(&q->ready) == NULL; +} + +/** + * lwq_enqueue_batch - add a list of new items to the end of the queue + * @n - the lwq_node embedded in the first item to be added + * @q - the lwq to append to. + * + * No locking is needed to append to the queue so this can + * be called from any context. + * Return %true if the list may have previously been empty. + */ +static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q) +{ + struct llist_node *e = n; + + /* acquire ensures ordering wrt lwq_dequeue */ + return llist_add_batch(llist_reverse_order(n), e, &q->new) && + smp_load_acquire(&q->ready) == NULL; +} +#endif /* LWQ_H */ diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 8fa23bdced..007fd9c3e4 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -420,7 +420,7 @@ static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising) * A function that translates value of following registers to the linkmode: * IEEE 802.3-2018 45.2.3.10 "EEE control and capability 1" register (3.20) * IEEE 802.3-2018 45.2.7.13 "EEE advertisement 1" register (7.60) - * IEEE 802.3-2018 45.2.7.14 "EEE "link partner ability 1 register (7.61) + * IEEE 802.3-2018 45.2.7.14 "EEE link partner ability 1" register (7.61) */ static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val) { diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index fd6e062065..b38a56a13f 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -31,11 +31,11 @@ typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); * @rx_work: async work to execute Rx event callback * @rx_cb: Drivers register this callback to get asynchronous ME * Rx buffer pending notifications. - * @notif_work: async work to execute FW notif event callback + * @notif_work: async work to execute FW notify event callback * @notif_cb: Drivers register this callback to get asynchronous ME * FW notification pending notifications.
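Returning to the lwq API introduced above, a minimal producer/consumer sketch (the item type, wakeup and handler are editorial stand-ins; lwq_init() is assumed to have run during setup):

struct work_item {
	struct lwq_node lnode;
	int payload;
};

static struct lwq example_queue;

static void example_wake_worker(void);			/* stand-in */
static void example_handle(struct work_item *w);	/* stand-in */

/* Producer side: safe from BH/IRQ context, no locking needed. */
static void example_produce(struct work_item *w)
{
	if (lwq_enqueue(&w->lnode, &example_queue))
		example_wake_worker();	/* queue may have been empty */
}

/* Consumer side: process context; __lwq_dequeue() takes the spinlock. */
static void example_consume(void)
{
	struct work_item *w;

	while ((w = lwq_dequeue(&example_queue, struct work_item, lnode)))
		example_handle(w);
}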
* - * @do_match: wheather device can be matched with a driver + * @do_match: whether the device can be matched with a driver * @is_added: device is already scanned * @priv_data: client private data */ @@ -94,15 +94,23 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length); +ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf, + size_t length, unsigned long timeout); ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length, + unsigned long timeout); ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf, size_t length, u8 vtag); +ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf, + size_t length, u8 vtag, unsigned long timeout); ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, u8 *vtag); ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, u8 *vtag); +ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 *vtag, unsigned long timeout); int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 1c1072e3ca..ccf0176ba3 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -40,6 +40,8 @@ extern unsigned long long max_possible_pfn; * via a driver, and never indicated in the firmware-provided memory map as * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the * kernel resource tree. + * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are + * not initialized (only for reserved regions). 
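A sketch of how the new MEMBLOCK_RSRV_NOINIT flag is meant to be applied, using memblock_reserved_mark_noinit() declared in the hunk below (the range is illustrative; only reserved regions may carry the flag):

static void __init example_reserve_noinit(phys_addr_t base, phys_addr_t size)
{
	/* Reserve the range, then opt out of struct page initialization;
	 * the caller becomes responsible for initializing that memmap. */
	memblock_reserve(base, size);
	memblock_reserved_mark_noinit(base, size);
}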
*/ enum memblock_flags { MEMBLOCK_NONE = 0x0, /* No special request */ @@ -47,6 +49,7 @@ MEMBLOCK_MIRROR = 0x2, /* mirrored region */ MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */ + MEMBLOCK_RSRV_NOINIT = 0x10, /* don't initialize struct pages */ }; /** @@ -118,6 +121,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size); int memblock_physmem_add(phys_addr_t base, phys_addr_t size); #endif void memblock_trim_memory(phys_addr_t align); +unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, + phys_addr_t base2, phys_addr_t size2); bool memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size); int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); @@ -125,6 +130,7 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); +int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size); void memblock_free_all(void); void memblock_free(void *ptr, size_t size); @@ -259,6 +265,11 @@ static inline bool memblock_is_nomap(struct memblock_region *m) return m->flags & MEMBLOCK_NOMAP; } +static inline bool memblock_is_reserved_noinit(struct memblock_region *m) +{ + return m->flags & MEMBLOCK_RSRV_NOINIT; +} + static inline bool memblock_is_driver_managed(struct memblock_region *m) { return m->flags & MEMBLOCK_DRIVER_MANAGED; diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e4e24da16d..7bdcf3020d 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -21,6 +21,7 @@ #include <linux/vmstat.h> #include <linux/writeback.h> #include <linux/page-flags.h> +#include <linux/shrinker.h> struct mem_cgroup; struct obj_cgroup; @@ -88,17 +89,6 @@ struct mem_cgroup_reclaim_iter { unsigned int generation; }; -/* - * Bitmap and deferred work of shrinker::id corresponding to memcg-aware - * shrinkers, which have elements charged to this memcg. - */ -struct shrinker_info { - struct rcu_head rcu; - atomic_long_t *nr_deferred; - unsigned long *map; - int map_nr_max; -}; - struct lruvec_stats_percpu { /* Local (CPU and cgroup) state */ long state[NR_VM_NODE_STAT_ITEMS]; @@ -153,7 +143,7 @@ struct mem_cgroup_threshold_ary { /* Size of entries[] */ unsigned int size; /* Array of thresholds */ - struct mem_cgroup_threshold entries[]; + struct mem_cgroup_threshold entries[] __counted_by(size); }; struct mem_cgroup_thresholds { @@ -299,7 +289,13 @@ struct mem_cgroup { #ifdef CONFIG_MEMCG_KMEM int kmemcg_id; - struct obj_cgroup __rcu *objcg; + /* + * memcg->objcg is wiped out as a part of the objcg reparenting + * process. memcg->orig_objcg preserves a pointer (and a reference) + * to the original objcg until the end of life of the memcg.
+ */ + struct obj_cgroup __rcu *objcg; + struct obj_cgroup *orig_objcg; /* list of inherited objcgs, protected by objcg_lock */ struct list_head objcg_list; #endif @@ -662,6 +658,8 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target, page_counter_read(&memcg->memory); } +void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg); + int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp); /** @@ -686,6 +684,9 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, return __mem_cgroup_charge(folio, mm, gfp); } +int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, + long nr_pages); + int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); @@ -713,6 +714,10 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) __mem_cgroup_uncharge_list(page_list); } +void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages); + +void mem_cgroup_replace_folio(struct folio *old, struct folio *new); + void mem_cgroup_migrate(struct folio *old, struct folio *new); /** @@ -769,6 +774,8 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); +struct mem_cgroup *get_mem_cgroup_from_current(void); + struct lruvec *folio_lruvec_lock(struct folio *folio); struct lruvec *folio_lruvec_lock_irq(struct folio *folio); struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, @@ -1080,15 +1087,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg, local_irq_restore(flags); } -static inline void count_memcg_page_event(struct page *page, - enum vm_event_item idx) -{ - struct mem_cgroup *memcg = page_memcg(page); - - if (memcg) - count_memcg_events(memcg, idx, 1); -} - static inline void count_memcg_folio_events(struct folio *folio, enum vm_event_item idx, unsigned long nr) { @@ -1249,12 +1247,23 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target, return false; } +static inline void mem_cgroup_commit_charge(struct folio *folio, + struct mem_cgroup *memcg) +{ +} + static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) { return 0; } +static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, + gfp_t gfp, long nr_pages) +{ + return 0; +} + static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry) { @@ -1273,6 +1282,16 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) { } +static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, + unsigned int nr_pages) +{ +} + +static inline void mem_cgroup_replace_folio(struct folio *old, + struct folio *new) +{ +} + static inline void mem_cgroup_migrate(struct folio *old, struct folio *new) { } @@ -1310,6 +1329,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) return NULL; } +static inline struct mem_cgroup *get_mem_cgroup_from_current(void) +{ + return NULL; +} + static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) { @@ -1565,11 +1589,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg, { } -static inline void count_memcg_page_event(struct page *page, - int idx) -{ -} - static inline void count_memcg_folio_events(struct folio *folio, enum vm_event_item idx, unsigned long nr) { @@ -1763,9 +1782,27 @@ 
bool mem_cgroup_kmem_disabled(void); int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); -struct obj_cgroup *get_obj_cgroup_from_current(void); +/* + * The returned objcg pointer is safe to use without additional + * protection within a scope. The scope is defined either by + * the current task (similar to the "current" global variable) + * or by set_active_memcg() pair. + * Please, use obj_cgroup_get() to get a reference if the pointer + * needs to be used outside of the local scope. + */ +struct obj_cgroup *current_obj_cgroup(void); struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio); +static inline struct obj_cgroup *get_obj_cgroup_from_current(void) +{ + struct obj_cgroup *objcg = current_obj_cgroup(); + + if (objcg) + obj_cgroup_get(objcg); + + return objcg; +} + int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h index 437441cdf7..1e39d27bee 100644 --- a/include/linux/memory-tiers.h +++ b/include/linux/memory-tiers.h @@ -6,6 +6,7 @@ #include <linux/nodemask.h> #include <linux/kref.h> #include <linux/mmzone.h> +#include <linux/notifier.h> /* * Each tier covers an abstract distance chunk of size 128 */ @@ -22,7 +23,9 @@ struct memory_tier; struct memory_dev_type { /* list of memory types that are part of same tier as this type */ - struct list_head tier_sibiling; + struct list_head tier_sibling; + /* list of memory types that are managed by one driver */ + struct list_head list; /* abstract distance for this specific memory type */ int adistance; /* Nodes of same abstract distance */ @@ -30,12 +33,21 @@ struct memory_dev_type { struct kref kref; }; +struct node_hmem_attrs; + #ifdef CONFIG_NUMA extern bool numa_demotion_enabled; +extern struct memory_dev_type *default_dram_type; struct memory_dev_type *alloc_memory_type(int adistance); void put_memory_type(struct memory_dev_type *memtype); void init_node_memory_type(int node, struct memory_dev_type *default_type); void clear_node_memory_type(int node, struct memory_dev_type *memtype); +int register_mt_adistance_algorithm(struct notifier_block *nb); +int unregister_mt_adistance_algorithm(struct notifier_block *nb); +int mt_calc_adistance(int node, int *adist); +int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf, + const char *source); +int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist); #ifdef CONFIG_MIGRATION int next_demotion_node(int node); void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets); @@ -60,6 +72,7 @@ static inline bool node_is_toptier(int node) #else #define numa_demotion_enabled false +#define default_dram_type NULL /* * CONFIG_NUMA implementation returns non NULL error.
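A hedged sketch of how a memory (e.g. CXL) driver might use the new abstract-distance helpers above (the performance numbers are purely illustrative; node_hmem_attrs comes from linux/node.h):

static int example_node_to_adistance(int nid)
{
	struct node_hmem_attrs perf = {
		.read_latency	 = 250,		/* ns, illustrative */
		.write_latency	 = 300,		/* ns, illustrative */
		.read_bandwidth	 = 8000,	/* MB/s, illustrative */
		.write_bandwidth = 8000,	/* MB/s, illustrative */
	};
	int adist, err;

	err = mt_perf_to_adistance(&perf, &adist);
	if (err)
		return err;
	/* adist would then feed alloc_memory_type() and
	 * init_node_memory_type() for this node. */
	return adist;
}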
*/ @@ -97,5 +110,31 @@ static inline bool node_is_toptier(int node) { return true; } + +static inline int register_mt_adistance_algorithm(struct notifier_block *nb) +{ + return 0; +} + +static inline int unregister_mt_adistance_algorithm(struct notifier_block *nb) +{ + return 0; +} + +static inline int mt_calc_adistance(int node, int *adist) +{ + return NOTIFY_DONE; +} + +static inline int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf, + const char *source) +{ + return -EIO; +} + +static inline int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist) +{ + return -EIO; +} #endif /* CONFIG_NUMA */ #endif /* _LINUX_MEMORY_TIERS_H */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index d232de7cdc..931b118336 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -17,6 +17,8 @@ struct mm_struct; +#define NO_INTERLEAVE_INDEX (-1UL) /* use task il_prev for interleaving */ + #ifdef CONFIG_NUMA /* @@ -89,8 +91,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol) return pol; } -#define vma_policy(vma) ((vma)->vm_policy) - static inline void mpol_get(struct mempolicy *pol) { if (pol) @@ -107,35 +107,30 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) /* * Tree of shared policies for a shared memory region. - * Maintain the policies in a pseudo mm that contains vmas. The vmas - * carry the policy. As a special twist the pseudo mm is indexed in pages, not - * bytes, so that we can work with shared memory segments bigger than - * unsigned long. */ - -struct sp_node { - struct rb_node nd; - unsigned long start, end; - struct mempolicy *policy; -}; - struct shared_policy { struct rb_root root; rwlock_t lock; }; +struct sp_node { + struct rb_node nd; + pgoff_t start, end; + struct mempolicy *policy; +}; int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst); void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol); -int mpol_set_shared_policy(struct shared_policy *info, - struct vm_area_struct *vma, - struct mempolicy *new); -void mpol_free_shared_policy(struct shared_policy *p); +int mpol_set_shared_policy(struct shared_policy *sp, + struct vm_area_struct *vma, struct mempolicy *mpol); +void mpol_free_shared_policy(struct shared_policy *sp); struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, - unsigned long idx); + pgoff_t idx); struct mempolicy *get_task_policy(struct task_struct *p); struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, - unsigned long addr); + unsigned long addr, pgoff_t *ilx); +struct mempolicy *get_vma_policy(struct vm_area_struct *vma, + unsigned long addr, int order, pgoff_t *ilx); bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); @@ -149,8 +144,6 @@ extern int huge_node(struct vm_area_struct *vma, extern bool init_nodemask_of_mempolicy(nodemask_t *mask); extern bool mempolicy_in_oom_domain(struct task_struct *tsk, const nodemask_t *mask); -extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy); - extern unsigned int mempolicy_slab_node(void); extern enum zone_type policy_zone; @@ -174,7 +167,7 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ extern bool vma_migratable(struct vm_area_struct *vma); -extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); +int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long); extern void 
mpol_put_task_policy(struct task_struct *); static inline bool mpol_is_preferred_many(struct mempolicy *pol) @@ -188,12 +181,17 @@ extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone); struct mempolicy {}; +static inline struct mempolicy *get_task_policy(struct task_struct *p) +{ + return NULL; +} + static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) { return true; } -static inline void mpol_put(struct mempolicy *p) +static inline void mpol_put(struct mempolicy *pol) { } @@ -212,17 +210,22 @@ static inline void mpol_shared_policy_init(struct shared_policy *sp, { } -static inline void mpol_free_shared_policy(struct shared_policy *p) +static inline void mpol_free_shared_policy(struct shared_policy *sp) { } static inline struct mempolicy * -mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) +mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx) { return NULL; } -#define vma_policy(vma) NULL +static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma, + unsigned long addr, int order, pgoff_t *ilx) +{ + *ilx = 0; + return NULL; +} static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) @@ -278,7 +281,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol) } #endif -static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, +static inline int mpol_misplaced(struct folio *folio, + struct vm_area_struct *vma, unsigned long address) { return -1; /* no node preference */ diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h index 09fb3c56e7..76d326ea8e 100644 --- a/include/linux/mfd/abx500/ab8500.h +++ b/include/linux/mfd/abx500/ab8500.h @@ -499,13 +499,7 @@ static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab) void ab8500_override_turn_on_stat(u8 mask, u8 set); -#ifdef CONFIG_AB8500_DEBUG -extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); -void ab8500_dump_all_banks(struct device *dev); -void ab8500_debug_register_interrupt(int line); -#else static inline void ab8500_dump_all_banks(struct device *dev) {} static inline void ab8500_debug_register_interrupt(int line) {} -#endif #endif /* MFD_AB8500_H */ diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h index ea4a4b1b24..1fbda1f896 100644 --- a/include/linux/mfd/lpc_ich.h +++ b/include/linux/mfd/lpc_ich.h @@ -15,7 +15,7 @@ #define ICH_RES_GPE0 1 /* GPIO compatibility */ -enum { +enum lpc_gpio_versions { ICH_I3100_GPIO, ICH_V5_GPIO, ICH_V6_GPIO, @@ -26,11 +26,14 @@ enum { AVOTON_GPIO, }; +struct lpc_ich_gpio_info; + struct lpc_ich_info { char name[32]; unsigned int iTCO_version; - unsigned int gpio_version; + enum lpc_gpio_versions gpio_version; enum intel_spi_type spi_type; + const struct lpc_ich_gpio_info *gpio_info; u8 use_gpio; }; diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h index 3d33517f17..d83e87298a 100644 --- a/include/linux/mfd/mt6358/registers.h +++ b/include/linux/mfd/mt6358/registers.h @@ -262,6 +262,12 @@ #define MT6358_LDO_VBIF28_CON3 0x1db0 #define MT6358_VCAMA1_ANA_CON0 0x1e08 #define MT6358_VCAMA2_ANA_CON0 0x1e0c +#define MT6358_VFE28_ANA_CON0 0x1e10 +#define MT6358_VCN28_ANA_CON0 0x1e14 +#define MT6358_VBIF28_ANA_CON0 0x1e18 +#define MT6358_VAUD28_ANA_CON0 0x1e1c +#define MT6358_VAUX18_ANA_CON0 0x1e20 +#define MT6358_VXO22_ANA_CON0 0x1e24 #define MT6358_VCN33_ANA_CON0 0x1e28 #define MT6358_VSIM1_ANA_CON0 0x1e2c #define MT6358_VSIM2_ANA_CON0 0x1e30 @@ -288,4 
+294,21 @@ #define MT6358_AUD_TOP_INT_CON0 0x2228 #define MT6358_AUD_TOP_INT_STATUS0 0x2234 +/* + * MT6366 has no VCAM*, but has other regulators in its place. The names + * keep the MT6358 prefix for ease of use in the regulator driver. + */ +#define MT6358_LDO_VSRAM_CON5 0x1bf8 +#define MT6358_LDO_VM18_CON0 MT6358_LDO_VCAMA1_CON0 +#define MT6358_LDO_VM18_CON1 MT6358_LDO_VCAMA1_CON1 +#define MT6358_LDO_VM18_CON2 MT6358_LDO_VCAMA1_CON2 +#define MT6358_LDO_VMDDR_CON0 MT6358_LDO_VCAMA2_CON0 +#define MT6358_LDO_VMDDR_CON1 MT6358_LDO_VCAMA2_CON1 +#define MT6358_LDO_VMDDR_CON2 MT6358_LDO_VCAMA2_CON2 +#define MT6358_LDO_VSRAM_CORE_CON0 MT6358_LDO_VCAMD_CON0 +#define MT6358_LDO_VSRAM_CORE_DBG0 0x1cb6 +#define MT6358_LDO_VSRAM_CORE_DBG1 0x1cb8 +#define MT6358_VM18_ANA_CON0 MT6358_VCAMA1_ANA_CON0 +#define MT6358_VMDDR_ANA_CON0 MT6358_VCAMD_ANA_CON0 + #endif /* __MFD_MT6358_REGISTERS_H__ */ diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h index 1b94325feb..ca35af3074 100644 --- a/include/linux/mfd/stm32-timers.h +++ b/include/linux/mfd/stm32-timers.h @@ -102,6 +102,15 @@ enum stm32_timers_dmas { STM32_TIMERS_MAX_DMAS, }; +/* STM32 Timer may have either a unique global interrupt or 4 interrupt lines */ +enum stm32_timers_irqs { + STM32_TIMERS_IRQ_GLOBAL_BRK, /* global or brk IRQ */ + STM32_TIMERS_IRQ_UP, + STM32_TIMERS_IRQ_TRG_COM, + STM32_TIMERS_IRQ_CC, + STM32_TIMERS_MAX_IRQS, +}; + /** * struct stm32_timers_dma - STM32 timer DMA handling. * @completion: end of DMA transfer completion @@ -123,6 +132,8 @@ struct stm32_timers { struct regmap *regmap; u32 max_arr; struct stm32_timers_dma dma; /* Only to be used by the parent */ + unsigned int nr_irqs; + int irq[STM32_TIMERS_MAX_IRQS]; }; #if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS) diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 4e27ca7c49..591bf5b5e8 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -64,6 +64,10 @@ #define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1) #define KSZ886X_BMCR_DISABLE_LED BIT(0) +/* PHY Special Control/Status Register (Reg 31) */ #define KSZ886X_CTRL_MDIX_STAT BIT(4) +#define KSZ886X_CTRL_FORCE_LINK BIT(3) +#define KSZ886X_CTRL_PWRSAVE BIT(2) +#define KSZ886X_CTRL_REMOTE_LOOPBACK BIT(1) #endif /* _MICREL_PHY_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 711dd94125..2ce13e8a30 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -142,10 +142,10 @@ const struct movable_operations *page_movable_ops(struct page *page) } #ifdef CONFIG_NUMA_BALANCING -int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, +int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node); #else -static inline int migrate_misplaced_page(struct page *page, +static inline int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node) { return -EAGAIN; /* can't migrate now */ diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 83aebc244c..2ec559284a 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -2,60 +2,77 @@ #ifndef _LINUX_MINMAX_H #define _LINUX_MINMAX_H +#include <linux/build_bug.h> +#include <linux/compiler.h> #include <linux/const.h> #include <linux/types.h> /* * min()/max()/clamp() macros must accomplish three things: * - * - avoid multiple evaluations of the arguments (so side-effects like + * - Avoid multiple evaluations of the arguments (so side-effects like * "x++" happen only once) when non-constant. 
- * - perform strict type-checking (to generate warnings instead of - * nasty runtime surprises). See the "unnecessary" pointer comparison - * in __typecheck(). - * - retain result as a constant expressions when called with only + * - Retain result as a constant expressions when called with only * constant expressions (to avoid tripping VLA warnings in stack * allocation usage). + * - Perform signed v unsigned type-checking (to generate compile + * errors instead of nasty runtime surprises). + * - Unsigned char/short are always promoted to signed int and can be + * compared against signed or unsigned arguments. + * - Unsigned arguments can be compared against non-negative signed constants. + * - Comparison of a signed argument against an unsigned constant fails + * even if the constant is below __INT_MAX__ and could be cast to int. */ #define __typecheck(x, y) \ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) -#define __no_side_effects(x, y) \ - (__is_constexpr(x) && __is_constexpr(y)) +/* is_signed_type() isn't a constexpr for pointer types */ +#define __is_signed(x) \ + __builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \ + is_signed_type(typeof(x)), 0) -#define __safe_cmp(x, y) \ - (__typecheck(x, y) && __no_side_effects(x, y)) +/* True for a non-negative signed int constant */ +#define __is_noneg_int(x) \ + (__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0) -#define __cmp(x, y, op) ((x) op (y) ? (x) : (y)) +#define __types_ok(x, y) \ + (__is_signed(x) == __is_signed(y) || \ + __is_signed((x) + 0) == __is_signed((y) + 0) || \ + __is_noneg_int(x) || __is_noneg_int(y)) -#define __cmp_once(x, y, unique_x, unique_y, op) ({ \ - typeof(x) unique_x = (x); \ - typeof(y) unique_y = (y); \ - __cmp(unique_x, unique_y, op); }) +#define __cmp_op_min < +#define __cmp_op_max > -#define __careful_cmp(x, y, op) \ - __builtin_choose_expr(__safe_cmp(x, y), \ - __cmp(x, y, op), \ - __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op)) +#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y)) + +#define __cmp_once(op, x, y, unique_x, unique_y) ({ \ + typeof(x) unique_x = (x); \ + typeof(y) unique_y = (y); \ + static_assert(__types_ok(x, y), \ + #op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \ + __cmp(op, unique_x, unique_y); }) + +#define __careful_cmp(op, x, y) \ + __builtin_choose_expr(__is_constexpr((x) - (y)), \ + __cmp(op, x, y), \ + __cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y))) #define __clamp(val, lo, hi) \ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? 
(lo) : (val))) -#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \ - typeof(val) unique_val = (val); \ - typeof(lo) unique_lo = (lo); \ - typeof(hi) unique_hi = (hi); \ - __clamp(unique_val, unique_lo, unique_hi); }) - -#define __clamp_input_check(lo, hi) \ - (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ - __is_constexpr((lo) > (hi)), (lo) > (hi), false))) +#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \ + typeof(val) unique_val = (val); \ + typeof(lo) unique_lo = (lo); \ + typeof(hi) unique_hi = (hi); \ + static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \ + (lo) <= (hi), true), \ + "clamp() low limit " #lo " greater than high limit " #hi); \ + static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \ + static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \ + __clamp(unique_val, unique_lo, unique_hi); }) #define __careful_clamp(val, lo, hi) ({ \ - __clamp_input_check(lo, hi) + \ - __builtin_choose_expr(__typecheck(val, lo) && __typecheck(val, hi) && \ - __typecheck(hi, lo) && __is_constexpr(val) && \ - __is_constexpr(lo) && __is_constexpr(hi), \ + __builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \ __clamp(val, lo, hi), \ __clamp_once(val, lo, hi, __UNIQUE_ID(__val), \ __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); }) @@ -65,14 +82,31 @@ * @x: first value * @y: second value */ -#define min(x, y) __careful_cmp(x, y, <) +#define min(x, y) __careful_cmp(min, x, y) /** * max - return maximum of two values of the same or compatible types * @x: first value * @y: second value */ -#define max(x, y) __careful_cmp(x, y, >) +#define max(x, y) __careful_cmp(max, x, y) + +/** + * umin - return minimum of two non-negative values + * Signed types are zero extended to match a larger unsigned type. + * @x: first value + * @y: second value + */ +#define umin(x, y) \ + __careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull) + +/** + * umax - return maximum of two non-negative values + * @x: first value + * @y: second value + */ +#define umax(x, y) \ + __careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull) /** * min3 - return minimum of three values @@ -124,7 +158,7 @@ * @x: first value * @y: second value */ -#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <) +#define min_t(type, x, y) __careful_cmp(min, (type)(x), (type)(y)) /** * max_t - return maximum of two values, using the specified type @@ -132,28 +166,7 @@ * @x: first value * @y: second value */ -#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >) - -/* - * Remove a const qualifier from integer types - * _Generic(foo, type-name: association, ..., default: association) performs a - * comparison against the foo type (not the qualified type). - * Do not use the const keyword in the type-name as it will not match the - * unqualified type of foo. - */ -#define __unconst_integer_type_cases(type) \ - unsigned type: (unsigned type)0, \ - signed type: (signed type)0 - -#define __unconst_integer_typeof(x) typeof( \ - _Generic((x), \ - char: (char)0, \ - __unconst_integer_type_cases(char), \ - __unconst_integer_type_cases(short), \ - __unconst_integer_type_cases(int), \ - __unconst_integer_type_cases(long), \ - __unconst_integer_type_cases(long long), \ - default: (x))) +#define max_t(type, x, y) __careful_cmp(max, (type)(x), (type)(y)) /* * Do not check the array parameter using __must_be_array(). @@ -169,13 +182,13 @@ * 'int *buff' and 'int buff[N]' types. * * The array can be an array of const items. 
- * typeof() keeps the const qualifier. Use __unconst_integer_typeof() in order + * typeof() keeps the const qualifier. Use __unqual_scalar_typeof() in order * to discard the const qualifier for the __element variable. */ #define __minmax_array(op, array, len) ({ \ typeof(&(array)[0]) __array = (array); \ typeof(len) __len = (len); \ - __unconst_integer_typeof(__array[0]) __element = __array[--__len]; \ + __unqual_scalar_typeof(__array[0]) __element = __array[--__len];\ while (__len--) \ __element = op(__element, __array[__len]); \ __element; }) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 26333d602a..820bca965f 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -366,6 +366,7 @@ enum mlx5_driver_event { MLX5_DRIVER_EVENT_UPLINK_NETDEV, MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, MLX5_DRIVER_EVENT_MACSEC_SA_DELETED, + MLX5_DRIVER_EVENT_SF_PEER_DEVLINK, MLX5_DRIVER_EVENT_AFFILIATION_DONE, MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 5ca4e085d8..d2b8d4a74a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -155,6 +155,8 @@ enum { MLX5_REG_MCC = 0x9062, MLX5_REG_MCDA = 0x9063, MLX5_REG_MCAM = 0x907f, + MLX5_REG_MSECQ = 0x9155, + MLX5_REG_MSEES = 0x9156, MLX5_REG_MIRC = 0x9162, MLX5_REG_SBCAM = 0xB01F, MLX5_REG_RESOURCE_DUMP = 0xC000, @@ -613,6 +615,7 @@ struct mlx5_priv { int adev_idx; int sw_vhca_id; struct mlx5_events *events; + struct mlx5_vhca_events *vhca_events; struct mlx5_flow_steering *steering; struct mlx5_mpfs *mpfs; @@ -621,6 +624,7 @@ struct mlx5_priv { struct mlx5_lag *lag; u32 flags; struct mlx5_devcom_dev *devc; + struct mlx5_devcom_comp_dev *hca_devcom_comp; struct mlx5_fw_reset *fw_reset; struct mlx5_core_roce roce; struct mlx5_fc_stats fc_stats; @@ -1039,10 +1043,6 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev); int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); -struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, - gfp_t flags, int npages); -void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, - struct mlx5_cmd_mailbox *head); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, int inlen); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey); @@ -1056,8 +1056,6 @@ void mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev); void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev); -void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s32 npages, bool ec_function); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); @@ -1097,8 +1095,6 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); __be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev); void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); -int mlx5_query_odp_caps(struct mlx5_core_dev *dev, - struct mlx5_odp_caps *odp_caps); int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); @@ -1200,12 +1196,6 @@ int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev, void 
mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev, int vf_id, struct notifier_block *nb); -#ifdef CONFIG_MLX5_CORE_IPOIB -struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, - struct ib_device *ibdev, - const char *name, - void (*setup)(struct net_device *)); -#endif /* CONFIG_MLX5_CORE_IPOIB */ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, struct ib_device *device, struct rdma_netdev_alloc_params *params); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 51eb83f779..77cd2e1372 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -312,6 +312,7 @@ enum { MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d, MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e, MLX5_CMD_OP_SYNC_CRYPTO = 0xb12, + MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16, MLX5_CMD_OP_MAX }; @@ -1102,7 +1103,7 @@ struct mlx5_ifc_roce_cap_bits { u8 sw_r_roce_src_udp_port[0x1]; u8 fl_rc_qp_when_roce_disabled[0x1]; u8 fl_rc_qp_when_roce_enabled[0x1]; - u8 reserved_at_7[0x1]; + u8 roce_cc_general[0x1]; u8 qp_ooo_transmit_default[0x1]; u8 reserved_at_9[0x15]; u8 qp_ts_format[0x2]; @@ -1231,7 +1232,13 @@ struct mlx5_ifc_virtio_emulation_cap_bits { u8 max_emulated_devices[0x8]; u8 max_num_virtio_queues[0x18]; - u8 reserved_at_a0[0x60]; + u8 reserved_at_a0[0x20]; + + u8 reserved_at_c0[0x13]; + u8 desc_group_mkey_supported[0x1]; + u8 reserved_at_d4[0xc]; + + u8 reserved_at_e0[0x20]; u8 umem_1_buffer_param_a[0x20]; @@ -1934,6 +1941,14 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 match_definer_format_supported[0x40]; }; +enum { + MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000, +}; + +enum { + MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE = 0x200, +}; + struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_0[0x80]; @@ -1948,9 +1963,15 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_c0[0x8]; u8 migration_multi_load[0x1]; u8 migration_tracking_state[0x1]; - u8 reserved_at_ca[0x16]; + u8 reserved_at_ca[0x6]; + u8 migration_in_chunks[0x1]; + u8 reserved_at_d1[0xf]; - u8 reserved_at_e0[0xc0]; + u8 cross_vhca_object_to_object_supported[0x20]; + + u8 allowed_object_for_other_vhca_access[0x40]; + + u8 reserved_at_140[0x60]; u8 flow_table_type_2_type[0x8]; u8 reserved_at_1a8[0x3]; @@ -6369,6 +6390,28 @@ struct mlx5_ifc_general_obj_out_cmd_hdr_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_allow_other_vhca_access_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + u8 reserved_at_40[0x50]; + u8 object_type_to_be_accessed[0x10]; + u8 object_id_to_be_accessed[0x20]; + u8 reserved_at_c0[0x40]; + union { + u8 access_key_raw[0x100]; + u8 access_key[8][0x20]; + }; +}; + +struct mlx5_ifc_allow_other_vhca_access_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + u8 syndrome[0x20]; + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_modify_header_arg_bits { u8 reserved_at_0[0x80]; @@ -6391,6 +6434,24 @@ struct mlx5_ifc_create_match_definer_out_bits { struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; }; +struct mlx5_ifc_alias_context_bits { + u8 vhca_id_to_be_accessed[0x10]; + u8 reserved_at_10[0xd]; + u8 status[0x3]; + u8 object_id_to_be_accessed[0x20]; + u8 reserved_at_40[0x40]; + union { + u8 access_key_raw[0x100]; + u8 access_key[8][0x20]; + }; + u8 metadata[0x80]; +}; + +struct mlx5_ifc_create_alias_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_alias_context_bits alias_ctx; +}; + enum { 
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, @@ -10176,7 +10237,9 @@ struct mlx5_ifc_mcam_access_reg_bits2 { u8 mirc[0x1]; u8 regs_97_to_96[0x2]; - u8 regs_95_to_64[0x20]; + u8 regs_95_to_87[0x09]; + u8 synce_registers[0x2]; + u8 regs_84_to_64[0x15]; u8 regs_63_to_32[0x20]; @@ -10572,6 +10635,7 @@ enum { MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe, MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf, MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12, }; struct mlx5_ifc_initial_seg_bits { @@ -11917,6 +11981,7 @@ enum { MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24, MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27, MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15, }; enum { @@ -12399,7 +12464,8 @@ struct mlx5_ifc_query_vhca_migration_state_in_bits { u8 op_mod[0x10]; u8 incremental[0x1]; - u8 reserved_at_41[0xf]; + u8 chunk[0x1]; + u8 reserved_at_42[0xe]; u8 vhca_id[0x10]; u8 reserved_at_60[0x20]; @@ -12415,7 +12481,11 @@ struct mlx5_ifc_query_vhca_migration_state_out_bits { u8 required_umem_size[0x20]; - u8 reserved_at_a0[0x160]; + u8 reserved_at_a0[0x20]; + + u8 remaining_total_size[0x40]; + + u8 reserved_at_100[0x100]; }; struct mlx5_ifc_save_vhca_state_in_bits { @@ -12447,7 +12517,7 @@ struct mlx5_ifc_save_vhca_state_out_bits { u8 actual_image_size[0x20]; - u8 reserved_at_60[0x20]; + u8 next_required_umem_size[0x20]; }; struct mlx5_ifc_load_vhca_state_in_bits { @@ -12556,4 +12626,59 @@ struct mlx5_ifc_modify_page_track_obj_in_bits { struct mlx5_ifc_page_track_bits obj_context; }; +struct mlx5_ifc_msecq_reg_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x12]; + u8 network_option[0x2]; + u8 local_ssm_code[0x4]; + u8 local_enhanced_ssm_code[0x8]; + + u8 local_clock_identity[0x40]; + + u8 reserved_at_80[0x180]; +}; + +enum { + MLX5_MSEES_FIELD_SELECT_ENABLE = BIT(0), + MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS = BIT(1), + MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE = BIT(2), +}; + +enum mlx5_msees_admin_status { + MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING = 0x0, + MLX5_MSEES_ADMIN_STATUS_TRACK = 0x1, +}; + +enum mlx5_msees_oper_status { + MLX5_MSEES_OPER_STATUS_FREE_RUNNING = 0x0, + MLX5_MSEES_OPER_STATUS_SELF_TRACK = 0x1, + MLX5_MSEES_OPER_STATUS_OTHER_TRACK = 0x2, + MLX5_MSEES_OPER_STATUS_HOLDOVER = 0x3, + MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER = 0x4, + MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING = 0x5, +}; + +struct mlx5_ifc_msees_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 lp_msb[0x2]; + u8 reserved_at_14[0xc]; + + u8 field_select[0x20]; + + u8 admin_status[0x4]; + u8 oper_status[0x4]; + u8 ho_acq[0x1]; + u8 reserved_at_49[0xc]; + u8 admin_freq_measure[0x1]; + u8 oper_freq_measure[0x1]; + u8 failure_reason[0x9]; + + u8 frequency_diff[0x20]; + + u8 reserved_at_80[0x180]; +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index 9becdc3fa5..b86d51a855 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -74,7 +74,11 @@ struct mlx5_ifc_virtio_q_bits { u8 reserved_at_320[0x8]; u8 pd[0x18]; - u8 reserved_at_340[0xc0]; + u8 reserved_at_340[0x20]; + + u8 desc_group_mkey[0x20]; + + u8 reserved_at_380[0x80]; }; struct mlx5_ifc_virtio_net_q_object_bits { @@ -141,6 +145,7 @@ enum { MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0, MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = 
(u64)1 << 3, MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4, + MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14, }; enum { diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 98b2e1e149..26092c78a9 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -115,8 +115,9 @@ enum mlx5e_ext_link_mode { MLX5E_100GAUI_1_100GBASE_CR_KR = 11, MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12, MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13, - MLX5E_400GAUI_8 = 15, + MLX5E_400GAUI_8_400GBASE_CR8 = 15, MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16, + MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19, MLX5E_EXT_LINK_MODES_NUMBER, }; diff --git a/include/linux/mm.h b/include/linux/mm.h index bf5d0b1b16..da5219b48d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -362,8 +362,6 @@ extern unsigned int kobjsize(const void *objp); # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 -#elif defined(CONFIG_IA64) -# define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_SPARC64) # define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ # define VM_ARCH_CLEAR VM_SPARC_ADI @@ -619,7 +617,7 @@ struct vm_operations_struct { * policy. */ struct mempolicy *(*get_policy)(struct vm_area_struct *vma, - unsigned long addr); + unsigned long addr, pgoff_t *ilx); #endif /* * Called by vm_normal_page() for special PTEs to find the @@ -888,8 +886,8 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma) */ static inline bool vma_is_initial_heap(const struct vm_area_struct *vma) { - return vma->vm_start <= vma->vm_mm->brk && - vma->vm_end >= vma->vm_mm->start_brk; + return vma->vm_start < vma->vm_mm->brk && + vma->vm_end > vma->vm_mm->start_brk; } /* @@ -903,8 +901,8 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma) * its "stack". It's not even well-defined for programs written in languages like Go.
*/ - return vma->vm_start <= vma->vm_mm->start_stack && - vma->vm_end >= vma->vm_mm->start_stack; + return vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack; } static inline bool vma_is_temporary_stack(struct vm_area_struct *vma) @@ -937,6 +935,17 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma) return vma->vm_flags & VM_ACCESS_FLAGS; } +static inline bool is_shared_maywrite(vm_flags_t vm_flags) +{ + return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == + (VM_SHARED | VM_MAYWRITE); +} + +static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma) +{ + return is_shared_maywrite(vma->vm_flags); +} + static inline struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max) { @@ -1337,7 +1346,6 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio, struct page *page, unsigned int nr, unsigned long addr); vm_fault_t finish_fault(struct vm_fault *vmf); -vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #endif /* @@ -1686,26 +1694,26 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid) #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS -static inline int page_cpupid_xchg_last(struct page *page, int cpupid) +static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) { - return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); + return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK); } -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return page->_last_cpupid; + return folio->_last_cpupid; } static inline void page_cpupid_reset_last(struct page *page) { page->_last_cpupid = -1 & LAST_CPUPID_MASK; } #else -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; + return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; } -extern int page_cpupid_xchg_last(struct page *page, int cpupid); +int folio_xchg_last_cpupid(struct folio *folio, int cpupid); static inline void page_cpupid_reset_last(struct page *page) { @@ -1713,11 +1721,12 @@ static inline void page_cpupid_reset_last(struct page *page) } #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ -static inline int xchg_page_access_time(struct page *page, int time) +static inline int folio_xchg_access_time(struct folio *folio, int time) { int last_time; - last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS); + last_time = folio_xchg_last_cpupid(folio, + time >> PAGE_ACCESS_TIME_BUCKETS); return last_time << PAGE_ACCESS_TIME_BUCKETS; } @@ -1726,24 +1735,24 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) unsigned int pid_bit; pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG)); - if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->access_pids[1])) { - __set_bit(pid_bit, &vma->numab_state->access_pids[1]); + if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) { + __set_bit(pid_bit, &vma->numab_state->pids_active[1]); } } #else /* !CONFIG_NUMA_BALANCING */ -static inline int page_cpupid_xchg_last(struct page *page, int cpupid) +static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) { - return page_to_nid(page); /* XXX */ + return folio_nid(folio); /* XXX */ } -static inline int xchg_page_access_time(struct page *page, int time) +static inline int 
folio_xchg_access_time(struct folio *folio, int time) { return 0; } -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return page_to_nid(page); /* XXX */ + return folio_nid(folio); /* XXX */ } static inline int cpupid_to_nid(int cpupid) @@ -2327,6 +2336,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, pte_t pte); struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t pmd); struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd); @@ -2413,8 +2424,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, unsigned int gup_flags); extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); -extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr, - void *buf, int len, unsigned int gup_flags); long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, @@ -2425,6 +2434,9 @@ long pin_user_pages_remote(struct mm_struct *mm, unsigned int gup_flags, struct page **pages, int *locked); +/* + * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT. + */ static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, unsigned long addr, int gup_flags, @@ -2432,12 +2444,15 @@ static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, { struct page *page; struct vm_area_struct *vma; - int got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL); + int got; + + if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT))) + return ERR_PTR(-EINVAL); + + got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL); if (got < 0) return ERR_PTR(got); - if (got == 0) - return NULL; vma = vma_lookup(mm, addr); if (WARN_ON_ONCE(!vma)) { @@ -2480,7 +2495,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen); extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, - bool need_rmap_locks); + bool need_rmap_locks, bool for_stack); /* * Flags used by change_protection(). 
For now we make it a bitmap so @@ -2628,14 +2643,6 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, *maxrss = hiwater_rss; } -#if defined(SPLIT_RSS_COUNTING) -void sync_mm_rss(struct mm_struct *mm); -#else -static inline void sync_mm_rss(struct mm_struct *mm) -{ -} -#endif - #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL static inline int pte_special(pte_t pte) { @@ -3057,6 +3064,22 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) return ptl; } +static inline void pagetable_pud_ctor(struct ptdesc *ptdesc) +{ + struct folio *folio = ptdesc_folio(ptdesc); + + __folio_set_pgtable(folio); + lruvec_stat_add_folio(folio, NR_PAGETABLE); +} + +static inline void pagetable_pud_dtor(struct ptdesc *ptdesc) +{ + struct folio *folio = ptdesc_folio(ptdesc); + + __folio_clear_pgtable(folio); + lruvec_stat_sub_folio(folio, NR_PAGETABLE); +} + extern void __init pagecache_init(void); extern void free_initmem(void); @@ -3221,22 +3244,73 @@ extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct *next); extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff); -extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi, - struct mm_struct *, struct vm_area_struct *prev, unsigned long addr, - unsigned long end, unsigned long vm_flags, struct anon_vma *, - struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx, - struct anon_vma_name *); extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); -extern int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *, - unsigned long addr, int new_below); -extern int split_vma(struct vma_iterator *vmi, struct vm_area_struct *, - unsigned long addr, int new_below); extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void unlink_file_vma(struct vm_area_struct *); extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff, bool *need_rmap_locks); extern void exit_mmap(struct mm_struct *); +struct vm_area_struct *vma_modify(struct vma_iterator *vmi, + struct vm_area_struct *prev, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + unsigned long vm_flags, + struct mempolicy *policy, + struct vm_userfaultfd_ctx uffd_ctx, + struct anon_vma_name *anon_name); + +/* We are about to modify the VMA's flags. */ +static inline struct vm_area_struct +*vma_modify_flags(struct vma_iterator *vmi, + struct vm_area_struct *prev, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + unsigned long new_flags) +{ + return vma_modify(vmi, prev, vma, start, end, new_flags, + vma_policy(vma), vma->vm_userfaultfd_ctx, + anon_vma_name(vma)); +} + +/* We are about to modify the VMA's flags and/or anon_name. */ +static inline struct vm_area_struct +*vma_modify_flags_name(struct vma_iterator *vmi, + struct vm_area_struct *prev, + struct vm_area_struct *vma, + unsigned long start, + unsigned long end, + unsigned long new_flags, + struct anon_vma_name *new_name) +{ + return vma_modify(vmi, prev, vma, start, end, new_flags, + vma_policy(vma), vma->vm_userfaultfd_ctx, new_name); +} + +/* We are about to modify the VMA's memory policy. 
*/ +static inline struct vm_area_struct +*vma_modify_policy(struct vma_iterator *vmi, + struct vm_area_struct *prev, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct mempolicy *new_pol) +{ + return vma_modify(vmi, prev, vma, start, end, vma->vm_flags, + new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma)); +} + +/* We are about to modify the VMA's flags and/or uffd context. */ +static inline struct vm_area_struct +*vma_modify_flags_uffd(struct vma_iterator *vmi, + struct vm_area_struct *prev, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + unsigned long new_flags, + struct vm_userfaultfd_ctx new_ctx) +{ + return vma_modify(vmi, prev, vma, start, end, new_flags, + vma_policy(vma), new_ctx, anon_vma_name(vma)); +} static inline int check_data_rlimit(unsigned long rlim, unsigned long new, @@ -3308,8 +3382,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len) static inline void mm_populate(unsigned long addr, unsigned long len) {} #endif -/* These take the mm semaphore themselves */ -extern int __must_check vm_brk(unsigned long, unsigned long); +/* This takes the mm semaphore itself */ extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long); extern int vm_munmap(unsigned long, size_t); extern unsigned long __must_check vm_mmap(struct file *, unsigned long, @@ -4000,25 +4073,26 @@ static inline void mem_dump_obj(void *object) {} #endif /** - * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it + * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and + * handle them. * @seals: the seals to check * @vma: the vma to operate on * - * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on - * the vma flags. Return 0 if check pass, or <0 for errors. + * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper + * check/handling on the vma flags. Return 0 if the check passes, or <0 for errors. */ -static inline int seal_check_future_write(int seals, struct vm_area_struct *vma) +static inline int seal_check_write(int seals, struct vm_area_struct *vma) { - if (seals & F_SEAL_FUTURE_WRITE) { + if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { /* * New PROT_WRITE and MAP_SHARED mmaps are not allowed when - * "future write" seal active. + * write seals are active. */ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) return -EPERM; /* - * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as + * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as * MAP_SHARED and read-only, take care to not allow mprotect to * revert protections on such mappings. Do this only for shared * mappings.
For private mappings, don't need to mask @@ -4062,4 +4136,11 @@ static inline void accept_memory(phys_addr_t start, phys_addr_t end) #endif +static inline bool pfn_is_unaccepted_memory(unsigned long pfn) +{ + phys_addr_t paddr = pfn << PAGE_SHIFT; + + return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE); +} + #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 96b1c15755..f4fe593c14 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -4,6 +4,7 @@ #include <linux/atomic.h> #include <linux/huge_mm.h> +#include <linux/mm_types.h> #include <linux/swap.h> #include <linux/string.h> #include <linux/userfaultfd_k.h> @@ -357,15 +358,6 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio) } #ifdef CONFIG_ANON_VMA_NAME -/* - * mmap_lock should be read-locked when calling anon_vma_name(). Caller should - * either keep holding the lock while using the returned pointer or it should - * raise anon_vma_name refcount before releasing the lock. - */ -extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma); -extern struct anon_vma_name *anon_vma_name_alloc(const char *name); -extern void anon_vma_name_free(struct kref *kref); - /* mmap_lock should be read-locked */ static inline void anon_vma_name_get(struct anon_vma_name *anon_name) { @@ -420,16 +412,6 @@ static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1, } #else /* CONFIG_ANON_VMA_NAME */ -static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) -{ - return NULL; -} - -static inline struct anon_vma_name *anon_vma_name_alloc(const char *name) -{ - return NULL; -} - static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {} static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {} static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 36c5b43999..950df415d7 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -125,18 +125,7 @@ struct page { struct page_pool *pp; unsigned long _pp_mapping_pad; unsigned long dma_addr; - union { - /** - * dma_addr_upper: might require a 64-bit - * value on 32-bit architectures. - */ - unsigned long dma_addr_upper; - /** - * For frag page support, not supported in - * 32-bit architectures with 64-bit DMA. - */ - atomic_long_t pp_frag_count; - }; + atomic_long_t pp_frag_count; }; struct { /* Tail pages of compound page */ unsigned long compound_head; /* Bit zero is set */ @@ -199,6 +188,10 @@ struct page { not kmapped, ie. highmem) */ #endif /* WANT_PAGE_VIRTUAL */ +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS + int _last_cpupid; +#endif + #ifdef CONFIG_KMSAN /* * KMSAN metadata for this page: @@ -210,10 +203,6 @@ struct page { struct page *kmsan_shadow; struct page *kmsan_origin; #endif - -#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS - int _last_cpupid; -#endif } _struct_page_alignment; /* @@ -272,6 +261,8 @@ typedef struct { * @_refcount: Do not access this member directly. Use folio_ref_count() * to find how many references there are to this folio. * @memcg_data: Memory Control Group data. + * @virtual: Virtual address in the kernel direct map. + * @_last_cpupid: IDs of last CPU and last process that accessed the folio. * @_entire_mapcount: Do not use directly, call folio_entire_mapcount(). * @_nr_pages_mapped: Do not use directly, call folio_mapcount(). * @_pincount: Do not use directly, call folio_maybe_dma_pinned(). 
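The struct page / struct folio rearrangement above only works because struct folio overlays struct page, so every field the two share has to sit at the same byte offset; the FOLIO_MATCH() static assertions extended in the next hunk enforce exactly that at compile time. A minimal, self-contained sketch of the idiom, using hypothetical toy_* struct names rather than the kernel's types:

#include <assert.h>	/* static_assert (C11) */
#include <stddef.h>	/* offsetof */

/* Toy stand-ins for struct page and struct folio. */
struct toy_page {
	unsigned long flags;
	void *virtual;
	int _last_cpupid;
};

struct toy_folio {
	unsigned long flags;
	void *virtual;
	int _last_cpupid;
};

/* Compile-time proof that a folio field aliases the matching page field. */
#define TOY_FOLIO_MATCH(pg, fl) \
	static_assert(offsetof(struct toy_folio, fl) == \
		      offsetof(struct toy_page, pg), \
		      "field " #fl " drifted out of sync with struct toy_page")

TOY_FOLIO_MATCH(flags, flags);
TOY_FOLIO_MATCH(virtual, virtual);
TOY_FOLIO_MATCH(_last_cpupid, _last_cpupid);

Reordering either toy struct makes the build fail immediately, which is the point: the overlay is only safe while the layouts stay in lockstep.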
@@ -318,6 +309,12 @@ struct folio { #ifdef CONFIG_MEMCG unsigned long memcg_data; #endif +#if defined(WANT_PAGE_VIRTUAL) + void *virtual; +#endif +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS + int _last_cpupid; +#endif /* private: the union with struct page is transitional */ }; struct page page; @@ -373,6 +370,12 @@ FOLIO_MATCH(_refcount, _refcount); #ifdef CONFIG_MEMCG FOLIO_MATCH(memcg_data, memcg_data); #endif +#if defined(WANT_PAGE_VIRTUAL) +FOLIO_MATCH(virtual, virtual); +#endif +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS +FOLIO_MATCH(_last_cpupid, _last_cpupid); +#endif #undef FOLIO_MATCH #define FOLIO_MATCH(pg, fl) \ static_assert(offsetof(struct folio, fl) == \ @@ -546,14 +549,65 @@ struct anon_vma_name { char name[]; }; +#ifdef CONFIG_ANON_VMA_NAME +/* + * mmap_lock should be read-locked when calling anon_vma_name(). Caller should + * either keep holding the lock while using the returned pointer or it should + * raise anon_vma_name refcount before releasing the lock. + */ +struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma); +struct anon_vma_name *anon_vma_name_alloc(const char *name); +void anon_vma_name_free(struct kref *kref); +#else /* CONFIG_ANON_VMA_NAME */ +static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) +{ + return NULL; +} + +static inline struct anon_vma_name *anon_vma_name_alloc(const char *name) +{ + return NULL; +} +#endif + struct vma_lock { struct rw_semaphore lock; }; struct vma_numab_state { + /* + * Initialised as time in 'jiffies' after which VMA + * should be scanned. Delays first scan of new VMA by at + * least sysctl_numa_balancing_scan_delay: + */ unsigned long next_scan; - unsigned long next_pid_reset; - unsigned long access_pids[2]; + + /* + * Time in jiffies when pids_active[] is reset to + * detect phase change behaviour: + */ + unsigned long pids_active_reset; + + /* + * Approximate tracking of PIDs that trapped a NUMA hinting + * fault. May produce false positives due to hash collisions. + * + * [0] Previous PID tracking + * [1] Current PID tracking + * + * Window moves after next_pid_reset has expired approximately + * every VMA_PID_RESET_PERIOD jiffies: + */ + unsigned long pids_active[2]; + + /* MM scan sequence ID when scan first started after VMA creation */ + int start_scan_seq; + + /* + * MM scan sequence ID when the VMA was last completely scanned. 
+ * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq + */ + int prev_scan_seq; }; /* @@ -662,6 +716,12 @@ struct vm_area_struct { struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } __randomize_layout; +#ifdef CONFIG_NUMA +#define vma_policy(vma) ((vma)->vm_policy) +#else +#define vma_policy(vma) NULL +#endif + #ifdef CONFIG_SCHED_MM_CID struct mm_cid { u64 time; diff --git a/include/linux/mman.h b/include/linux/mman.h index 40d94411d4..dc7048824b 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -156,6 +156,7 @@ calc_vm_flag_bits(unsigned long flags) return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | + _calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) | arch_calc_vm_flag_bits(flags); } diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 62a6847a3b..2f445c6517 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -526,6 +526,7 @@ struct mmc_host { /* Host Software Queue support */ bool hsq_enabled; + int hsq_depth; u32 err_stats[MMC_ERR_MAX]; unsigned long private[] ____cacheline_aligned; diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 6e3c857606..f349e08a9d 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -459,7 +459,14 @@ mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) lock_map_release(&__mmu_notifier_invalidate_range_start_map); } -static inline int +/* + * This version of mmu_notifier_invalidate_range_start() avoids blocking, but it + * can return an error if a notifier can't proceed without blocking, in which + * case you're not allowed to modify PTEs in the specified range. + * + * This is mainly intended for OOM handling. + */ +static inline int __must_check mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range) { int ret = 0; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f43a1cdcc6..2c34e9325c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -643,8 +643,6 @@ struct lruvec { #endif }; -/* Isolate unmapped pages */ -#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2) /* Isolate for asynchronous migration */ #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) /* Isolate unevictable pages */ @@ -680,15 +678,34 @@ enum zone_watermarks { #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) +/* + * Flags used in pcp->flags field. + * + * PCPF_PREV_FREE_HIGH_ORDER: a high-order page was freed in the + * previous page freeing; used to avoid draining the PCP after an + * occasional high-order page freeing. + * + * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before + * draining it for consecutive high-order page freeing with no + * intervening allocation, if the CPU's data cache slice is large + * enough; this reduces zone lock contention and keeps cache-hot pages + * available for reuse.
+ */ +#define PCPF_PREV_FREE_HIGH_ORDER BIT(0) +#define PCPF_FREE_HIGH_BATCH BIT(1) + struct per_cpu_pages { spinlock_t lock; /* Protects lists field */ int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ + int high_min; /* min high watermark */ + int high_max; /* max high watermark */ int batch; /* chunk size for buddy add/remove */ - short free_factor; /* batch scaling factor during free */ + u8 flags; /* protected by pcp->lock */ + u8 alloc_factor; /* batch scaling factor during allocate */ #ifdef CONFIG_NUMA - short expire; /* When 0, remote pagesets are drained */ + u8 expire; /* When 0, remote pagesets are drained */ #endif + short free_count; /* consecutive free count */ /* Lists of pages, one per migrate type stored on the pcp-lists */ struct list_head lists[NR_PCP_LISTS]; @@ -841,7 +858,8 @@ struct zone { * the high and batch values are copied to individual pagesets for * faster access */ - int pageset_high; + int pageset_high_min; + int pageset_high_max; int pageset_batch; #ifndef CONFIG_SPARSEMEM @@ -1002,6 +1020,7 @@ enum zone_flags { * Cleared when kswapd is woken. */ ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */ + ZONE_BELOW_HIGH, /* zone is below high watermark. */ }; static inline unsigned long zone_managed_pages(struct zone *zone) @@ -2006,9 +2025,9 @@ static inline int pfn_valid(unsigned long pfn) if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; ms = __pfn_to_section(pfn); - rcu_read_lock(); + rcu_read_lock_sched(); if (!valid_section(ms)) { - rcu_read_unlock(); + rcu_read_unlock_sched(); return 0; } /* @@ -2016,7 +2035,7 @@ static inline int pfn_valid(unsigned long pfn) * the entire section-sized span. */ ret = early_section(ms) || pfn_section_valid(ms, pfn); - rcu_read_unlock(); + rcu_read_unlock_sched(); return ret; } diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h index 057c89867a..b8da2db4ec 100644 --- a/include/linux/mnt_idmapping.h +++ b/include/linux/mnt_idmapping.h @@ -115,6 +115,9 @@ static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid) int vfsgid_in_group_p(vfsgid_t vfsgid); +struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap); +void mnt_idmap_put(struct mnt_idmap *idmap); + vfsuid_t make_vfsuid(struct mnt_idmap *idmap, struct user_namespace *fs_userns, kuid_t kuid); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index b0678b093c..f458469c5c 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -935,6 +935,12 @@ enum { * struct cdx_device_id - CDX device identifier * @vendor: Vendor ID * @device: Device ID + * @subvendor: Subsystem vendor ID (or CDX_ANY_ID) + * @subdevice: Subsystem device ID (or CDX_ANY_ID) + * @class: Device class + * Most drivers do not need to specify class/class_mask + * as vendor/device is normally sufficient. + * @class_mask: Limit which sub-fields of the class field are compared. * @override_only: Match only when dev->driver_override is this driver. 
* * Type of entries in the "device Id" table for CDX devices supported by @@ -943,7 +949,15 @@ enum { struct cdx_device_id { __u16 vendor; __u16 device; + __u16 subvendor; + __u16 subdevice; + __u32 class; + __u32 class_mask; __u32 override_only; }; +struct vchiq_device_id { + char name[32]; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module_symbol.h b/include/linux/module_symbol.h index 1269543d06..77c9895b9d 100644 --- a/include/linux/module_symbol.h +++ b/include/linux/module_symbol.h @@ -3,7 +3,7 @@ #define _LINUX_MODULE_SYMBOL_H /* This ignores the intensely annoying "mapping symbols" found in ELF files. */ -static inline int is_mapping_symbol(const char *str) +static inline bool is_mapping_symbol(const char *str) { if (str[0] == '.' && str[1] == 'L') return true; diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 962cd41a2c..4fa9726bc3 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -276,7 +276,7 @@ struct kparam_array read-only sections (which is part of respective UNIX ABI on these platforms). So 'const' makes no sense and even causes compile failures with some compilers. */ -#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) +#if defined(CONFIG_ALPHA) || defined(CONFIG_PPC64) #define __moduleparam_const #else #define __moduleparam_const const @@ -293,7 +293,11 @@ struct kparam_array = { __param_str_##name, THIS_MODULE, ops, \ VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } } -/* Obsolete - use module_param_cb() */ +/* + * Useful for describing a set/get pair used only once (i.e. for this + * parameter). For repeated set/get pairs (i.e. the same struct + * kernel_param_ops), use module_param_cb() instead. + */ #define module_param_call(name, _set, _get, arg, perm) \ static const struct kernel_param_ops __param_ops_##name = \ { .flags = 0, .set = _set, .get = _get }; \ diff --git a/include/linux/mount.h b/include/linux/mount.h index 4f40b40306..ac3dd28761 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -92,8 +92,8 @@ extern bool __mnt_is_readonly(struct vfsmount *mnt); extern bool mnt_may_suid(struct vfsmount *mnt); extern struct vfsmount *clone_private_mount(const struct path *path); -extern int __mnt_want_write(struct vfsmount *); -extern void __mnt_drop_write(struct vfsmount *); +int mnt_get_write_access(struct vfsmount *mnt); +void mnt_put_write_access(struct vfsmount *mnt); extern struct vfsmount *fc_mount(struct fs_context *fc); extern struct vfsmount *vfs_create_mount(struct fs_context *fc); diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index d88bb56c18..947410faf9 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -287,7 +287,7 @@ struct cfi_private { unsigned long chipshift; /* Because they're of the same type */ const char *im_name; /* inter_module name for cmdset_setup */ unsigned long quirks; - struct flchip chips[]; /* per-chip data structure for each chip */ + struct flchip chips[] __counted_by(numchips); /* per-chip data structure for each chip */ }; uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index 2e3f43788d..0421f12156 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -24,7 +24,7 @@ struct lpddr_private { struct qinfo_chip *qinfo; int numchips; unsigned long chipshift; - struct flchip chips[]; + struct flchip chips[] __counted_by(numchips); }; /* qinfo_query_info structure contains request information for
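Both MTD hunks above annotate the chips[] flexible array with __counted_by(numchips), telling the compiler which member holds the element count so that bounds-checking instrumentation (UBSAN bounds checks, FORTIFY_SOURCE) can police accesses at runtime. A minimal userspace sketch of the pattern; the toy_* names and the fallback macro are illustrative, not the kernel's definitions:

#include <stdlib.h>

/* Expand to the attribute only where the compiler supports it. */
#ifdef __has_attribute
# if __has_attribute(counted_by)
#  define toy_counted_by(m) __attribute__((counted_by(m)))
# endif
#endif
#ifndef toy_counted_by
# define toy_counted_by(m)	/* no-op fallback */
#endif

struct toy_priv {
	int numchips;
	int chips[] toy_counted_by(numchips);	/* bounds tied to numchips */
};

static struct toy_priv *toy_alloc(int n)
{
	struct toy_priv *p = malloc(sizeof(*p) + n * sizeof(p->chips[0]));

	if (p)
		p->numchips = n;	/* set the counter before using chips[] */
	return p;
}

The one rule the annotation imposes is visible in toy_alloc(): the counter member must hold a valid value before the flexible array is indexed.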
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 3e285c09d1..badb4c1ac0 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -263,6 +263,7 @@ struct spinand_manufacturer { extern const struct spinand_manufacturer alliancememory_spinand_manufacturer; extern const struct spinand_manufacturer ato_spinand_manufacturer; extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer; +extern const struct spinand_manufacturer foresee_spinand_manufacturer; extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; diff --git a/include/linux/namei.h b/include/linux/namei.h index 1463cbda48..74e0cc14eb 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -66,6 +66,7 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int); extern void done_path_create(struct path *, struct dentry *); extern struct dentry *kern_path_locked(const char *, struct path *); +extern struct dentry *user_path_locked_at(int , const char __user *, struct path *); int vfs_path_parent_lookup(struct filename *filename, unsigned int flags, struct path *parent, struct qstr *last, int *type, const struct path *root); @@ -92,6 +93,30 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *); extern struct dentry *lock_rename_child(struct dentry *, struct dentry *); extern void unlock_rename(struct dentry *, struct dentry *); +/** + * mode_strip_umask - handle vfs umask stripping + * @dir: parent directory of the new inode + * @mode: mode of the new inode to be created in @dir + * + * In most filesystems, umask stripping depends on whether or not the + * filesystem supports POSIX ACLs. If the filesystem doesn't support it, umask + * stripping is done directly in here. If the filesystem does support POSIX + * ACLs, umask stripping is deferred until the filesystem calls + * posix_acl_create(). + * + * Some filesystems (like NFSv4) also want to avoid umask stripping by the + * VFS, but don't support POSIX ACLs. Those filesystems can set SB_I_NOUMASK + * to get this effect without declaring that they support POSIX ACLs.
+ * + * Returns: mode +static inline umode_t __must_check mode_strip_umask(const struct inode *dir, umode_t mode) +{ + if (!IS_POSIXACL(dir) && !(dir->i_sb->s_iflags & SB_I_NOUMASK)) + mode &= ~current_umask(); + return mode; +} + extern int __must_check nd_jump_link(const struct path *path); static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) @@ -112,7 +137,7 @@ static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) static inline bool retry_estale(const long error, const unsigned int flags) { - return error == -ESTALE && !(flags & LOOKUP_REVAL); + return unlikely(error == -ESTALE && !(flags & LOOKUP_REVAL)); } #endif /* _LINUX_NAMEI_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b8e60a2041..2564e20946 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -79,6 +79,8 @@ struct xdp_buff; struct xdp_frame; struct xdp_metadata_ops; struct xdp_md; +/* DPLL specific */ +struct dpll_pin; typedef u32 xdp_features_t; @@ -480,6 +482,29 @@ static inline bool napi_prefer_busy_poll(struct napi_struct *n) return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); } +/** + * napi_is_scheduled - test if NAPI is scheduled + * @n: NAPI context + * + * This check is "best-effort". With no locking implemented, + * a NAPI can be scheduled or terminate right after this check + * and produce imprecise results. + * + * NAPI_STATE_SCHED is an internal state; napi_is_scheduled + * should not be used normally and napi_schedule should be + * used instead. + * + * Use only if the driver really needs to check if a NAPI + * is scheduled, for example in the context of a delayed timer + * that can be skipped if a NAPI is already scheduled. + * + * Return True if NAPI is scheduled, False otherwise. + */ +static inline bool napi_is_scheduled(struct napi_struct *n) +{ + return test_bit(NAPI_STATE_SCHED, &n->state); +} + bool napi_schedule_prep(struct napi_struct *n); /** @@ -488,11 +513,18 @@ bool napi_schedule_prep(struct napi_struct *n); * * Schedule NAPI poll routine to be called if it is not already * running. + * Return true if we schedule a NAPI or false if not. + * Refer to napi_schedule_prep() for additional reasons why + * a NAPI might not be scheduled. */ -static inline void napi_schedule(struct napi_struct *n) +static inline bool napi_schedule(struct napi_struct *n) { - if (napi_schedule_prep(n)) + if (napi_schedule_prep(n)) { __napi_schedule(n); + return true; + } + + return false; } /** @@ -507,16 +539,6 @@ static inline void napi_schedule_irqoff(struct napi_struct *n) __napi_schedule_irqoff(n); } -/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ -static inline bool napi_reschedule(struct napi_struct *napi) -{ - if (napi_schedule_prep(napi)) { - __napi_schedule(napi); - return true; - } - return false; -} - /** * napi_complete_done - NAPI processing complete * @n: NAPI context @@ -917,6 +939,7 @@ struct net_device_path { u8 queue; u16 wcid; u8 bss; + u8 amsdu; } mtk_wdma; }; }; @@ -1287,9 +1310,7 @@ struct netdev_net_notifier { * struct net_device *dev, * const unsigned char *addr, u16 vid) * Deletes the FDB entry from dev corresponding to addr.
- * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[], - * struct net_device *dev, - * u16 vid, + * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, * struct netlink_ext_ack *extack); * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, * struct net_device *dev, struct net_device *filter_dev, @@ -1564,10 +1585,8 @@ struct net_device_ops { struct net_device *dev, const unsigned char *addr, u16 vid, struct netlink_ext_ack *extack); - int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, - struct nlattr *tb[], + int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, - u16 vid, struct netlink_ext_ack *extack); int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, @@ -1590,6 +1609,10 @@ struct net_device_ops { int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb, struct netlink_callback *cb); + int (*ndo_mdb_get)(struct net_device *dev, + struct nlattr *tb[], u32 portid, + u32 seq, + struct netlink_ext_ack *extack); int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, u16 flags, @@ -2060,6 +2083,9 @@ enum netdev_stat_type { * SET_NETDEV_DEVLINK_PORT macro. This pointer is static * during the time netdevice is registered. * + * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem, + * where the clock is recovered. + * * FIXME: cleanup struct net_device such that network protocol info * moves out. */ @@ -2417,6 +2443,10 @@ struct net_device { struct rtnl_hw_stats64 *offload_xstats_l3; struct devlink_port *devlink_port; + +#if IS_ENABLED(CONFIG_DPLL) + struct dpll_pin *dpll_pin; +#endif }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -3962,6 +3992,18 @@ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); int dev_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid, bool recurse); bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); +void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin); +void netdev_dpll_pin_clear(struct net_device *dev); + +static inline struct dpll_pin *netdev_dpll_pin(const struct net_device *dev) +{ +#if IS_ENABLED(CONFIG_DPLL) + return dev->dpll_pin; +#else + return NULL; +#endif +} + struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -4002,32 +4044,19 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev, return false; } -struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev); - -static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev) -{ - /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ - struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); - - if (likely(p)) - return p; - - return netdev_core_stats_alloc(dev); -} +void netdev_core_stats_inc(struct net_device *dev, u32 offset); #define DEV_CORE_STATS_INC(FIELD) \ static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ { \ - struct net_device_core_stats __percpu *p; \ - \ - p = dev_core_stats(dev); \ - if (p) \ - this_cpu_inc(p->FIELD); \ + netdev_core_stats_inc(dev, \ + offsetof(struct net_device_core_stats, FIELD)); \ } DEV_CORE_STATS_INC(rx_dropped) DEV_CORE_STATS_INC(tx_dropped) DEV_CORE_STATS_INC(rx_nohandler) DEV_CORE_STATS_INC(rx_otherhost_dropped) +#undef DEV_CORE_STATS_INC 
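The netdevice.h hunk just above swaps the per-field dev_core_stats() inline helpers for one out-of-line netdev_core_stats_inc() keyed by byte offset, with DEV_CORE_STATS_INC() stamping out a thin wrapper per field. A standalone sketch of that offsetof-dispatch pattern, with toy_* names and a plain global counter structure instead of the kernel's per-CPU stats:

#include <stddef.h>

struct toy_core_stats {
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

static struct toy_core_stats toy_stats;

/* Single out-of-line helper: bump the counter at the given byte offset. */
static void toy_core_stats_inc(unsigned int offset)
{
	unsigned long *field = (unsigned long *)((char *)&toy_stats + offset);

	++*field;
}

/* Stamp out one thin wrapper per field, then retire the generator. */
#define TOY_CORE_STATS_INC(FIELD)					\
static inline void toy_stats_##FIELD##_inc(void)			\
{									\
	toy_core_stats_inc(offsetof(struct toy_core_stats, FIELD));	\
}
TOY_CORE_STATS_INC(rx_dropped)
TOY_CORE_STATS_INC(tx_dropped)
#undef TOY_CORE_STATS_INC

A call such as toy_stats_rx_dropped_inc() reduces to passing one compile-time constant offset, which is why the kernel can keep the fast path tiny and hide the allocation logic behind the out-of-line helper.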
static __always_inline int ____dev_forward_skb(struct net_device *dev, struct sk_buff *skb, diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index d68644b7c2..80900d9109 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -22,6 +22,16 @@ static inline int NF_DROP_GETERR(int verdict) return -(verdict >> NF_VERDICT_QBITS); } +static __always_inline int +NF_DROP_REASON(struct sk_buff *skb, enum skb_drop_reason reason, u32 err) +{ + BUILD_BUG_ON(err > 0xffff); + + kfree_skb_reason(skb, reason); + + return ((err << 16) | NF_STOLEN); +} + static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1, const union nf_inet_addr *a2) { diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index e8c350a3ad..e9f4f845d7 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -186,6 +186,8 @@ struct ip_set_type_variant { /* Return true if "b" set is the same as "a" * according to the create set parameters */ bool (*same_set)(const struct ip_set *a, const struct ip_set *b); + /* Cancel ongoing garbage collectors before destroying the set */ + void (*cancel_gc)(struct ip_set *set); /* Region-locking is used */ bool region_lock; }; @@ -242,6 +244,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type); /* A generic IP set */ struct ip_set { + /* For call_rcu in destroy */ + struct rcu_head rcu; /* The name of the set */ char name[IPSET_MAXNAMELEN]; /* Lock protecting the set data */ diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 730003c4f4..c11c4db346 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -150,7 +150,7 @@ enum nfs_opnum4 { OP_WRITE_SAME = 70, OP_CLONE = 71, - /* xattr support (RFC8726) */ + /* xattr support (RFC8276) */ OP_GETXATTR = 72, OP_SETXATTR = 73, OP_LISTXATTRS = 74, @@ -389,79 +389,203 @@ enum lock_type4 { NFS4_WRITEW_LT = 4 }; +/* + * Symbol names and values are from RFC 7531 Section 2. 
+ * "XDR Description of NFSv4.0" + */ +enum { + FATTR4_SUPPORTED_ATTRS = 0, + FATTR4_TYPE = 1, + FATTR4_FH_EXPIRE_TYPE = 2, + FATTR4_CHANGE = 3, + FATTR4_SIZE = 4, + FATTR4_LINK_SUPPORT = 5, + FATTR4_SYMLINK_SUPPORT = 6, + FATTR4_NAMED_ATTR = 7, + FATTR4_FSID = 8, + FATTR4_UNIQUE_HANDLES = 9, + FATTR4_LEASE_TIME = 10, + FATTR4_RDATTR_ERROR = 11, + FATTR4_ACL = 12, + FATTR4_ACLSUPPORT = 13, + FATTR4_ARCHIVE = 14, + FATTR4_CANSETTIME = 15, + FATTR4_CASE_INSENSITIVE = 16, + FATTR4_CASE_PRESERVING = 17, + FATTR4_CHOWN_RESTRICTED = 18, + FATTR4_FILEHANDLE = 19, + FATTR4_FILEID = 20, + FATTR4_FILES_AVAIL = 21, + FATTR4_FILES_FREE = 22, + FATTR4_FILES_TOTAL = 23, + FATTR4_FS_LOCATIONS = 24, + FATTR4_HIDDEN = 25, + FATTR4_HOMOGENEOUS = 26, + FATTR4_MAXFILESIZE = 27, + FATTR4_MAXLINK = 28, + FATTR4_MAXNAME = 29, + FATTR4_MAXREAD = 30, + FATTR4_MAXWRITE = 31, + FATTR4_MIMETYPE = 32, + FATTR4_MODE = 33, + FATTR4_NO_TRUNC = 34, + FATTR4_NUMLINKS = 35, + FATTR4_OWNER = 36, + FATTR4_OWNER_GROUP = 37, + FATTR4_QUOTA_AVAIL_HARD = 38, + FATTR4_QUOTA_AVAIL_SOFT = 39, + FATTR4_QUOTA_USED = 40, + FATTR4_RAWDEV = 41, + FATTR4_SPACE_AVAIL = 42, + FATTR4_SPACE_FREE = 43, + FATTR4_SPACE_TOTAL = 44, + FATTR4_SPACE_USED = 45, + FATTR4_SYSTEM = 46, + FATTR4_TIME_ACCESS = 47, + FATTR4_TIME_ACCESS_SET = 48, + FATTR4_TIME_BACKUP = 49, + FATTR4_TIME_CREATE = 50, + FATTR4_TIME_DELTA = 51, + FATTR4_TIME_METADATA = 52, + FATTR4_TIME_MODIFY = 53, + FATTR4_TIME_MODIFY_SET = 54, + FATTR4_MOUNTED_ON_FILEID = 55, +}; + +/* + * Symbol names and values are from RFC 5662 Section 2. + * "XDR Description of NFSv4.1" + */ +enum { + FATTR4_DIR_NOTIF_DELAY = 56, + FATTR4_DIRENT_NOTIF_DELAY = 57, + FATTR4_DACL = 58, + FATTR4_SACL = 59, + FATTR4_CHANGE_POLICY = 60, + FATTR4_FS_STATUS = 61, + FATTR4_FS_LAYOUT_TYPES = 62, + FATTR4_LAYOUT_HINT = 63, + FATTR4_LAYOUT_TYPES = 64, + FATTR4_LAYOUT_BLKSIZE = 65, + FATTR4_LAYOUT_ALIGNMENT = 66, + FATTR4_FS_LOCATIONS_INFO = 67, + FATTR4_MDSTHRESHOLD = 68, + FATTR4_RETENTION_GET = 69, + FATTR4_RETENTION_SET = 70, + FATTR4_RETENTEVT_GET = 71, + FATTR4_RETENTEVT_SET = 72, + FATTR4_RETENTION_HOLD = 73, + FATTR4_MODE_SET_MASKED = 74, + FATTR4_SUPPATTR_EXCLCREAT = 75, + FATTR4_FS_CHARSET_CAP = 76, +}; + +/* + * Symbol names and values are from RFC 7863 Section 2. + * "XDR Description of NFSv4.2" + */ +enum { + FATTR4_CLONE_BLKSIZE = 77, + FATTR4_SPACE_FREED = 78, + FATTR4_CHANGE_ATTR_TYPE = 79, + FATTR4_SEC_LABEL = 80, +}; + +/* + * Symbol names and values are from RFC 8275 Section 5. + * "The mode_umask Attribute" + */ +enum { + FATTR4_MODE_UMASK = 81, +}; + +/* + * Symbol names and values are from RFC 8276 Section 8.6. + * "Numeric Values Assigned to Protocol Extensions" + */ +enum { + FATTR4_XATTR_SUPPORT = 82, +}; + +/* + * The following internal definitions enable processing the above + * attribute bits within 32-bit word boundaries. 
+ */ /* Mandatory Attributes */ -#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0) -#define FATTR4_WORD0_TYPE (1UL << 1) -#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2) -#define FATTR4_WORD0_CHANGE (1UL << 3) -#define FATTR4_WORD0_SIZE (1UL << 4) -#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5) -#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6) -#define FATTR4_WORD0_NAMED_ATTR (1UL << 7) -#define FATTR4_WORD0_FSID (1UL << 8) -#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) -#define FATTR4_WORD0_LEASE_TIME (1UL << 10) -#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) +#define FATTR4_WORD0_SUPPORTED_ATTRS BIT(FATTR4_SUPPORTED_ATTRS) +#define FATTR4_WORD0_TYPE BIT(FATTR4_TYPE) +#define FATTR4_WORD0_FH_EXPIRE_TYPE BIT(FATTR4_FH_EXPIRE_TYPE) +#define FATTR4_WORD0_CHANGE BIT(FATTR4_CHANGE) +#define FATTR4_WORD0_SIZE BIT(FATTR4_SIZE) +#define FATTR4_WORD0_LINK_SUPPORT BIT(FATTR4_LINK_SUPPORT) +#define FATTR4_WORD0_SYMLINK_SUPPORT BIT(FATTR4_SYMLINK_SUPPORT) +#define FATTR4_WORD0_NAMED_ATTR BIT(FATTR4_NAMED_ATTR) +#define FATTR4_WORD0_FSID BIT(FATTR4_FSID) +#define FATTR4_WORD0_UNIQUE_HANDLES BIT(FATTR4_UNIQUE_HANDLES) +#define FATTR4_WORD0_LEASE_TIME BIT(FATTR4_LEASE_TIME) +#define FATTR4_WORD0_RDATTR_ERROR BIT(FATTR4_RDATTR_ERROR) /* Mandatory in NFSv4.1 */ -#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11) +#define FATTR4_WORD2_SUPPATTR_EXCLCREAT BIT(FATTR4_SUPPATTR_EXCLCREAT - 64) /* Recommended Attributes */ -#define FATTR4_WORD0_ACL (1UL << 12) -#define FATTR4_WORD0_ACLSUPPORT (1UL << 13) -#define FATTR4_WORD0_ARCHIVE (1UL << 14) -#define FATTR4_WORD0_CANSETTIME (1UL << 15) -#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16) -#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17) -#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18) -#define FATTR4_WORD0_FILEHANDLE (1UL << 19) -#define FATTR4_WORD0_FILEID (1UL << 20) -#define FATTR4_WORD0_FILES_AVAIL (1UL << 21) -#define FATTR4_WORD0_FILES_FREE (1UL << 22) -#define FATTR4_WORD0_FILES_TOTAL (1UL << 23) -#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24) -#define FATTR4_WORD0_HIDDEN (1UL << 25) -#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26) -#define FATTR4_WORD0_MAXFILESIZE (1UL << 27) -#define FATTR4_WORD0_MAXLINK (1UL << 28) -#define FATTR4_WORD0_MAXNAME (1UL << 29) -#define FATTR4_WORD0_MAXREAD (1UL << 30) -#define FATTR4_WORD0_MAXWRITE (1UL << 31) -#define FATTR4_WORD1_MIMETYPE (1UL << 0) -#define FATTR4_WORD1_MODE (1UL << 1) -#define FATTR4_WORD1_NO_TRUNC (1UL << 2) -#define FATTR4_WORD1_NUMLINKS (1UL << 3) -#define FATTR4_WORD1_OWNER (1UL << 4) -#define FATTR4_WORD1_OWNER_GROUP (1UL << 5) -#define FATTR4_WORD1_QUOTA_HARD (1UL << 6) -#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7) -#define FATTR4_WORD1_QUOTA_USED (1UL << 8) -#define FATTR4_WORD1_RAWDEV (1UL << 9) -#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10) -#define FATTR4_WORD1_SPACE_FREE (1UL << 11) -#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12) -#define FATTR4_WORD1_SPACE_USED (1UL << 13) -#define FATTR4_WORD1_SYSTEM (1UL << 14) -#define FATTR4_WORD1_TIME_ACCESS (1UL << 15) -#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16) -#define FATTR4_WORD1_TIME_BACKUP (1UL << 17) -#define FATTR4_WORD1_TIME_CREATE (1UL << 18) -#define FATTR4_WORD1_TIME_DELTA (1UL << 19) -#define FATTR4_WORD1_TIME_METADATA (1UL << 20) -#define FATTR4_WORD1_TIME_MODIFY (1UL << 21) -#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22) -#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23) -#define FATTR4_WORD1_DACL (1UL << 26) -#define FATTR4_WORD1_SACL (1UL << 27) -#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30) -#define FATTR4_WORD2_LAYOUT_TYPES (1UL 
<< 0) -#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) -#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) -#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) -#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15) -#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) -#define FATTR4_WORD2_MODE_UMASK (1UL << 17) -#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18) +#define FATTR4_WORD0_ACL BIT(FATTR4_ACL) +#define FATTR4_WORD0_ACLSUPPORT BIT(FATTR4_ACLSUPPORT) +#define FATTR4_WORD0_ARCHIVE BIT(FATTR4_ARCHIVE) +#define FATTR4_WORD0_CANSETTIME BIT(FATTR4_CANSETTIME) +#define FATTR4_WORD0_CASE_INSENSITIVE BIT(FATTR4_CASE_INSENSITIVE) +#define FATTR4_WORD0_CASE_PRESERVING BIT(FATTR4_CASE_PRESERVING) +#define FATTR4_WORD0_CHOWN_RESTRICTED BIT(FATTR4_CHOWN_RESTRICTED) +#define FATTR4_WORD0_FILEHANDLE BIT(FATTR4_FILEHANDLE) +#define FATTR4_WORD0_FILEID BIT(FATTR4_FILEID) +#define FATTR4_WORD0_FILES_AVAIL BIT(FATTR4_FILES_AVAIL) +#define FATTR4_WORD0_FILES_FREE BIT(FATTR4_FILES_FREE) +#define FATTR4_WORD0_FILES_TOTAL BIT(FATTR4_FILES_TOTAL) +#define FATTR4_WORD0_FS_LOCATIONS BIT(FATTR4_FS_LOCATIONS) +#define FATTR4_WORD0_HIDDEN BIT(FATTR4_HIDDEN) +#define FATTR4_WORD0_HOMOGENEOUS BIT(FATTR4_HOMOGENEOUS) +#define FATTR4_WORD0_MAXFILESIZE BIT(FATTR4_MAXFILESIZE) +#define FATTR4_WORD0_MAXLINK BIT(FATTR4_MAXLINK) +#define FATTR4_WORD0_MAXNAME BIT(FATTR4_MAXNAME) +#define FATTR4_WORD0_MAXREAD BIT(FATTR4_MAXREAD) +#define FATTR4_WORD0_MAXWRITE BIT(FATTR4_MAXWRITE) + +#define FATTR4_WORD1_MIMETYPE BIT(FATTR4_MIMETYPE - 32) +#define FATTR4_WORD1_MODE BIT(FATTR4_MODE - 32) +#define FATTR4_WORD1_NO_TRUNC BIT(FATTR4_NO_TRUNC - 32) +#define FATTR4_WORD1_NUMLINKS BIT(FATTR4_NUMLINKS - 32) +#define FATTR4_WORD1_OWNER BIT(FATTR4_OWNER - 32) +#define FATTR4_WORD1_OWNER_GROUP BIT(FATTR4_OWNER_GROUP - 32) +#define FATTR4_WORD1_QUOTA_HARD BIT(FATTR4_QUOTA_AVAIL_HARD - 32) +#define FATTR4_WORD1_QUOTA_SOFT BIT(FATTR4_QUOTA_AVAIL_SOFT - 32) +#define FATTR4_WORD1_QUOTA_USED BIT(FATTR4_QUOTA_USED - 32) +#define FATTR4_WORD1_RAWDEV BIT(FATTR4_RAWDEV - 32) +#define FATTR4_WORD1_SPACE_AVAIL BIT(FATTR4_SPACE_AVAIL - 32) +#define FATTR4_WORD1_SPACE_FREE BIT(FATTR4_SPACE_FREE - 32) +#define FATTR4_WORD1_SPACE_TOTAL BIT(FATTR4_SPACE_TOTAL - 32) +#define FATTR4_WORD1_SPACE_USED BIT(FATTR4_SPACE_USED - 32) +#define FATTR4_WORD1_SYSTEM BIT(FATTR4_SYSTEM - 32) +#define FATTR4_WORD1_TIME_ACCESS BIT(FATTR4_TIME_ACCESS - 32) +#define FATTR4_WORD1_TIME_ACCESS_SET BIT(FATTR4_TIME_ACCESS_SET - 32) +#define FATTR4_WORD1_TIME_BACKUP BIT(FATTR4_TIME_BACKUP - 32) +#define FATTR4_WORD1_TIME_CREATE BIT(FATTR4_TIME_CREATE - 32) +#define FATTR4_WORD1_TIME_DELTA BIT(FATTR4_TIME_DELTA - 32) +#define FATTR4_WORD1_TIME_METADATA BIT(FATTR4_TIME_METADATA - 32) +#define FATTR4_WORD1_TIME_MODIFY BIT(FATTR4_TIME_MODIFY - 32) +#define FATTR4_WORD1_TIME_MODIFY_SET BIT(FATTR4_TIME_MODIFY_SET - 32) +#define FATTR4_WORD1_MOUNTED_ON_FILEID BIT(FATTR4_MOUNTED_ON_FILEID - 32) +#define FATTR4_WORD1_DACL BIT(FATTR4_DACL - 32) +#define FATTR4_WORD1_SACL BIT(FATTR4_SACL - 32) +#define FATTR4_WORD1_FS_LAYOUT_TYPES BIT(FATTR4_FS_LAYOUT_TYPES - 32) + +#define FATTR4_WORD2_LAYOUT_TYPES BIT(FATTR4_LAYOUT_TYPES - 64) +#define FATTR4_WORD2_LAYOUT_BLKSIZE BIT(FATTR4_LAYOUT_BLKSIZE - 64) +#define FATTR4_WORD2_MDSTHRESHOLD BIT(FATTR4_MDSTHRESHOLD - 64) +#define FATTR4_WORD2_CLONE_BLKSIZE BIT(FATTR4_CLONE_BLKSIZE - 64) +#define FATTR4_WORD2_CHANGE_ATTR_TYPE BIT(FATTR4_CHANGE_ATTR_TYPE - 64) +#define FATTR4_WORD2_SECURITY_LABEL BIT(FATTR4_SEC_LABEL - 64) +#define FATTR4_WORD2_MODE_UMASK 
BIT(FATTR4_MODE_UMASK - 64) +#define FATTR4_WORD2_XATTR_SUPPORT BIT(FATTR4_XATTR_SUPPORT - 64) /* MDS threshold bitmap bits */ #define THRESHOLD_RD (1UL << 0) diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index cd628c4b01..cd797e00fe 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -239,6 +239,7 @@ struct nfs_server { struct list_head delegations; struct list_head ss_copies; + unsigned long delegation_gen; unsigned long mig_gen; unsigned long mig_status; #define NFS_MIG_IN_TRANSITION (1) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 12bbb5c636..539b57fbf3 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1772,7 +1772,7 @@ struct nfs_rpc_ops { void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); int (*link) (struct inode *, struct inode *, const struct qstr *); - int (*symlink) (struct inode *, struct dentry *, struct page *, + int (*symlink) (struct inode *, struct dentry *, struct folio *, unsigned int, struct iattr *); int (*mkdir) (struct inode *, struct dentry *, struct iattr *); int (*rmdir) (struct inode *, const struct qstr *); diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h index dcb8030062..c1d0bc5d96 100644 --- a/include/linux/nvme-auth.h +++ b/include/linux/nvme-auth.h @@ -9,9 +9,9 @@ #include <crypto/kpp.h> struct nvme_dhchap_key { - u8 *key; size_t len; u8 hash; + u8 key[]; }; u32 nvme_auth_get_seqnum(void); @@ -24,10 +24,13 @@ const char *nvme_auth_digest_name(u8 hmac_id); size_t nvme_auth_hmac_hash_len(u8 hmac_id); u8 nvme_auth_hmac_id(const char *hmac_name); +u32 nvme_auth_key_struct_size(u32 key_len); struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, u8 key_hash); void nvme_auth_free_key(struct nvme_dhchap_key *key); -u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn); +struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash); +struct nvme_dhchap_key *nvme_auth_transform_key( + struct nvme_dhchap_key *key, char *nqn); int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key); int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len, u8 *challenge, u8 *aug, size_t hlen); diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h new file mode 100644 index 0000000000..e10333d78d --- /dev/null +++ b/include/linux/nvme-keyring.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2023 Hannes Reinecke, SUSE Labs + */ + +#ifndef _NVME_KEYRING_H +#define _NVME_KEYRING_H + +#if IS_ENABLED(CONFIG_NVME_KEYRING) + +key_serial_t nvme_tls_psk_default(struct key *keyring, + const char *hostnqn, const char *subnqn); + +key_serial_t nvme_keyring_id(void); + +#else + +static inline key_serial_t nvme_tls_psk_default(struct key *keyring, + const char *hostnqn, const char *subnqn) +{ + return 0; +} +static inline key_serial_t nvme_keyring_id(void) +{ + return 0; +} +#endif /* !CONFIG_NVME_KEYRING */ +#endif /* _NVME_KEYRING_H */ diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h index 57ebe1267f..e07e8978d6 100644 --- a/include/linux/nvme-tcp.h +++ b/include/linux/nvme-tcp.h @@ -18,6 +18,12 @@ enum nvme_tcp_pfv { NVME_TCP_PFV_1_0 = 0x0, }; +enum nvme_tcp_tls_cipher { + NVME_TCP_TLS_CIPHER_INVALID = 0, + NVME_TCP_TLS_CIPHER_SHA256 = 1, + NVME_TCP_TLS_CIPHER_SHA384 = 2, +}; + enum nvme_tcp_fatal_error_status { NVME_TCP_FES_INVALID_PDU_HDR = 0x01, 
NVME_TCP_FES_PDU_SEQ_ERR = 0x02, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 26dd3f859d..44325c068b 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -108,6 +108,13 @@ enum { NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */ }; +/* TSAS SECTYPE for TCP transport */ +enum { + NVMF_TCP_SECTYPE_NONE = 0, /* No Security */ + NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */ + NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */ +}; + #define NVME_AQ_DEPTH 32 #define NVME_NR_AEN_COMMANDS 1 #define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS) @@ -1493,6 +1500,9 @@ struct nvmf_disc_rsp_page_entry { __u16 pkey; __u8 resv10[246]; } rdma; + struct tcp { + __u8 sectype; + } tcp; } tsas; }; @@ -1722,7 +1732,7 @@ struct nvmf_auth_dhchap_success1_data { __u8 rsvd2; __u8 rvalid; __u8 rsvd3[7]; - /* 'hl' bytes of response value if 'rvalid' is set */ + /* 'hl' bytes of response value */ __u8 rval[]; }; diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 4523e4e833..6ec4b9743e 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -127,6 +127,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell, return -EOPNOTSUPP; } +static inline int nvmem_cell_read_u8(struct device *dev, + const char *cell_id, u8 *val) +{ + return -EOPNOTSUPP; +} + static inline int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val) { diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index dae26295e6..e393083523 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -82,13 +82,13 @@ struct nvmem_cell_info { * @owner: Pointer to exporter module. Used for refcounting. * @cells: Optional array of pre-defined NVMEM cells. * @ncells: Number of elements in cells. + * @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax. * @keepout: Optional array of keepout ranges (sorted ascending by start). * @nkeepout: Number of elements in the keepout array. * @type: Type of the nvmem storage * @read_only: Device is read-only. * @root_only: Device is accessible to root only. * @of_node: If given, this will be used instead of the parent's of_node. - * @no_of_node: Device should not use the parent's of_node even if it's !NULL. * @reg_read: Callback to read data. * @reg_write: Callback to write data. * @size: Device size. @@ -112,6 +112,7 @@ struct nvmem_config { struct module *owner; const struct nvmem_cell_info *cells; int ncells; + bool add_legacy_fixed_of_cells; const struct nvmem_keepout *keepout; unsigned int nkeepout; enum nvmem_type type; @@ -120,7 +121,6 @@ struct nvmem_config { bool ignore_wp; struct nvmem_layout *layout; struct device_node *of_node; - bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; int size; diff --git a/include/linux/objpool.h b/include/linux/objpool.h new file mode 100644 index 0000000000..15aff4a17f --- /dev/null +++ b/include/linux/objpool.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_OBJPOOL_H +#define _LINUX_OBJPOOL_H + +#include <linux/types.h> +#include <linux/refcount.h> + +/* + * objpool: ring-array based lockless MPMC queue + * + * Copyright: wuqiang.matt@bytedance.com, mhiramat@kernel.org + * + * objpool is a scalable implementation of a high-performance queue for + * object allocation and reclamation, such as kretprobe instances.
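+ *
+ * A minimal usage sketch (illustrative only; "struct my_obj" and the
+ * counts below are assumptions, not part of this header):
+ *
+ *	struct objpool_head pool;
+ *	struct my_obj *obj;
+ *
+ *	objpool_init(&pool, 128, sizeof(struct my_obj), GFP_KERNEL,
+ *		     NULL, NULL, NULL);	// no context/objinit/release
+ *	obj = objpool_pop(&pool);	// take a pre-allocated object
+ *	objpool_push(obj, &pool);	// give it back when done
+ *	objpool_fini(&pool);		// drop the pool's own reference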
+ * + * By leveraging a percpu ring-array to mitigate hot spots of memory + * contention, it delivers near-linear scalability in highly parallel + * scenarios. The objpool is best suited for the following cases: + * 1) Memory allocation or reclamation is prohibited or too expensive + * 2) Consumers are of different priorities, such as irqs and threads + * + * Limitations: + * 1) The maximum number of objects (capacity) is fixed after objpool creation + * 2) All pre-allocated objects are managed in a percpu ring array, + * which consumes more memory than linked lists + */ + +/** + * struct objpool_slot - percpu ring array of objpool + * @head: head sequence of the local ring array (to retrieve at) + * @tail: tail sequence of the local ring array (to append at) + * @last: the last sequence number marked as ready for retrieve + * @mask: bits mask for modulo capacity to compute array indexes + * @entries: object entries on this slot + * + * Represents a cpu-local array-based ring buffer; its size is specified + * during initialization of the object pool. Each percpu objpool_slot is + * allocated from local memory on NUMA systems and kept compact in + * contiguous memory: the objects assigned to a CPU are stored just after + * the body of its objpool_slot. + * + * The real size of the ring array is far smaller than the value range of + * head and tail, which are typed as uint32_t: [0, 2^32), so only the lower + * bits (mask) of head and tail are used as the actual position in the ring + * array. In general the ring array acts like a small sliding window, + * always moving forward in the loop of [0, 2^32). + */ +struct objpool_slot { + uint32_t head; + uint32_t tail; + uint32_t last; + uint32_t mask; + void *entries[]; +} __packed; + +struct objpool_head; + +/* + * caller-specified callback for initial object setup; it's called only + * once for each object (just after the memory allocation of the object) + */ +typedef int (*objpool_init_obj_cb)(void *obj, void *context); + +/* caller-specified cleanup callback for objpool destruction */ +typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context); + +/** + * struct objpool_head - object pooling metadata + * @obj_size: object size, aligned to sizeof(void *) + * @nr_objs: total objs (to be pre-allocated with objpool) + * @nr_cpus: local copy of nr_cpu_ids + * @capacity: max objs that can be managed by one objpool_slot + * @gfp: gfp flags for kmalloc & vmalloc + * @ref: refcount of objpool + * @flags: flags for objpool management + * @cpu_slots: pointer to the array of objpool_slot + * @release: resource cleanup callback + * @context: caller-provided context + */ +struct objpool_head { + int obj_size; + int nr_objs; + int nr_cpus; + int capacity; + gfp_t gfp; + refcount_t ref; + unsigned long flags; + struct objpool_slot **cpu_slots; + objpool_fini_cb release; + void *context; +}; + +#define OBJPOOL_NR_OBJECT_MAX (1UL << 24) /* maximum number of total objects */ +#define OBJPOOL_OBJECT_SIZE_MAX (1UL << 16) /* maximum size of an object */ + +/** + * objpool_init() - initialize objpool and pre-allocated objects + * @pool: the object pool to be initialized, declared by caller + * @nr_objs: total objects to be pre-allocated by this object pool + * @object_size: size of an object (should be > 0) + * @gfp: flags for memory allocation (via kmalloc or vmalloc) + * @context: user context for object initialization callback + * @objinit: object initialization callback for extra setup + * @release: cleanup callback for extra cleanup tasks + * + * return value:
0 for success, otherwise an error code + * + * All pre-allocated objects are to be zeroed after memory allocation. + * The caller can do extra initialization in the objinit callback; objinit() + * will be called just after slot allocation, and only once for + * each object. After that the objpool won't touch any content of the + * objects. It's the caller's duty to perform reinitialization after each + * pop (object allocation) or to perform cleanup before each push (object + * reclamation). + */ +int objpool_init(struct objpool_head *pool, int nr_objs, int object_size, + gfp_t gfp, void *context, objpool_init_obj_cb objinit, + objpool_fini_cb release); + +/** + * objpool_pop() - allocate an object from objpool + * @pool: object pool + * + * return value: object ptr, or NULL on failure + */ +void *objpool_pop(struct objpool_head *pool); + +/** + * objpool_push() - reclaim the object and return it back to the objpool + * @obj: object ptr to be pushed to objpool + * @pool: object pool + * + * return: 0 or an error code (it fails only when the user tries to push + * the same object multiple times or to push wrong "objects" into the + * objpool) + */ +int objpool_push(void *obj, struct objpool_head *pool); + +/** + * objpool_drop() - discard the object and deref objpool + * @obj: object ptr to be discarded + * @pool: object pool + * + * return: 0 if objpool was released; -EAGAIN if there are still + * outstanding objects + * + * objpool_drop() is normally used to release outstanding objects + * after objpool cleanup (objpool_fini()). Consider this example: + * a kretprobe is unregistered and objpool_fini() is called to release + * all remaining objects, but some objects are still in use by + * unfinished kretprobes (e.g. inside a blockable function such as + * sys_accept). Only when the last outstanding object is dropped, via + * objpool_drop(), can the whole objpool be released. + */ +int objpool_drop(void *obj, struct objpool_head *pool); + +/** + * objpool_free() - forcibly release the objpool (all objects will be freed) + * @pool: object pool to be released + */ +void objpool_free(struct objpool_head *pool); + +/** + * objpool_fini() - deref object pool (also releasing unused objects) + * @pool: object pool to be dereferenced + * + * objpool_fini() will try to release all remaining free objects and + * then drop an extra reference of the objpool. If all objects are + * already returned to the objpool (so-called synchronous use cases), + * the objpool itself will be freed together. But if there are still + * outstanding objects (so-called asynchronous use cases, such as a + * blockable kretprobe), the objpool won't be released until all + * the outstanding objects are dropped, and the caller must ensure + * there are no concurrent objpool_push() calls in flight. Normally RCU + * is required to make sure all ongoing objpool_push() calls have + * finished before calling objpool_fini(), as test_objpool, kretprobe + * and rethook do. + */ +void objpool_fini(struct objpool_head *pool); + +#endif /* _LINUX_OBJPOOL_H */ diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index f86a08ba02..3921fbed0b 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -17,12 +17,10 @@ * build_OID_registry.pl to generate the data for look_up_OID().
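 *
 * A lookup sketch (illustrative only; look_up_OID() is the helper this
 * comment refers to, declared later in this header):
 *
 *	enum OID oid = look_up_OID(data, datasize);
 *
 *	if (oid == OID_sha3_256)	// one of the SHA-3 OIDs added below
 *		// select the SHA3-256 digest implementation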
*/ enum OID { - OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */ OID_id_dsa, /* 1.2.840.10040.4.1 */ OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */ OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */ - OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */ OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */ OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */ @@ -30,10 +28,6 @@ enum OID { /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ - OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */ - OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */ - OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */ - OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */ OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */ OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */ OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */ @@ -49,11 +43,6 @@ enum OID { OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */ OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */ - /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */ - OID_md2, /* 1.2.840.113549.2.2 */ - OID_md4, /* 1.2.840.113549.2.4 */ - OID_md5, /* 1.2.840.113549.2.5 */ - OID_mskrb5, /* 1.2.840.48018.1.2.2 */ OID_krb5, /* 1.2.840.113554.1.2.2 */ OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */ @@ -75,7 +64,6 @@ enum OID { OID_PKU2U, /* 1.3.5.1.5.2.7 */ OID_Scram, /* 1.3.6.1.5.5.14 */ OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ - OID_sha1, /* 1.3.14.3.2.26 */ OID_id_ansip384r1, /* 1.3.132.0.34 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ @@ -141,6 +129,17 @@ enum OID { OID_TPMImportableKey, /* 2.23.133.10.1.4 */ OID_TPMSealedData, /* 2.23.133.10.1.5 */ + /* CSOR FIPS-202 SHA-3 */ + OID_sha3_256, /* 2.16.840.1.101.3.4.2.8 */ + OID_sha3_384, /* 2.16.840.1.101.3.4.2.9 */ + OID_sha3_512, /* 2.16.840.1.101.3.4.2.10 */ + OID_id_ecdsa_with_sha3_256, /* 2.16.840.1.101.3.4.3.10 */ + OID_id_ecdsa_with_sha3_384, /* 2.16.840.1.101.3.4.3.11 */ + OID_id_ecdsa_with_sha3_512, /* 2.16.840.1.101.3.4.3.12 */ + OID_id_rsassa_pkcs1_v1_5_with_sha3_256, /* 2.16.840.1.101.3.4.3.14 */ + OID_id_rsassa_pkcs1_v1_5_with_sha3_384, /* 2.16.840.1.101.3.4.3.15 */ + OID_id_rsassa_pkcs1_v1_5_with_sha3_512, /* 2.16.840.1.101.3.4.3.16 */ + OID__NR }; diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h index 5581dbd3bd..ea8fb31379 100644 --- a/include/linux/osq_lock.h +++ b/include/linux/osq_lock.h @@ -6,11 +6,6 @@ * An MCS like lock especially tailored for optimistic spinning for sleeping * lock implementations (mutex, rwsem, etc). */ -struct optimistic_spin_node { - struct optimistic_spin_node *next, *prev; - int locked; /* 1 if lock acquired */ - int cpu; /* encoded CPU # + 1 value */ -}; struct optimistic_spin_queue { /* diff --git a/include/linux/overflow.h b/include/linux/overflow.h index f9b60313ea..7b5cf4a5cd 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -309,4 +309,39 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) #define struct_size_t(type, member, count) \ struct_size((type *)NULL, member, count) +/** + * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family. + * Enables caller macro to pass (different) initializer. + * + * @type: structure type name, including "struct" keyword. + * @name: Name for a variable to define. + * @member: Name of the array member. 
+ * @count: Number of elements in the array; must be compile-time const. + * @initializer: initializer expression (could be empty for no init). + */ +#define _DEFINE_FLEX(type, name, member, count, initializer) \ + _Static_assert(__builtin_constant_p(count), \ + "onstack flex array members require compile-time const count"); \ + union { \ + u8 bytes[struct_size_t(type, member, count)]; \ + type obj; \ + } name##_u initializer; \ + type *name = (type *)&name##_u + +/** + * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing + * flexible array member. + * + * @type: structure type name, including "struct" keyword. + * @name: Name for a variable to define. + * @member: Name of the array member. + * @count: Number of elements in the array; must be compile-time const. + * + * Define a zeroed, on-stack, instance of @type structure with a trailing + * flexible array member. + * Use __struct_size(@name) to get compile-time size of it afterwards. + */ +#define DEFINE_FLEX(type, name, member, count) \ + _DEFINE_FLEX(type, name, member, count, = {}) + #endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5c02720c53..a88e64aceb 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -693,6 +693,25 @@ TESTPAGEFLAG_FALSE(Ksm, ksm) u64 stable_page_flags(struct page *page); /** + * folio_xor_flags_has_waiters - Change some folio flags. + * @folio: The folio. + * @mask: Bits set in this word will be changed. + * + * This must only be used for flags which are changed with the folio + * lock held. For example, it is unsafe to use for PG_dirty as that + * can be set without the folio lock held. It can also only be used + * on flags which are in the range 0-6 as some of the implementations + * only affect those bits. + * + * Return: Whether there are tasks waiting on the folio. + */ +static inline bool folio_xor_flags_has_waiters(struct folio *folio, + unsigned long mask) +{ + return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0)); +} + +/** * folio_test_uptodate - Is this folio up to date? * @folio: The folio. * diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 8c9608b217..06142ff7f9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -806,9 +806,6 @@ static inline pgoff_t folio_next_index(struct folio *folio) */ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) { - /* HugeTLBfs indexes the page cache in units of hpage_size */ - if (folio_test_hugetlb(folio)) - return &folio->page; return folio_page(folio, index & (folio_nr_pages(folio) - 1)); } @@ -824,9 +821,6 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) */ static inline bool folio_contains(struct folio *folio, pgoff_t index) { - /* HugeTLBfs indexes the page cache in units of hpage_size */ - if (folio_test_hugetlb(folio)) - return folio->index == index; return index - folio_index(folio) < folio_nr_pages(folio); } @@ -884,10 +878,9 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping, } /* - * Get index of the page within radix-tree (but not for hugetlb pages). - * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) + * Get the offset in PAGE_SIZE (even for hugetlb pages). 
*/ -static inline pgoff_t page_to_index(struct page *page) +static inline pgoff_t page_to_pgoff(struct page *page) { struct page *head; @@ -902,19 +895,6 @@ static inline pgoff_t page_to_index(struct page *page) return head->index + page - head; } -extern pgoff_t hugetlb_basepage_index(struct page *page); - -/* - * Get the offset in PAGE_SIZE (even for hugetlb pages). - * (TODO: hugetlb pages should have ->index in PAGE_SIZE) - */ -static inline pgoff_t page_to_pgoff(struct page *page) -{ - if (unlikely(PageHuge(page))) - return hugetlb_basepage_index(page); - return page_to_index(page); -} - /* * Return byte-offset into filesystem object for page. */ @@ -951,24 +931,16 @@ static inline loff_t folio_file_pos(struct folio *folio) /* * Get the offset in PAGE_SIZE (even for hugetlb folios). - * (TODO: hugetlb folios should have ->index in PAGE_SIZE) */ static inline pgoff_t folio_pgoff(struct folio *folio) { - if (unlikely(folio_test_hugetlb(folio))) - return hugetlb_basepage_index(&folio->page); return folio->index; } -extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, - unsigned long address); - static inline pgoff_t linear_page_index(struct vm_area_struct *vma, unsigned long address) { pgoff_t pgoff; - if (unlikely(is_vm_hugetlb_page(vma))) - return linear_hugepage_index(vma, address); pgoff = (address - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; return pgoff; @@ -1146,6 +1118,7 @@ static inline void wait_on_page_locked(struct page *page) folio_wait_locked(page_folio(page)); } +void folio_end_read(struct folio *folio, bool success); void wait_on_page_writeback(struct page *page); void folio_wait_writeback(struct folio *folio); int folio_wait_writeback_killable(struct folio *folio); diff --git a/include/linux/parport.h b/include/linux/parport.h index 999eddd619..fff39bc306 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -180,8 +180,6 @@ struct ieee1284_info { struct semaphore irq; }; -#define PARPORT_NAME_MAX_LEN 15 - /* A parallel port */ struct parport { unsigned long base; /* base address */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 3af5f29985..675937a5bd 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -713,6 +713,30 @@ static inline bool pci_is_bridge(struct pci_dev *dev) dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; } +/** + * pci_is_vga - check if the PCI device is a VGA device + * + * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define + * VGA Base Class and Sub-Classes: + * + * 03 00 PCI_CLASS_DISPLAY_VGA VGA-compatible or 8514-compatible + * 00 01 PCI_CLASS_NOT_DEFINED_VGA VGA-compatible (before Class Code) + * + * Return true if the PCI device is a VGA device and uses the legacy VGA + * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and + * aliases). 
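+ *
+ * Illustrative use (a sketch, not part of the upstream patch; it pairs
+ * pci_is_vga() with pci_get_base_class(), which this same patch adds):
+ *
+ *	struct pci_dev *pdev = NULL;
+ *
+ *	while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev)))
+ *		if (pci_is_vga(pdev))
+ *			// pdev may decode the legacy VGA ranges
+ *
+ * (Pre-Class Code devices, base class 00, would need a separate scan.)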
+ */ +static inline bool pci_is_vga(struct pci_dev *pdev) +{ + if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) + return true; + + if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA) + return true; + + return false; +} + #define for_each_pci_bridge(dev, bus) \ list_for_each_entry(dev, &bus->devices, bus_list) \ if (!pci_is_bridge(dev)) {} else @@ -1181,6 +1205,8 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn); struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); +struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from); + int pci_dev_present(const struct pci_device_id *ids); int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, @@ -1391,6 +1417,7 @@ int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state); int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); +int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); void pci_pme_active(struct pci_dev *dev, bool enable); @@ -1594,6 +1621,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata); +void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), + void *userdata); int pci_cfg_space_size(struct pci_dev *dev); unsigned char pci_bus_max_busnr(struct pci_bus *bus); void pci_setup_bridge(struct pci_bus *bus); @@ -1953,6 +1982,9 @@ static inline struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) { return NULL; } +static inline struct pci_dev *pci_get_base_class(unsigned int class, + struct pci_dev *from) +{ return NULL; } static inline int pci_dev_present(const struct pci_device_id *ids) { return 0; } @@ -1990,6 +2022,8 @@ static inline int pci_save_state(struct pci_dev *dev) { return 0; } static inline void pci_restore_state(struct pci_dev *dev) { } static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return 0; } +static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state) +{ return 0; } static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable) { return 0; } static inline pci_power_t pci_choose_state(struct pci_dev *dev, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index fe4a3589bb..97cc0baad0 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -581,6 +581,7 @@ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3 #define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb #define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3 +#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b #define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 @@ -3064,6 +3065,7 @@ #define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 #define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2 #define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601 +#define PCI_DEVICE_ID_INTEL_HDA_ARL 0x7728 #define PCI_DEVICE_ID_INTEL_HDA_RPL_S 0x7a50 #define PCI_DEVICE_ID_INTEL_HDA_ADL_S 0x7ad0 #define PCI_DEVICE_ID_INTEL_HDA_MTL 0x7e28 diff --git a/include/linux/pds/pds_core_if.h b/include/linux/pds/pds_core_if.h index e838a2b904..17a87c1a55 100644 --- a/include/linux/pds/pds_core_if.h +++ 
b/include/linux/pds/pds_core_if.h @@ -79,6 +79,7 @@ enum pds_core_status_code { PDS_RC_EVFID = 31, /* VF ID does not exist */ PDS_RC_BAD_FW = 32, /* FW file is invalid or corrupted */ PDS_RC_ECLIENT = 33, /* No such client id */ + PDS_RC_BAD_PCI = 255, /* Broken PCI when reading status */ }; /** diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 68fac2e7cb..8c677f1859 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -132,6 +132,7 @@ extern void __init setup_per_cpu_areas(void); extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1); extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1); extern void free_percpu(void __percpu *__pdata); +extern size_t pcpu_alloc_size(void __percpu *__pdata); DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T)) diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index d01351b152..3a44dd1e33 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -57,6 +57,8 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); +bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, + s64 amount, s32 batch); void percpu_counter_sync(struct percpu_counter *fbc); static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) @@ -69,6 +71,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) percpu_counter_add_batch(fbc, amount, percpu_counter_batch); } +static inline bool +percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) +{ + return __percpu_counter_limited_add(fbc, limit, amount, + percpu_counter_batch); +} + /* * With percpu_counter_add_local() and percpu_counter_sub_local(), counts * are accumulated in local per cpu counter and not in fbc->count until @@ -185,6 +194,27 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount) local_irq_restore(flags); } +static inline bool +percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) +{ + unsigned long flags; + bool good = false; + s64 count; + + if (amount == 0) + return true; + + local_irq_save(flags); + count = fbc->count + amount; + if ((amount > 0 && count <= limit) || + (amount < 0 && count >= limit)) { + fbc->count = count; + good = true; + } + local_irq_restore(flags); + return good; +} + /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */ static inline void percpu_counter_add_local(struct percpu_counter *fbc, s64 amount) diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index e3899bd77f..9c226adf93 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -234,9 +234,12 @@ /* * Event filters for PMUv3 */ -#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) -#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) -#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) +#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) +#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) +#define ARMV8_PMU_EXCLUDE_NS_EL1 (1U << 29) +#define ARMV8_PMU_EXCLUDE_NS_EL0 (1U << 28) +#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) +#define ARMV8_PMU_EXCLUDE_EL3 (1U << 26) /* * PMUSERENR: user enable reg diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e846f87e2d..5547ba68e6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1578,7 +1578,7 @@ extern int 
sysctl_perf_cpu_time_max_percent; extern void perf_sample_event_took(u64 sample_len_ns); -int perf_proc_update_handler(struct ctl_table *table, int write, +int perf_event_max_sample_rate_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); diff --git a/include/linux/phy.h b/include/linux/phy.h index 1351b802ff..bd28595097 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -568,7 +568,6 @@ struct macsec_ops; * - Bits [31:24] are reserved for defining generic * PHY driver behavior. * @irq: IRQ number of the PHY's interrupt (-1 if none) - * @phy_timer: The timer for handling the state machine * @phylink: Pointer to phylink instance for this PHY * @sfp_bus_attached: Flag indicating whether the SFP bus has been attached * @sfp_bus: SFP bus attached to this PHY's fiber port @@ -1736,6 +1735,7 @@ void phy_detach(struct phy_device *phydev); void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); int phy_config_aneg(struct phy_device *phydev); +int _phy_start_aneg(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); int phy_aneg_done(struct phy_device *phydev); int phy_speed_down(struct phy_device *phydev, bool sync); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 2b886ea654..875439ab45 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -227,7 +227,7 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed); /** * struct phylink_mac_ops - MAC operations structure. - * @validate: Validate and update the link configuration. + * @mac_get_caps: Get MAC capabilities for interface mode. * @mac_select_pcs: Select a PCS for the interface mode. * @mac_prepare: prepare for a major reconfiguration of the interface. * @mac_config: configure the MAC for the selected mode and state. @@ -238,9 +238,8 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed); * The individual methods are described more fully below. */ struct phylink_mac_ops { - void (*validate)(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state); + unsigned long (*mac_get_caps)(struct phylink_config *config, + phy_interface_t interface); struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config, phy_interface_t interface); int (*mac_prepare)(struct phylink_config *config, unsigned int mode, @@ -259,39 +258,17 @@ struct phylink_mac_ops { #if 0 /* For kernel-doc purposes only. */ /** - * validate - Validate and update the link configuration + * mac_get_caps: Get MAC capabilities for interface mode. * @config: a pointer to a &struct phylink_config. - * @supported: ethtool bitmask for supported link modes. - * @state: a pointer to a &struct phylink_link_state. - * - * Clear bits in the @supported and @state->advertising masks that - * are not supportable by the MAC. - * - * Note that the PHY may be able to transform from one connection - * technology to another, so, eg, don't clear 1000BaseX just - * because the MAC is unable to BaseX mode. This is more about - * clearing unsupported speeds and duplex settings. The port modes - * should not be cleared; phylink_set_port_modes() will help with this. - * - * When @config->supported_interfaces has been set, phylink will iterate - * over the supported interfaces to determine the full capability of the - * MAC. 
The validation function must not print errors if @state->interface - * is set to an unexpected value. + * @interface: PHY interface mode. * - * When @config->supported_interfaces is empty, phylink will call this - * function with @state->interface set to %PHY_INTERFACE_MODE_NA, and - * expects the MAC driver to return all supported link modes. - * - * If the @state->interface mode is not supported, then the @supported - * mask must be cleared. - * - * This member is optional; if not set, the generic validator will be - * used making use of @config->mac_capabilities and - * @config->supported_interfaces to determine which link modes are - * supported. + * Optional method. When not provided, config->mac_capabilities will be used. + * When implemented, this returns the MAC capabilities for the specified + * interface mode where there is some special handling required by the MAC + * driver (e.g. not supporting half-duplex in certain interface modes.) */ -void validate(struct phylink_config *config, unsigned long *supported, - struct phylink_link_state *state); +unsigned long mac_get_caps(struct phylink_config *config, + phy_interface_t interface); /** * mac_select_pcs: Select a PCS for the interface mode. * @config: a pointer to a &struct phylink_config. @@ -636,17 +613,6 @@ void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex); #endif -void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps); -unsigned long phylink_get_capabilities(phy_interface_t interface, - unsigned long mac_capabilities, - int rate_matching); -void phylink_validate_mask_caps(unsigned long *supported, - struct phylink_link_state *state, - unsigned long caps); -void phylink_generic_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state); - struct phylink *phylink_create(struct phylink_config *, const struct fwnode_handle *, phy_interface_t, diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 4729d54e89..73de70362b 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -17,6 +17,7 @@ #include <linux/pinctrl/pinctrl-state.h> struct device; +struct gpio_chip; /* This struct is private to the core and should be regarded as a cookie */ struct pinctrl; @@ -25,27 +26,30 @@ struct pinctrl_state; #ifdef CONFIG_PINCTRL /* External interface to pin control */ -extern bool pinctrl_gpio_can_use_line(unsigned gpio); -extern int pinctrl_gpio_request(unsigned gpio); -extern void pinctrl_gpio_free(unsigned gpio); -extern int pinctrl_gpio_direction_input(unsigned gpio); -extern int pinctrl_gpio_direction_output(unsigned gpio); -extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config); - -extern struct pinctrl * __must_check pinctrl_get(struct device *dev); -extern void pinctrl_put(struct pinctrl *p); -extern struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p, - const char *name); -extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s); - -extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev); -extern void devm_pinctrl_put(struct pinctrl *p); -extern int pinctrl_select_default_state(struct device *dev); +bool pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset); +int pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset); +void pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset); +int pinctrl_gpio_direction_input(struct gpio_chip *gc, + 
unsigned int offset); +int pinctrl_gpio_direction_output(struct gpio_chip *gc, + unsigned int offset); +int pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + unsigned long config); + +struct pinctrl * __must_check pinctrl_get(struct device *dev); +void pinctrl_put(struct pinctrl *p); +struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p, + const char *name); +int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s); + +struct pinctrl * __must_check devm_pinctrl_get(struct device *dev); +void devm_pinctrl_put(struct pinctrl *p); +int pinctrl_select_default_state(struct device *dev); #ifdef CONFIG_PM -extern int pinctrl_pm_select_default_state(struct device *dev); -extern int pinctrl_pm_select_sleep_state(struct device *dev); -extern int pinctrl_pm_select_idle_state(struct device *dev); +int pinctrl_pm_select_default_state(struct device *dev); +int pinctrl_pm_select_sleep_state(struct device *dev); +int pinctrl_pm_select_idle_state(struct device *dev); #else static inline int pinctrl_pm_select_default_state(struct device *dev) { @@ -63,31 +67,38 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev) #else /* !CONFIG_PINCTRL */ -static inline bool pinctrl_gpio_can_use_line(unsigned gpio) +static inline bool +pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset) { return true; } -static inline int pinctrl_gpio_request(unsigned gpio) +static inline int +pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset) { return 0; } -static inline void pinctrl_gpio_free(unsigned gpio) +static inline void +pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset) { } -static inline int pinctrl_gpio_direction_input(unsigned gpio) +static inline int +pinctrl_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) { return 0; } -static inline int pinctrl_gpio_direction_output(unsigned gpio) +static inline int +pinctrl_gpio_direction_output(struct gpio_chip *gc, unsigned int offset) { return 0; } -static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config) +static inline int +pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + unsigned long config) { return 0; } diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h index 0639b36f43..ee8803f6ad 100644 --- a/include/linux/pinctrl/machine.h +++ b/include/linux/pinctrl/machine.h @@ -11,7 +11,7 @@ #ifndef __LINUX_PINCTRL_MACHINE_H #define __LINUX_PINCTRL_MACHINE_H -#include <linux/kernel.h> /* ARRAY_SIZE() */ +#include <linux/array_size.h> #include <linux/pinctrl/pinctrl-state.h> diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 288a8081a9..8ff23bf5a8 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -62,9 +62,6 @@ struct pipe_inode_info { unsigned int tail; unsigned int max_usage; unsigned int ring_size; -#ifdef CONFIG_WATCH_QUEUE - bool note_loss; -#endif unsigned int nr_accounted; unsigned int readers; unsigned int writers; @@ -72,6 +69,9 @@ struct pipe_inode_info { unsigned int r_counter; unsigned int w_counter; bool poll_usage; +#ifdef CONFIG_WATCH_QUEUE + bool note_loss; +#endif struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h index 80cb00db42..79594aeb16 100644 --- a/include/linux/pktcdvd.h +++ b/include/linux/pktcdvd.h @@ -154,7 +154,9 @@ struct packet_stacked_data struct pktcdvd_device { - struct block_device *bdev; /* dev attached */ + 
struct bdev_handle *bdev_handle; /* dev attached */ + /* handle acquired for bdev during pkt_open_dev() */ + struct bdev_handle *open_bdev_handle; dev_t pkt_dev; /* our dev */ struct packet_settings settings; struct packet_stats stats; diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index ab721cf13a..7dae17b62a 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4436,8 +4436,20 @@ struct ec_response_i2c_passthru_protect { * These commands are for sending and receiving message via HDMI CEC */ +#define EC_CEC_MAX_PORTS 16 + #define MAX_CEC_MSG_LEN 16 +/* + * Helper macros for packing/unpacking cec_events. + * bits[27:0] : bitmask of events from enum mkbp_cec_event + * bits[31:28]: port number + */ +#define EC_MKBP_EVENT_CEC_PACK(events, port) \ + (((events) & GENMASK(27, 0)) | (((port) & 0xf) << 28)) +#define EC_MKBP_EVENT_CEC_GET_EVENTS(event) ((event) & GENMASK(27, 0)) +#define EC_MKBP_EVENT_CEC_GET_PORT(event) (((event) >> 28) & 0xf) + /* CEC message from the AP to be written on the CEC bus */ #define EC_CMD_CEC_WRITE_MSG 0x00B8 @@ -4449,19 +4461,54 @@ struct ec_params_cec_write { uint8_t msg[MAX_CEC_MSG_LEN]; } __ec_align1; +/** + * struct ec_params_cec_write_v1 - Message to write to the CEC bus + * @port: CEC port to write the message on + * @msg_len: length of msg in bytes + * @msg: message content to write to the CEC bus + */ +struct ec_params_cec_write_v1 { + uint8_t port; + uint8_t msg_len; + uint8_t msg[MAX_CEC_MSG_LEN]; +} __ec_align1; + +/* CEC message read from a CEC bus reported back to the AP */ +#define EC_CMD_CEC_READ_MSG 0x00B9 + +/** + * struct ec_params_cec_read - Read a message from the CEC bus + * @port: CEC port to read a message on + */ +struct ec_params_cec_read { + uint8_t port; +} __ec_align1; + +/** + * struct ec_response_cec_read - Message read from the CEC bus + * @msg_len: length of msg in bytes + * @msg: message content read from the CEC bus + */ +struct ec_response_cec_read { + uint8_t msg_len; + uint8_t msg[MAX_CEC_MSG_LEN]; +} __ec_align1; + /* Set various CEC parameters */ #define EC_CMD_CEC_SET 0x00BA /** * struct ec_params_cec_set - CEC parameters set * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS + * @port: CEC port to set the parameter on * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC * or 1 to enable CEC functionality, in case cmd is * CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical * address between 0 and 15 or 0xff to unregister */ struct ec_params_cec_set { - uint8_t cmd; /* enum cec_command */ + uint8_t cmd : 4; /* enum cec_command */ + uint8_t port : 4; uint8_t val; } __ec_align1; @@ -4471,9 +4518,11 @@ struct ec_params_cec_set { /** * struct ec_params_cec_get - CEC parameters get * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS + * @port: CEC port to get the parameter on */ struct ec_params_cec_get { - uint8_t cmd; /* enum cec_command */ + uint8_t cmd : 4; /* enum cec_command */ + uint8_t port : 4; } __ec_align1; /** @@ -4487,6 +4536,17 @@ struct ec_response_cec_get { uint8_t val; } __ec_align1; +/* Get the number of CEC ports */ +#define EC_CMD_CEC_PORT_COUNT 0x00C1 + +/** + * struct ec_response_cec_port_count - CEC port count response + * @port_count: number of CEC ports + */ +struct ec_response_cec_port_count { + uint8_t port_count; +} __ec_align1; + /* CEC parameters command */ enum cec_command { /* CEC reading, writing and 
events enable */ @@ -4501,6 +4561,8 @@ enum mkbp_cec_event { EC_MKBP_CEC_SEND_OK = BIT(0), /* Outgoing message was not acknowledged */ EC_MKBP_CEC_SEND_FAILED = BIT(1), + /* Incoming message can be read out by AP */ + EC_MKBP_CEC_HAVE_DATA = BIT(2), }; /*****************************************************************************/ diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 4f9f756bc1..8865e350c1 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -258,7 +258,7 @@ bool cros_ec_check_features(struct cros_ec_dev *ec, int feature); int cros_ec_get_sensor_count(struct cros_ec_dev *ec); -int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata, +int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, const void *outdata, size_t outsize, void *indata, size_t insize); /** diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index f377817ce7..cdd8cfb424 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -144,9 +144,6 @@ #define OMAP_MAX_GPIO_LINES 192 -#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr)) -#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES) - #ifndef __ASSEMBLER__ struct omap_gpio_reg_offs { u16 revision; diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h index f2781aa7ef..70e8a6bec0 100644 --- a/include/linux/platform_data/gsc_hwmon.h +++ b/include/linux/platform_data/gsc_hwmon.h @@ -40,6 +40,6 @@ struct gsc_hwmon_platform_data { unsigned int resolution; unsigned int vreference; unsigned int fan_base; - struct gsc_hwmon_channel channels[]; + struct gsc_hwmon_channel channels[] __counted_by(nchannels); }; #endif diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h index 0dd851ea1c..7fcb55fe21 100644 --- a/include/linux/platform_data/omap-twl4030.h +++ b/include/linux/platform_data/omap-twl4030.h @@ -37,9 +37,6 @@ struct omap_tw4030_pdata { bool has_digimic0; bool has_digimic1; u8 has_linein; - - /* Jack detect GPIO or <= 0 if it is not implemented */ - int jack_detect; }; #endif /* _OMAP_TWL4030_H_ */ diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h index 96c1a14ab3..3c3787c4d9 100644 --- a/include/linux/platform_data/pca953x.h +++ b/include/linux/platform_data/pca953x.h @@ -11,21 +11,8 @@ struct pca953x_platform_data { /* number of the first GPIO */ unsigned gpio_base; - /* initial polarity inversion setting */ - u32 invert; - /* interrupt base */ int irq_base; - - void *context; /* param to setup/teardown */ - - int (*setup)(struct i2c_client *client, - unsigned gpio, unsigned ngpio, - void *context); - void (*teardown)(struct i2c_client *client, - unsigned gpio, unsigned ngpio, - void *context); - const char *const *names; }; #endif /* _LINUX_PCA953X_H */ diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h index d661399b21..6c19d4fbbe 100644 --- a/include/linux/platform_data/shmob_drm.h +++ b/include/linux/platform_data/shmob_drm.h @@ -10,7 +10,7 @@ #ifndef __SHMOB_DRM_H__ #define __SHMOB_DRM_H__ -#include <drm/drm_mode.h> +#include <video/videomode.h> enum shmob_drm_clk_source { SHMOB_DRM_CLK_BUS, @@ -18,72 +18,21 @@ enum shmob_drm_clk_source { SHMOB_DRM_CLK_EXTERNAL, }; -enum shmob_drm_interface { - 
SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */ - SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */ - SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */ - SHMOB_DRM_IFACE_RGB12B, /* 12bpp */ - SHMOB_DRM_IFACE_RGB16, /* 16bpp */ - SHMOB_DRM_IFACE_RGB18, /* 18bpp */ - SHMOB_DRM_IFACE_RGB24, /* 24bpp */ - SHMOB_DRM_IFACE_YUV422, /* 16bpp */ - SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */ - SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */ - SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */ - SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */ - SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */ - SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */ - SHMOB_DRM_IFACE_SYS16A, /* 16bpp */ - SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */ - SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */ - SHMOB_DRM_IFACE_SYS18, /* 18bpp */ - SHMOB_DRM_IFACE_SYS24, /* 24bpp */ -}; - -struct shmob_drm_backlight_data { - const char *name; - int max_brightness; - int (*get_brightness)(void); - int (*set_brightness)(int brightness); -}; - struct shmob_drm_panel_data { unsigned int width_mm; /* Panel width in mm */ unsigned int height_mm; /* Panel height in mm */ - struct drm_mode_modeinfo mode; + struct videomode mode; }; -struct shmob_drm_sys_interface_data { - unsigned int read_latch:6; - unsigned int read_setup:8; - unsigned int read_cycle:8; - unsigned int read_strobe:8; - unsigned int write_setup:8; - unsigned int write_cycle:8; - unsigned int write_strobe:8; - unsigned int cs_setup:3; - unsigned int vsync_active_high:1; - unsigned int vsync_dir_input:1; -}; - -#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */ -#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */ -#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */ -#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */ -#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */ - struct shmob_drm_interface_data { - enum shmob_drm_interface interface; - struct shmob_drm_sys_interface_data sys; + unsigned int bus_fmt; /* MEDIA_BUS_FMT_* */ unsigned int clk_div; - unsigned int flags; }; struct shmob_drm_platform_data { enum shmob_drm_clk_source clk_source; struct shmob_drm_interface_data iface; struct shmob_drm_panel_data panel; - struct shmob_drm_backlight_data backlight; }; #endif /* __SHMOB_DRM_H__ */ diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 16e99a1c37..ab1c7deff1 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -58,6 +58,10 @@ #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */ #define ASUS_WMI_DEVID_LIGHTBAR 0x00050025 +/* This can only be used to disable the screen, not re-enable */ +#define ASUS_WMI_DEVID_SCREENPAD_POWER 0x00050031 +/* Writing a brightness re-enables the screen if disabled */ +#define ASUS_WMI_DEVID_SCREENPAD_LIGHT 0x00050032 #define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018 #define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075 @@ -110,6 +114,9 @@ /* Charging mode - 1=Barrel, 2=USB */ #define ASUS_WMI_DEVID_CHARGE_MODE 0x0012006C +/* MCU powersave mode */ +#define ASUS_WMI_DEVID_MCU_POWERSAVE 0x001200E2 + /* epu is connected? 
1 == true */ #define ASUS_WMI_DEVID_EGPU_CONNECTED 0x00090018 /* egpu on/off */ diff --git a/include/linux/pm.h b/include/linux/pm.h index 629c1633bb..92a4f69de0 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -734,6 +734,7 @@ extern void dev_pm_put_subsys_data(struct device *dev); * @activate: Called before executing probe routines for bus types and drivers. * @sync: Called after successful driver probe. * @dismiss: Called after unsuccessful driver probe and after driver removal. + * @set_performance_state: Called to request a new performance state. * * Power domains provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions instead of @@ -746,6 +747,7 @@ struct dev_pm_domain { int (*activate)(struct device *dev); void (*sync)(struct device *dev); void (*dismiss)(struct device *dev); + int (*set_performance_state)(struct device *dev, unsigned int state); }; /* diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index f776fb93ea..34663d0d5c 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -61,6 +61,10 @@ * GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its * components' next wakeup when determining the * optimal idle state. + * + * GENPD_FLAG_OPP_TABLE_FW: The genpd provider supports performance states, + * but its corresponding OPP tables are not + * described in DT, but are given directly by FW. */ #define GENPD_FLAG_PM_CLK (1U << 0) #define GENPD_FLAG_IRQ_SAFE (1U << 1) @@ -69,6 +73,7 @@ #define GENPD_FLAG_CPU_DOMAIN (1U << 4) #define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5) #define GENPD_FLAG_MIN_RESIDENCY (1U << 6) +#define GENPD_FLAG_OPP_TABLE_FW (1U << 7) enum gpd_status { GENPD_STATE_ON = 0, /* PM domain is on */ @@ -430,6 +435,7 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev, void dev_pm_domain_detach(struct device *dev, bool power_off); int dev_pm_domain_start(struct device *dev); void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd); +int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state); #else static inline int dev_pm_domain_attach(struct device *dev, bool power_on) { @@ -452,6 +458,11 @@ static inline int dev_pm_domain_start(struct device *dev) } static inline void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd) {} +static inline int dev_pm_domain_set_performance_state(struct device *dev, + unsigned int state) +{ + return 0; +} #endif #endif /* _LINUX_PM_DOMAIN_H */ diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 91f87d7e80..18a102174c 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -92,6 +92,18 @@ struct dev_pm_opp_config { struct device ***virt_devs; }; +/** + * struct dev_pm_opp_data - The data to use to initialize an OPP. + * @level: The performance level for the OPP. + * @freq: The clock rate in Hz for the OPP. + * @u_volt: The voltage in uV for the OPP. 
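Paired with dev_pm_opp_add_dynamic() further below, this struct replaces the old positional freq/voltage arguments. A minimal sketch of registering one dynamic OPP follows; the function name and values are made up for illustration:

#include <linux/pm_opp.h>

static int my_add_turbo_opp(struct device *dev)
{
        struct dev_pm_opp_data data = {
                .level  = 2,            /* hypothetical performance level */
                .freq   = 800000000,    /* 800 MHz */
                .u_volt = 1100000,      /* 1.1 V */
        };

        return dev_pm_opp_add_dynamic(dev, &data);
}

Existing dev_pm_opp_add() callers keep working: the hunk below turns that function into a static inline that builds this struct internally.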
+ */ +struct dev_pm_opp_data { + unsigned int level; + unsigned long freq; + unsigned long u_volt; +}; + #if defined(CONFIG_PM_OPP) struct opp_table *dev_pm_opp_get_opp_table(struct device *dev); @@ -144,6 +156,9 @@ struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, unsigned int *level); +struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, + unsigned int *level); + struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, int index); @@ -152,8 +167,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, void dev_pm_opp_put(struct dev_pm_opp *opp); -int dev_pm_opp_add(struct device *dev, unsigned long freq, - unsigned long u_volt); +int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp); + void dev_pm_opp_remove(struct device *dev, unsigned long freq); void dev_pm_opp_remove_all_dynamic(struct device *dev); @@ -308,6 +323,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, return ERR_PTR(-EOPNOTSUPP); } +static inline struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev, + unsigned int *level) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, int index) { @@ -322,8 +343,8 @@ static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} -static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, - unsigned long u_volt) +static inline int +dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp) { return -EOPNOTSUPP; } @@ -519,6 +540,17 @@ static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_ta /* OPP Configuration helpers */ +static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, + unsigned long u_volt) +{ + struct dev_pm_opp_data data = { + .freq = freq, + .u_volt = u_volt, + }; + + return dev_pm_opp_add_dynamic(dev, &data); +} + /* Regulators helpers */ static inline int dev_pm_opp_set_regulators(struct device *dev, const char * const names[]) diff --git a/include/linux/pnp.h b/include/linux/pnp.h index c2a7cfbca7..267fb8a4fb 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -291,7 +291,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data) struct pnp_fixup { char id[7]; - void (*quirk_function) (struct pnp_dev * dev); /* fixup function */ + void (*quirk_function) (struct pnp_dev *dev); /* fixup function */ }; /* config parameters */ @@ -419,8 +419,8 @@ struct pnp_protocol { /* protocol specific suspend/resume */ bool (*can_wakeup) (struct pnp_dev *dev); - int (*suspend) (struct pnp_dev * dev, pm_message_t state); - int (*resume) (struct pnp_dev * dev); + int (*suspend) (struct pnp_dev *dev, pm_message_t state); + int (*resume) (struct pnp_dev *dev); /* used by pnp layer only (look but don't touch) */ unsigned char number; /* protocol number */ @@ -492,7 +492,7 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; } -static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;} +static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { 
return 0; } /* protocol helpers */ static inline int pnp_is_active(struct pnp_dev *dev) { return 0; } diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 468328b1e1..ef8619f489 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -14,6 +14,7 @@ #include <linux/rwsem.h> struct posix_clock; +struct posix_clock_context; /** * struct posix_clock_operations - functional interface to the clock @@ -50,18 +51,18 @@ struct posix_clock_operations { /* * Optional character device methods: */ - long (*ioctl) (struct posix_clock *pc, - unsigned int cmd, unsigned long arg); + long (*ioctl)(struct posix_clock_context *pccontext, unsigned int cmd, + unsigned long arg); - int (*open) (struct posix_clock *pc, fmode_t f_mode); + int (*open)(struct posix_clock_context *pccontext, fmode_t f_mode); - __poll_t (*poll) (struct posix_clock *pc, - struct file *file, poll_table *wait); + __poll_t (*poll)(struct posix_clock_context *pccontext, struct file *file, + poll_table *wait); - int (*release) (struct posix_clock *pc); + int (*release)(struct posix_clock_context *pccontext); - ssize_t (*read) (struct posix_clock *pc, - uint flags, char __user *buf, size_t cnt); + ssize_t (*read)(struct posix_clock_context *pccontext, uint flags, + char __user *buf, size_t cnt); }; /** @@ -91,6 +92,24 @@ struct posix_clock { }; /** + * struct posix_clock_context - represents clock file operations context + * + * @clk: Pointer to the clock + * @private_clkdata: Pointer to user data + * + * Drivers should use struct posix_clock_context during specific character + * device file operation methods to access the posix clock. + * + * Drivers can store a private data structure during the open operation + * if they have specific information that is required in other file + * operations. + */ +struct posix_clock_context { + struct posix_clock *clk; + void *private_clkdata; +}; + +/** * posix_clock_register() - register a new clock * @clk: Pointer to the clock. Caller must provide 'ops' field * @dev: Pointer to the initialized device. Caller must provide diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 85b86768c0..c0992a77fe 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -767,7 +767,6 @@ struct power_supply_battery_info { int bti_resistance_tolerance; }; -extern struct blocking_notifier_head power_supply_notifier; extern int power_supply_reg_notifier(struct notifier_block *nb); extern void power_supply_unreg_notifier(struct notifier_block *nb); #if IS_ENABLED(CONFIG_POWER_SUPPLY) diff --git a/include/linux/property.h b/include/linux/property.h index 8c3c6685a2..9f2585d705 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -10,6 +10,7 @@ #ifndef _LINUX_PROPERTY_H_ #define _LINUX_PROPERTY_H_ +#include <linux/args.h> #include <linux/bits.h> #include <linux/fwnode.h> #include <linux/stddef.h> @@ -288,7 +289,7 @@ struct software_node_ref_args { #define SOFTWARE_NODE_REFERENCE(_ref_, ...) 
\ (const struct software_node_ref_args) { \ .node = _ref_, \ - .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \ + .nargs = COUNT_ARGS(__VA_ARGS__), \ .args = { __VA_ARGS__ }, \ } diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h index eceda1d140..730f77381d 100644 --- a/include/linux/pseudo_fs.h +++ b/include/linux/pseudo_fs.h @@ -5,7 +5,7 @@ struct pseudo_fs_context { const struct super_operations *ops; - const struct xattr_handler **xattr; + const struct xattr_handler * const *xattr; const struct dentry_operations *dops; unsigned long magic; }; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index eaaef3ffec..90507d4afc 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -393,6 +393,10 @@ static inline void user_single_step_report(struct pt_regs *regs) #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) #endif +#ifndef exception_ip +#define exception_ip(x) instruction_pointer(x) +#endif + extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); diff --git a/include/linux/pwm.h b/include/linux/pwm.h index fe0f38ce1b..cda3597b84 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -71,7 +71,6 @@ struct pwm_state { * @hwpwm: per-chip relative index of the PWM device * @pwm: global index of the PWM device * @chip: PWM chip providing this PWM device - * @chip_data: chip-private data associated with the PWM device * @args: PWM arguments * @state: last applied state * @last: last implemented state (for PWM_DEBUG) @@ -82,7 +81,6 @@ struct pwm_device { unsigned int hwpwm; unsigned int pwm; struct pwm_chip *chip; - void *chip_data; struct pwm_args args; struct pwm_state state; @@ -267,7 +265,6 @@ struct pwm_capture { * @get_state: get the current PWM state. This function is only * called once per PWM device when the PWM chip is * registered. 
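Returning to the posix_clock_context introduced above: a hedged sketch of how a clock driver might stash per-open state, with hypothetical type and function names:

#include <linux/posix-clock.h>
#include <linux/slab.h>

struct my_open_state {
        int flags;      /* hypothetical per-open bookkeeping */
};

static int my_clock_open(struct posix_clock_context *pccontext, fmode_t fmode)
{
        struct my_open_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

        if (!st)
                return -ENOMEM;
        pccontext->private_clkdata = st;  /* retrieved in later file ops */
        return 0;
}

static int my_clock_release(struct posix_clock_context *pccontext)
{
        kfree(pccontext->private_clkdata);
        pccontext->private_clkdata = NULL;
        return 0;
}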
- * @owner: helps prevent removal of modules exporting active PWMs */ struct pwm_ops { int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); @@ -278,13 +275,13 @@ struct pwm_ops { const struct pwm_state *state); int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state); - struct module *owner; }; /** * struct pwm_chip - abstract a PWM controller * @dev: device providing the PWMs * @ops: callbacks for this PWM controller + * @owner: module providing this chip * @base: number of first PWM controlled by this chip * @npwm: number of PWMs controlled by this chip * @of_xlate: request a PWM device given a device tree PWM specifier @@ -295,6 +292,7 @@ struct pwm_ops { struct pwm_chip { struct device *dev; const struct pwm_ops *ops; + struct module *owner; int base; unsigned int npwm; @@ -383,13 +381,13 @@ static inline void pwm_disable(struct pwm_device *pwm) /* PWM provider APIs */ int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, unsigned long timeout); -int pwm_set_chip_data(struct pwm_device *pwm, void *data); -void *pwm_get_chip_data(struct pwm_device *pwm); -int pwmchip_add(struct pwm_chip *chip); +int __pwmchip_add(struct pwm_chip *chip, struct module *owner); +#define pwmchip_add(chip) __pwmchip_add(chip, THIS_MODULE) void pwmchip_remove(struct pwm_chip *chip); -int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip); +int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner); +#define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE) struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, unsigned int index, @@ -445,16 +443,6 @@ static inline int pwm_capture(struct pwm_device *pwm, return -EINVAL; } -static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data) -{ - return -EINVAL; -} - -static inline void *pwm_get_chip_data(struct pwm_device *pwm) -{ - return NULL; -} - static inline int pwmchip_add(struct pwm_chip *chip) { return -EINVAL; diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 006e18decf..98030accf6 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -84,8 +84,6 @@ extern const struct raid6_calls raid6_intx1; extern const struct raid6_calls raid6_intx2; extern const struct raid6_calls raid6_intx4; extern const struct raid6_calls raid6_intx8; -extern const struct raid6_calls raid6_intx16; -extern const struct raid6_calls raid6_intx32; extern const struct raid6_calls raid6_mmxx1; extern const struct raid6_calls raid6_mmxx2; extern const struct raid6_calls raid6_sse1x1; diff --git a/include/linux/rcu_notifier.h b/include/linux/rcu_notifier.h new file mode 100644 index 0000000000..5640f02477 --- /dev/null +++ b/include/linux/rcu_notifier.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Read-Copy Update notifiers, initially RCU CPU stall notifier. + * Separate from rcupdate.h to avoid #include loops. + * + * Copyright (C) 2023 Paul E. McKenney. + */ + +#ifndef __LINUX_RCU_NOTIFIER_H +#define __LINUX_RCU_NOTIFIER_H + +// Actions for RCU CPU stall notifier calls. 
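With the module owner moved from pwm_ops into pwm_chip and recorded by the pwmchip_add()/devm_pwmchip_add() wrappers above, registration looks like this sketch; the ops, driver names, and npwm value are hypothetical:

#include <linux/platform_device.h>
#include <linux/pwm.h>

static int my_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                        const struct pwm_state *state)
{
        return 0;       /* program the hardware here */
}

static const struct pwm_ops my_pwm_ops = {
        .apply = my_pwm_apply,  /* no .owner member anymore */
};

static int my_pwm_probe(struct platform_device *pdev)
{
        struct pwm_chip *chip;

        chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        chip->dev = &pdev->dev;
        chip->ops = &my_pwm_ops;
        chip->npwm = 4;

        /* Expands to __devm_pwmchip_add(&pdev->dev, chip, THIS_MODULE). */
        return devm_pwmchip_add(&pdev->dev, chip);
}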
+#define RCU_STALL_NOTIFY_NORM 1 +#define RCU_STALL_NOTIFY_EXP 2 + +#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER) + +#include <linux/notifier.h> +#include <linux/types.h> + +int rcu_stall_chain_notifier_register(struct notifier_block *n); +int rcu_stall_chain_notifier_unregister(struct notifier_block *n); + +#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER) + +// No RCU CPU stall warnings in Tiny RCU. +static inline int rcu_stall_chain_notifier_register(struct notifier_block *n) { return -EEXIST; } +static inline int rcu_stall_chain_notifier_unregister(struct notifier_block *n) { return -ENOENT; } + +#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER) + +#endif /* __LINUX_RCU_NOTIFIER_H */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 44aab5c0bd..31d523c4e0 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -122,8 +122,6 @@ static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) void rcu_init(void); extern int rcu_scheduler_active; void rcu_sched_clock_irq(int user); -void rcu_report_dead(unsigned int cpu); -void rcutree_migrate_callbacks(int cpu); #ifdef CONFIG_TASKS_RCU_GENERIC void rcu_init_tasks_generic(void); diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 7b94929290..d9ac7b136a 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -171,6 +171,6 @@ static inline void rcu_all_qs(void) { barrier(); } #define rcutree_offline_cpu NULL #define rcutree_dead_cpu NULL #define rcutree_dying_cpu NULL -static inline void rcu_cpu_starting(unsigned int cpu) { } +static inline void rcutree_report_cpu_starting(unsigned int cpu) { } #endif /* __LINUX_RCUTINY_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 126f6b418f..254244202e 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -37,7 +37,6 @@ void synchronize_rcu_expedited(void); void kvfree_call_rcu(struct rcu_head *head, void *ptr); void rcu_barrier(void); -bool rcu_eqs_special_set(int cpu); void rcu_momentary_dyntick_idle(void); void kfree_rcu_scheduler_running(void); bool rcu_gp_might_be_stalled(void); @@ -111,9 +110,21 @@ void rcu_all_qs(void); /* RCUtree hotplug events */ int rcutree_prepare_cpu(unsigned int cpu); int rcutree_online_cpu(unsigned int cpu); -int rcutree_offline_cpu(unsigned int cpu); +void rcutree_report_cpu_starting(unsigned int cpu); + +#ifdef CONFIG_HOTPLUG_CPU int rcutree_dead_cpu(unsigned int cpu); int rcutree_dying_cpu(unsigned int cpu); -void rcu_cpu_starting(unsigned int cpu); +int rcutree_offline_cpu(unsigned int cpu); +#else +#define rcutree_dead_cpu NULL +#define rcutree_dying_cpu NULL +#define rcutree_offline_cpu NULL +#endif + +void rcutree_migrate_callbacks(int cpu); + +/* Called from hotplug and also arm64 early secondary boot failure */ +void rcutree_report_cpu_dead(void); #endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 2b6bb593be..c4cc3b89ce 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -129,11 +129,14 @@ enum sys_off_mode { * @cb_data: User's callback data. * @cmd: Command string. Currently used only by the sys-off restart mode, * NULL otherwise. + * @dev: Device of the sys-off handler. Only if known (devm_register_*), + * NULL otherwise. 
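A hedged sketch of consuming the stall notifier chain declared above; the handler name is made up, and what the void pointer argument carries is not shown here:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/rcu_notifier.h>

static int my_stall_handler(struct notifier_block *nb, unsigned long action,
                            void *unused)
{
        pr_info("RCU stall (%s grace period)\n",
                action == RCU_STALL_NOTIFY_EXP ? "expedited" : "normal");
        return NOTIFY_OK;
}

static struct notifier_block my_stall_nb = {
        .notifier_call = my_stall_handler,
};

/* In some init path: rcu_stall_chain_notifier_register(&my_stall_nb); */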
*/ struct sys_off_data { int mode; void *cb_data; const char *cmd; + struct device *dev; }; struct sys_off_handler * diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h index c71a6a9fce..562386f9b8 100644 --- a/include/linux/regulator/mt6358-regulator.h +++ b/include/linux/regulator/mt6358-regulator.h @@ -86,6 +86,9 @@ enum { MT6366_ID_VMC, MT6366_ID_VAUD28, MT6366_ID_VSIM2, + MT6366_ID_VM18, + MT6366_ID_VMDDR, + MT6366_ID_VSRAM_CORE, MT6366_ID_RG_MAX, }; diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 8334eeacfe..66942d7fba 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -94,7 +94,7 @@ struct rdt_domain { * zero CBM. * @shareable_bits: Bitmask of shareable resource with other * executing entities - * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. + * @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid. * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache * level has CPU scope. */ @@ -102,7 +102,7 @@ struct resctrl_cache { unsigned int cbm_len; unsigned int min_cbm_bits; unsigned int shareable_bits; - bool arch_has_sparse_bitmaps; + bool arch_has_sparse_bitmasks; bool arch_has_per_cpu_cfg; }; diff --git a/include/linux/rethook.h b/include/linux/rethook.h index 544e1bbfad..ba60962805 100644 --- a/include/linux/rethook.h +++ b/include/linux/rethook.h @@ -6,11 +6,10 @@ #define _LINUX_RETHOOK_H #include <linux/compiler.h> -#include <linux/freelist.h> +#include <linux/objpool.h> #include <linux/kallsyms.h> #include <linux/llist.h> #include <linux/rcupdate.h> -#include <linux/refcount.h> struct rethook_node; @@ -35,14 +34,12 @@ struct rethook { * rethook_handler_t. */ void (__rcu *handler) (struct rethook_node *, void *, unsigned long, struct pt_regs *); - struct freelist_head pool; - refcount_t ref; + struct objpool_head pool; struct rcu_head rcu; }; /** * struct rethook_node - The rethook shadow-stack entry node. - * @freelist: The freelist, linked to struct rethook::pool. * @rcu: The rcu_head for deferred freeing. * @llist: The llist, linked to a struct task_struct::rethooks. * @rethook: The pointer to the struct rethook. @@ -53,20 +50,16 @@ struct rethook { * on each entry of the shadow stack. 
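A sketch of a power-off handler consuming the new @dev member above; the handler name is hypothetical, and the registration call assumes the usual devm_register_sys_off_handler() helper:

#include <linux/device.h>
#include <linux/reboot.h>

static int my_power_off(struct sys_off_data *data)
{
        /* data->dev is only set for devm_-registered handlers. */
        if (data->dev)
                dev_info(data->dev, "powering board off\n");
        return NOTIFY_DONE;
}

/*
 * Registration, e.g. from probe():
 *      devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
 *                                    SYS_OFF_PRIO_DEFAULT, my_power_off, NULL);
 */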
*/ struct rethook_node { - union { - struct freelist_node freelist; - struct rcu_head rcu; - }; + struct rcu_head rcu; struct llist_node llist; struct rethook *rethook; unsigned long ret_addr; unsigned long frame; }; -struct rethook *rethook_alloc(void *data, rethook_handler_t handler); +struct rethook *rethook_alloc(void *data, rethook_handler_t handler, int size, int num); void rethook_stop(struct rethook *rh); void rethook_free(struct rethook *rh); -void rethook_add_node(struct rethook *rh, struct rethook_node *node); struct rethook_node *rethook_try_get(struct rethook *rh); void rethook_recycle(struct rethook_node *node); void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount); @@ -103,4 +96,3 @@ void rethook_flush_task(struct task_struct *tk); #endif #endif - diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b1fb58b435..3c2fc291b0 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -189,7 +189,7 @@ typedef int __bitwise rmap_t; /* * rmap interfaces called when adding or removing pte of page */ -void page_move_anon_rmap(struct page *, struct vm_area_struct *); +void folio_move_anon_rmap(struct folio *, struct vm_area_struct *); void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long address, rmap_t flags); void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, @@ -203,7 +203,7 @@ void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr, void page_remove_rmap(struct page *, struct vm_area_struct *, bool compound); -void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, +void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address, rmap_t flags); void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 4c0bcbeb1f..5f8e438a03 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -225,6 +225,23 @@ static inline bool is_leap_year(unsigned int year) return (!(year % 4) && (year % 100)) || !(year % 400); } +/** + * rtc_bound_alarmtime() - Return alarm time bound by rtc limit + * @rtc: Pointer to rtc device structure + * @requested: Requested alarm timeout + * + * Return: Alarm timeout bound by maximum alarm time supported by rtc. + */ +static inline ktime_t rtc_bound_alarmtime(struct rtc_device *rtc, + ktime_t requested) +{ + if (rtc->alarm_offset_max && + rtc->alarm_offset_max * MSEC_PER_SEC < ktime_to_ms(requested)) + return ms_to_ktime(rtc->alarm_offset_max * MSEC_PER_SEC); + + return requested; +} + #define devm_rtc_register_device(device) \ __devm_rtc_register_device(THIS_MODULE, device) diff --git a/include/linux/sched.h b/include/linux/sched.h index 77f01ac385..292c316972 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -63,7 +63,6 @@ struct robust_list_head; struct root_domain; struct rq; struct sched_attr; -struct sched_param; struct seq_file; struct sighand_struct; struct signal_struct; @@ -370,6 +369,10 @@ extern struct root_domain def_root_domain; extern struct mutex sched_domains_mutex; #endif +struct sched_param { + int sched_priority; +}; + struct sched_info { #ifdef CONFIG_SCHED_INFO /* Cumulative counters: */ @@ -750,10 +753,8 @@ struct task_struct { #endif unsigned int __state; -#ifdef CONFIG_PREEMPT_RT /* saved state for "spinlock sleepers" */ unsigned int saved_state; -#endif /* * This begins the randomizable portion of task_struct. 
Only @@ -875,6 +876,7 @@ struct task_struct { struct mm_struct *mm; struct mm_struct *active_mm; + struct address_space *faults_disabled_mapping; int exit_state; int exit_code; @@ -911,6 +913,9 @@ struct task_struct { * ->sched_remote_wakeup gets used, so it can be in this word. */ unsigned sched_remote_wakeup:1; +#ifdef CONFIG_RT_MUTEXES + unsigned sched_rt_mutex:1; +#endif /* Bit to tell LSMs we're in execve(): */ unsigned in_execve:1; @@ -1002,7 +1007,6 @@ struct task_struct { /* PID/PID hash table linkage. */ struct pid *thread_pid; struct hlist_node pid_links[PIDTYPE_MAX]; - struct list_head thread_group; struct list_head thread_node; struct completion *vfork_done; @@ -1443,6 +1447,10 @@ struct task_struct { struct mem_cgroup *active_memcg; #endif +#ifdef CONFIG_MEMCG_KMEM + struct obj_cgroup *objcg; +#endif + #ifdef CONFIG_BLK_CGROUP struct gendisk *throttle_disk; #endif diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 1b37fa8fc7..02f5090ffe 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ +#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) #define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */ #define MMF_MULTIPROCESS 26 /* mm is shared between processes */ /* @@ -85,13 +86,15 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_HAS_MDWE 28 #define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE) -#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) -#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ - MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK) +#define MMF_HAS_MDWE_NO_INHERIT 29 -#define MMF_VM_MERGE_ANY 29 -#define MMF_HAS_MDWE_NO_INHERIT 30 +#define MMF_VM_MERGE_ANY 30 +#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY) + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ + MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\ + MMF_VM_MERGE_ANY_MASK) static inline unsigned long mmf_init_flags(unsigned long flags) { diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 7c83d4d5a9..df3aca89d4 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -1,4 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_DEADLINE_H +#define _LINUX_SCHED_DEADLINE_H /* * SCHED_DEADLINE tasks has negative priorities, reflecting @@ -34,3 +36,5 @@ extern void dl_add_task_root_domain(struct task_struct *p); extern void dl_clear_root_domain(struct root_domain *rd); #endif /* CONFIG_SMP */ + +#endif /* _LINUX_SCHED_DEADLINE_H */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 8d89c8c4fa..9a19f1b42f 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -403,6 +403,10 @@ DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg); * __GFP_ACCOUNT allocations till the end of the scope will be charged to the * given memcg. * + * Please, make sure that caller has a reference to the passed memcg structure, + * so its lifetime is guaranteed to exceed the scope between two + * set_active_memcg() calls. + * * NOTE: This function can nest. Users must save the return value and * reset the previous value after their own charging scope is over. 
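A minimal sketch of the save/restore pattern the note above asks for, assuming the caller already holds a reference on the memcg:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *my_charged_alloc(struct mem_cgroup *memcg, size_t size)
{
        struct mem_cgroup *old_memcg;
        void *p;

        old_memcg = set_active_memcg(memcg);
        p = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT); /* charged to memcg */
        set_active_memcg(old_memcg);                   /* restore, may nest */

        return p;
}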
*/ diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h index 3988762efe..52b22c5c39 100644 --- a/include/linux/sched/numa_balancing.h +++ b/include/linux/sched/numa_balancing.h @@ -15,13 +15,23 @@ #define TNF_FAULT_LOCAL 0x08 #define TNF_MIGRATE_FAIL 0x10 +enum numa_vmaskip_reason { + NUMAB_SKIP_UNSUITABLE, + NUMAB_SKIP_SHARED_RO, + NUMAB_SKIP_INACCESSIBLE, + NUMAB_SKIP_SCAN_DELAY, + NUMAB_SKIP_PID_INACTIVE, + NUMAB_SKIP_IGNORE_PID, + NUMAB_SKIP_SEQ_COMPLETED, +}; + #ifdef CONFIG_NUMA_BALANCING extern void task_numa_fault(int last_node, int node, int pages, int flags); extern pid_t task_numa_group_id(struct task_struct *p); extern void set_numabalancing_state(bool enabled); extern void task_numa_free(struct task_struct *p, bool final); -extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, - int src_nid, int dst_cpu); +bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio, + int src_nid, int dst_cpu); #else static inline void task_numa_fault(int last_node, int node, int pages, int flags) @@ -38,7 +48,7 @@ static inline void task_numa_free(struct task_struct *p, bool final) { } static inline bool should_numa_migrate_memory(struct task_struct *p, - struct page *page, int src_nid, int dst_cpu) + struct folio *folio, int src_nid, int dst_cpu) { return true; } diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index 994c25640e..b2b9e6eb96 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk) } #ifdef CONFIG_RT_MUTEXES +extern void rt_mutex_pre_schedule(void); +extern void rt_mutex_schedule(void); +extern void rt_mutex_post_schedule(void); + /* * Must hold either p->pi_lock or task_rq(p)->lock. */ diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h index fad77b5172..a8b28647aa 100644 --- a/include/linux/sched/sd_flags.h +++ b/include/linux/sched/sd_flags.h @@ -110,6 +110,13 @@ SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS) /* + * Domain members share CPU cluster (LLC tags or L2 cache) + * + * NEEDS_GROUPS: Clusters are shared between groups. + */ +SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS) + +/* * Domain members share CPU package resources (i.e. 
caches) * * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 0014d3adaf..3499c1a8b9 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -303,20 +303,11 @@ static inline void kernel_signal_stop(void) schedule(); } -#ifdef __ia64__ -# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3 -#else -# define ___ARCH_SI_IA64(_a1, _a2, _a3) -#endif -int force_sig_fault_to_task(int sig, int code, void __user *addr - ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) - , struct task_struct *t); -int force_sig_fault(int sig, int code, void __user *addr - ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)); -int send_sig_fault(int sig, int code, void __user *addr - ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) - , struct task_struct *t); +int force_sig_fault_to_task(int sig, int code, void __user *addr, + struct task_struct *t); +int force_sig_fault(int sig, int code, void __user *addr); +int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t); int force_sig_mceerr(int code, void __user *, short); int send_sig_mceerr(int code, void __user *, short, struct task_struct *); @@ -656,7 +647,8 @@ extern bool current_is_single_threaded(void); while ((t = next_thread(t)) != g) #define __for_each_thread(signal, t) \ - list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node) + list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \ + lockdep_is_held(&tasklist_lock)) #define for_each_thread(p, t) \ __for_each_thread((p)->signal, t) @@ -715,15 +707,26 @@ bool same_thread_group(struct task_struct *p1, struct task_struct *p2) return p1->signal == p2->signal; } -static inline struct task_struct *next_thread(const struct task_struct *p) +/* + * returns NULL if p is the last thread in the thread group + */ +static inline struct task_struct *__next_thread(struct task_struct *p) +{ + return list_next_or_null_rcu(&p->signal->thread_head, + &p->thread_node, + struct task_struct, + thread_node); +} + +static inline struct task_struct *next_thread(struct task_struct *p) { - return list_entry_rcu(p->thread_group.next, - struct task_struct, thread_group); + return __next_thread(p) ?: p->group_leader; } static inline int thread_group_empty(struct task_struct *p) { - return list_empty(&p->thread_group); + return thread_group_leader(p) && + list_is_last(&p->thread_node, &p->signal->thread_head); } #define delay_group_leader(p) \ diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h index 59d3736c45..fb1e295e7e 100644 --- a/include/linux/sched/smt.h +++ b/include/linux/sched/smt.h @@ -17,4 +17,4 @@ static inline bool sched_smt_active(void) { return false; } void arch_smt_update(void); -#endif +#endif /* _LINUX_SCHED_SMT_H */ diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 67b573d5bf..de545ba852 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -45,7 +45,7 @@ static inline int cpu_smt_flags(void) #ifdef CONFIG_SCHED_CLUSTER static inline int cpu_cluster_flags(void) { - return SD_SHARE_PKG_RESOURCES; + return SD_CLUSTER | SD_SHARE_PKG_RESOURCES; } #endif @@ -109,8 +109,6 @@ struct sched_domain { u64 max_newidle_lb_cost; unsigned long last_decay_max_lb_cost; - u64 avg_scan_cost; /* select_idle_sibling */ - #ifdef CONFIG_SCHEDSTATS /* load_balance() stats */ unsigned int lb_count[CPU_MAX_IDLE_TYPES]; @@ -179,6 +177,7 @@ cpumask_var_t 
*alloc_sched_domains(unsigned int ndoms); void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); bool cpus_share_cache(int this_cpu, int that_cpu); +bool cpus_share_resources(int this_cpu, int that_cpu); typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); typedef int (*sched_domain_flags_f)(void); @@ -232,6 +231,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) return true; } +static inline bool cpus_share_resources(int this_cpu, int that_cpu) +{ + return true; +} + #endif /* !CONFIG_SMP */ #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) diff --git a/include/linux/sched/types.h b/include/linux/sched/types.h index 3c3e049224..969aaf5ef9 100644 --- a/include/linux/sched/types.h +++ b/include/linux/sched/types.h @@ -20,4 +20,4 @@ struct task_cputime { unsigned long long sum_exec_runtime; }; -#endif +#endif /* _LINUX_SCHED_TYPES_H */ diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h index 837a23624a..bc60243d43 100644 --- a/include/linux/sched/vhost_task.h +++ b/include/linux/sched/vhost_task.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_VHOST_TASK_H -#define _LINUX_VHOST_TASK_H - +#ifndef _LINUX_SCHED_VHOST_TASK_H +#define _LINUX_SCHED_VHOST_TASK_H struct vhost_task; @@ -11,4 +10,4 @@ void vhost_task_start(struct vhost_task *vtsk); void vhost_task_stop(struct vhost_task *vtsk); void vhost_task_wake(struct vhost_task *vtsk); -#endif +#endif /* _LINUX_SCHED_VHOST_TASK_H */ diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 71923ae63b..f2f05fb42d 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -58,6 +58,8 @@ struct scmi_clock_info { u64 step_size; } range; }; + int num_parents; + u32 *parents; }; enum scmi_power_scale { @@ -80,6 +82,11 @@ struct scmi_protocol_handle; * @rate_set: set the clock rate of a clock * @enable: enables the specified clock * @disable: disables the specified clock + * @state_get: get the status of the specified clock + * @config_oem_get: get the value of an OEM specific clock config + * @config_oem_set: set the value of an OEM specific clock config + * @parent_get: get the parent id of a clk + * @parent_set: set the parent of a clock */ struct scmi_clk_proto_ops { int (*count_get)(const struct scmi_protocol_handle *ph); @@ -90,11 +97,19 @@ struct scmi_clk_proto_ops { u64 *rate); int (*rate_set)(const struct scmi_protocol_handle *ph, u32 clk_id, u64 rate); - int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id); - int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id); - int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id); - int (*disable_atomic)(const struct scmi_protocol_handle *ph, - u32 clk_id); + int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id, + bool atomic); + int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id, + bool atomic); + int (*state_get)(const struct scmi_protocol_handle *ph, u32 clk_id, + bool *enabled, bool atomic); + int (*config_oem_get)(const struct scmi_protocol_handle *ph, u32 clk_id, + u8 oem_type, u32 *oem_val, u32 *attributes, + bool atomic); + int (*config_oem_set)(const struct scmi_protocol_handle *ph, u32 clk_id, + u8 oem_type, u32 oem_val, bool atomic); + int (*parent_get)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 *parent_id); + int (*parent_set)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 parent_id); }; struct scmi_perf_domain_info { @@ -112,7 +127,6 @@ struct 
scmi_perf_domain_info { * @limits_get: gets limits on the performance level of a domain * @level_set: sets the performance level of a domain * @level_get: gets the performance level of a domain - * @device_domain_id: gets the scmi domain id for a given device * @transition_latency_get: gets the DVFS transition latency for a given device * @device_opps_add: adds all the OPPs for a given device * @freq_set: sets the frequency for a given device using sustained frequency @@ -138,11 +152,10 @@ struct scmi_perf_proto_ops { u32 level, bool poll); int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain, u32 *level, bool poll); - int (*device_domain_id)(struct device *dev); int (*transition_latency_get)(const struct scmi_protocol_handle *ph, - struct device *dev); + u32 domain); int (*device_opps_add)(const struct scmi_protocol_handle *ph, - struct device *dev); + struct device *dev, u32 domain); int (*freq_set)(const struct scmi_protocol_handle *ph, u32 domain, unsigned long rate, bool poll); int (*freq_get)(const struct scmi_protocol_handle *ph, u32 domain, @@ -150,7 +163,7 @@ struct scmi_perf_proto_ops { int (*est_power_get)(const struct scmi_protocol_handle *ph, u32 domain, unsigned long *rate, unsigned long *power); bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph, - struct device *dev); + u32 domain); enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph); }; diff --git a/include/linux/security.h b/include/linux/security.h index b50c7f5661..9d3138c636 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -151,7 +151,7 @@ extern int cap_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); -extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); +extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file); int cap_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); int cap_inode_removexattr(struct mnt_idmap *idmap, @@ -284,16 +284,16 @@ int security_capable(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts); -int security_quotactl(int cmds, int type, int id, struct super_block *sb); +int security_quotactl(int cmds, int type, int id, const struct super_block *sb); int security_quota_on(struct dentry *dentry); int security_syslog(int type); int security_settime64(const struct timespec64 *ts, const struct timezone *tz); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); int security_bprm_creds_for_exec(struct linux_binprm *bprm); -int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); +int security_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file); int security_bprm_check(struct linux_binprm *bprm); -void security_bprm_committing_creds(struct linux_binprm *bprm); -void security_bprm_committed_creds(struct linux_binprm *bprm); +void security_bprm_committing_creds(const struct linux_binprm *bprm); +void security_bprm_committed_creds(const struct linux_binprm *bprm); int security_fs_context_submount(struct fs_context *fc, struct super_block *reference); int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); @@ -304,7 +304,7 @@ void security_free_mnt_opts(void **mnt_opts); int security_sb_eat_lsm_opts(char *options, void **mnt_opts); 
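A sketch of the reworked SCMI perf calls above, which now take a domain index instead of a struct device; how the caller obtains the domain index is left out, and the function name is made up:

#include <linux/device.h>
#include <linux/scmi_protocol.h>

static int my_scmi_setup(const struct scmi_protocol_handle *ph,
                         const struct scmi_perf_proto_ops *perf_ops,
                         struct device *cpu_dev, u32 domain)
{
        bool fast = perf_ops->fast_switch_possible(ph, domain);
        int ret = perf_ops->device_opps_add(ph, cpu_dev, domain);

        if (ret)
                return ret;

        dev_dbg(cpu_dev, "fast switching %ssupported\n", fast ? "" : "not ");
        return 0;
}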
int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts); int security_sb_remount(struct super_block *sb, void *mnt_opts); -int security_sb_kern_mount(struct super_block *sb); +int security_sb_kern_mount(const struct super_block *sb); int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); int security_sb_mount(const char *dev_name, const struct path *path, @@ -583,7 +583,7 @@ static inline int security_capable(const struct cred *cred, } static inline int security_quotactl(int cmds, int type, int id, - struct super_block *sb) + const struct super_block *sb) { return 0; } @@ -615,7 +615,7 @@ static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm) } static inline int security_bprm_creds_from_file(struct linux_binprm *bprm, - struct file *file) + const struct file *file) { return cap_bprm_creds_from_file(bprm, file); } @@ -625,11 +625,11 @@ static inline int security_bprm_check(struct linux_binprm *bprm) return 0; } -static inline void security_bprm_committing_creds(struct linux_binprm *bprm) +static inline void security_bprm_committing_creds(const struct linux_binprm *bprm) { } -static inline void security_bprm_committed_creds(struct linux_binprm *bprm) +static inline void security_bprm_committed_creds(const struct linux_binprm *bprm) { } diff --git a/include/linux/sed-opal-key.h b/include/linux/sed-opal-key.h new file mode 100644 index 0000000000..0ca03054e8 --- /dev/null +++ b/include/linux/sed-opal-key.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * SED key operations. + * + * Copyright (C) 2023 IBM Corporation + * + * These are the accessor functions (read/write) for SED Opal + * keys. Specific keystores can provide overrides. + * + */ + +#include <linux/kernel.h> + +#ifdef CONFIG_PSERIES_PLPKS_SED +int sed_read_key(char *keyname, char *key, u_int *keylen); +int sed_write_key(char *keyname, char *key, u_int keylen); +#else +static inline +int sed_read_key(char *keyname, char *key, u_int *keylen) { + return -EOPNOTSUPP; +} +static inline +int sed_write_key(char *keyname, char *key, u_int keylen) { + return -EOPNOTSUPP; +} +#endif diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index 515d7fcb96..c44f4b47b9 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -14,19 +14,24 @@ * @buffer: pointer to the buffer * @size: size of the buffer * @len: the amount of data inside the buffer - * @readpos: The next position to read in the buffer. */ struct seq_buf { char *buffer; size_t size; size_t len; - loff_t readpos; }; +#define DECLARE_SEQ_BUF(NAME, SIZE) \ + struct seq_buf NAME = { \ + .buffer = (char[SIZE]) { 0 }, \ + .size = SIZE, \ + } + static inline void seq_buf_clear(struct seq_buf *s) { s->len = 0; - s->readpos = 0; + if (s->size) + s->buffer[0] = '\0'; } static inline void @@ -39,7 +44,7 @@ seq_buf_init(struct seq_buf *s, char *buf, unsigned int size) /* * seq_buf have a buffer that might overflow. When this happens - * the len and size are set to be equal. + * len is set to be greater than size. */ static inline bool seq_buf_has_overflowed(struct seq_buf *s) @@ -72,8 +77,8 @@ static inline unsigned int seq_buf_used(struct seq_buf *s) } /** - * seq_buf_terminate - Make sure buffer is nul terminated - * @s: the seq_buf descriptor to terminate. + * seq_buf_str - get %NUL-terminated C string from seq_buf + * @s: the seq_buf handle * * This makes sure that the buffer in @s is nul terminated and * safe to read as a string. 
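The new DECLARE_SEQ_BUF() and seq_buf_str() pair up naturally; a small sketch, with arbitrary message content:

#include <linux/printk.h>
#include <linux/seq_buf.h>

static void my_report(int cpu)
{
        DECLARE_SEQ_BUF(sb, 64);        /* buffer lives in the initializer */

        seq_buf_printf(&sb, "cpu%d ready", cpu);
        pr_info("%s\n", seq_buf_str(&sb));      /* always NUL-terminated */
}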
@@ -84,16 +89,20 @@ static inline unsigned int seq_buf_used(struct seq_buf *s) * * After this function is called, s->buffer is safe to use * in string operations. + * + * Returns @s->buf after making sure it is terminated. */ -static inline void seq_buf_terminate(struct seq_buf *s) +static inline const char *seq_buf_str(struct seq_buf *s) { if (WARN_ON(s->size == 0)) - return; + return ""; if (seq_buf_buffer_left(s)) s->buffer[s->len] = 0; else s->buffer[s->size - 1] = 0; + + return s->buffer; } /** @@ -143,7 +152,7 @@ extern __printf(2, 0) int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args); extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s); extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, - int cnt); + size_t start, int cnt); extern int seq_buf_puts(struct seq_buf *s, const char *str); extern int seq_buf_putc(struct seq_buf *s, unsigned char c); extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 386ab580b8..234bcdb1fb 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -207,6 +207,21 @@ static const struct file_operations __name ## _fops = { \ .release = single_release, \ } +#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .write = __name ## _write, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + #define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index e9bd2f65d7..e92f9d5577 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -191,11 +191,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t * @locktype: LOCKNAME canonical C data type * @preemptible: preemptibility of above locktype - * @lockmember: argument for lockdep_assert_held() - * @lockbase: associated lock release function (prefix only) - * @lock_acquire: associated lock acquisition function (full call) + * @lockbase: prefix for associated lock/unlock */ -#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \ +#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \ typedef struct seqcount_##lockname { \ seqcount_t seqcount; \ __SEQ_LOCK(locktype *lock); \ @@ -207,6 +205,12 @@ __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \ return &s->seqcount; \ } \ \ +static __always_inline const seqcount_t * \ +__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \ +{ \ + return &s->seqcount; \ +} \ + \ static __always_inline unsigned \ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \ { \ @@ -216,7 +220,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \ return seq; \ \ if (preemptible && unlikely(seq & 1)) { \ - __SEQ_LOCK(lock_acquire); \ + __SEQ_LOCK(lockbase##_lock(s->lock)); \ __SEQ_LOCK(lockbase##_unlock(s->lock)); \ \ /* \ @@ -242,7 +246,7 @@ __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \ static __always_inline void \ __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \ { \ - 
__SEQ_LOCK(lockdep_assert_held(lockmember)); \ + __SEQ_LOCK(lockdep_assert_held(s->lock)); \ } /* @@ -254,6 +258,11 @@ static inline seqcount_t *__seqprop_ptr(seqcount_t *s) return s; } +static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s) +{ + return s; +} + static inline unsigned __seqprop_sequence(const seqcount_t *s) { return READ_ONCE(s->sequence); @@ -271,10 +280,10 @@ static inline void __seqprop_assert(const seqcount_t *s) #define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT) -SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock)) -SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock)) -SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock)) -SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock)) +SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin) +SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin) +SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read) +SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) /* * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t @@ -294,19 +303,20 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) #define __seqprop_case(s, lockname, prop) \ - seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s)) + seqcount_##lockname##_t: __seqprop_##lockname##_##prop #define __seqprop(s, prop) _Generic(*(s), \ - seqcount_t: __seqprop_##prop((void *)(s)), \ + seqcount_t: __seqprop_##prop, \ __seqprop_case((s), raw_spinlock, prop), \ __seqprop_case((s), spinlock, prop), \ __seqprop_case((s), rwlock, prop), \ __seqprop_case((s), mutex, prop)) -#define seqprop_ptr(s) __seqprop(s, ptr) -#define seqprop_sequence(s) __seqprop(s, sequence) -#define seqprop_preemptible(s) __seqprop(s, preemptible) -#define seqprop_assert(s) __seqprop(s, assert) +#define seqprop_ptr(s) __seqprop(s, ptr)(s) +#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s) +#define seqprop_sequence(s) __seqprop(s, sequence)(s) +#define seqprop_preemptible(s) __seqprop(s, preemptible)(s) +#define seqprop_assert(s) __seqprop(s, assert)(s) /** * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier @@ -355,7 +365,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex */ #define read_seqcount_begin(s) \ ({ \ - seqcount_lockdep_reader_access(seqprop_ptr(s)); \ + seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \ raw_read_seqcount_begin(s); \ }) @@ -421,7 +431,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex * Return: true if a read section retry is required, else false */ #define __read_seqcount_retry(s, start) \ - do___read_seqcount_retry(seqprop_ptr(s), start) + do___read_seqcount_retry(seqprop_const_ptr(s), start) static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start) { @@ -441,7 +451,7 @@ static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start) * Return: true if a read section retry is required, else false */ #define read_seqcount_retry(s, start) \ - do_read_seqcount_retry(seqprop_ptr(s), start) + do_read_seqcount_retry(seqprop_const_ptr(s), start) static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start) { @@ -574,7 +584,7 @@ static inline void do_write_seqcount_end(seqcount_t *s) * via WRITE_ONCE): a) to ensure the writes become visible to other threads * atomically, avoiding compiler optimizations; b) to document which 
writes are * meant to propagate to the reader critical section. This is necessary because - * neither writes before and after the barrier are enclosed in a seq-writer + * neither writes before nor after the barrier are enclosed in a seq-writer * critical section that would ensure readers are aware of ongoing writes:: * * seqcount_t seq; @@ -864,7 +874,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) } /* - * For all seqlock_t write side functions, use the the internal + * For all seqlock_t write side functions, use the internal * do_write_seqcount_begin() instead of generic write_seqcount_begin(). * This way, no redundant lockdep_assert_held() checks are added. */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index bbbafc0feb..f43aca7f3b 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -748,8 +748,17 @@ struct uart_driver { void uart_write_wakeup(struct uart_port *port); -#define __uart_port_tx(uport, ch, tx_ready, put_char, tx_done, for_test, \ - for_post) \ +/** + * enum UART_TX_FLAGS -- flags for uart_port_tx_flags() + * + * @UART_TX_NOSTOP: don't call port->ops->stop_tx() on empty buffer + */ +enum UART_TX_FLAGS { + UART_TX_NOSTOP = BIT(0), +}; + +#define __uart_port_tx(uport, ch, flags, tx_ready, put_char, tx_done, \ + for_test, for_post) \ ({ \ struct uart_port *__port = (uport); \ struct circ_buf *xmit = &__port->state->xmit; \ @@ -777,7 +786,7 @@ void uart_write_wakeup(struct uart_port *port); if (pending < WAKEUP_CHARS) { \ uart_write_wakeup(__port); \ \ - if (pending == 0) \ + if (!((flags) & UART_TX_NOSTOP) && pending == 0) \ __port->ops->stop_tx(__port); \ } \ \ @@ -812,7 +821,7 @@ void uart_write_wakeup(struct uart_port *port); */ #define uart_port_tx_limited(port, ch, count, tx_ready, put_char, tx_done) ({ \ unsigned int __count = (count); \ - __uart_port_tx(port, ch, tx_ready, put_char, tx_done, __count, \ + __uart_port_tx(port, ch, 0, tx_ready, put_char, tx_done, __count, \ __count--); \ }) @@ -826,8 +835,21 @@ void uart_write_wakeup(struct uart_port *port); * See uart_port_tx_limited() for more details. */ #define uart_port_tx(port, ch, tx_ready, put_char) \ - __uart_port_tx(port, ch, tx_ready, put_char, ({}), true, ({})) + __uart_port_tx(port, ch, 0, tx_ready, put_char, ({}), true, ({})) + +/** + * uart_port_tx_flags -- transmit helper for uart_port with flags + * @port: uart port + * @ch: variable to store a character to be written to the HW + * @flags: %UART_TX_NOSTOP or similar + * @tx_ready: can HW accept more data function + * @put_char: function to write a character + * + * See uart_port_tx_limited() for more details. + */ +#define uart_port_tx_flags(port, ch, flags, tx_ready, put_char) \ + __uart_port_tx(port, ch, flags, tx_ready, put_char, ({}), true, ({})) /* * Baud rate helpers. 
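A hedged sketch of the uart_port_tx_flags() helper defined above; the MY_* register offsets and bits are invented for illustration. UART_TX_NOSTOP keeps the helper from calling ->stop_tx() when the circular buffer drains:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/serial_core.h>

#define MY_STAT         0x00    /* hypothetical status register */
#define MY_TX_READY     BIT(0)  /* hypothetical TX-ready bit */
#define MY_TX_FIFO      0x04    /* hypothetical data register */

static void my_transmit_chars(struct uart_port *port)
{
        u8 ch;

        uart_port_tx_flags(port, ch, UART_TX_NOSTOP,
                readb(port->membase + MY_STAT) & MY_TX_READY,
                writeb(ch, port->membase + MY_TX_FIFO));
}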
*/ @@ -1035,14 +1057,14 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port) u8 sysrq_ch; if (!port->has_sysrq) { - spin_unlock(&port->lock); + uart_port_unlock(port); return; } sysrq_ch = port->sysrq_ch; port->sysrq_ch = 0; - spin_unlock(&port->lock); + uart_port_unlock(port); if (sysrq_ch) handle_sysrq(sysrq_ch); @@ -1054,14 +1076,14 @@ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port u8 sysrq_ch; if (!port->has_sysrq) { - spin_unlock_irqrestore(&port->lock, flags); + uart_port_unlock_irqrestore(port, flags); return; } sysrq_ch = port->sysrq_ch; port->sysrq_ch = 0; - spin_unlock_irqrestore(&port->lock, flags); + uart_port_unlock_irqrestore(port, flags); if (sysrq_ch) handle_sysrq(sysrq_ch); @@ -1077,12 +1099,12 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch) } static inline void uart_unlock_and_check_sysrq(struct uart_port *port) { - spin_unlock(&port->lock); + uart_port_unlock(port); } static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, unsigned long flags) { - spin_unlock_irqrestore(&port->lock, flags); + uart_port_unlock_irqrestore(port, flags); } #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 6b0c626620..2caa6b8610 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -23,18 +23,22 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ - pgoff_t fallocend; /* highest fallocate endindex */ - struct list_head shrinklist; /* shrinkable hpage inodes */ - struct list_head swaplist; /* chain of maybes on swap */ + union { + struct offset_ctx dir_offsets; /* stable directory offsets */ + struct { + struct list_head shrinklist; /* shrinkable hpage inodes */ + struct list_head swaplist; /* chain of maybes on swap */ + }; + }; + struct timespec64 i_crtime; /* file creation time */ struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ + pgoff_t fallocend; /* highest fallocate endindex */ + unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */ atomic_t stop_eviction; /* hold when working on inode */ - struct timespec64 i_crtime; /* file creation time */ - unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */ #ifdef CONFIG_TMPFS_QUOTA struct dquot *i_dquot[MAXQUOTAS]; #endif - struct offset_ctx dir_offsets; /* stable entry offsets */ struct inode vfs_inode; }; diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 224293b2dd..1a00be90d9 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -4,6 +4,25 @@ #include <linux/atomic.h> #include <linux/types.h> +#include <linux/refcount.h> +#include <linux/completion.h> + +#define SHRINKER_UNIT_BITS BITS_PER_LONG + +/* + * Bitmap and deferred work of shrinker::id corresponding to memcg-aware + * shrinkers, which have elements charged to the memcg. + */ +struct shrinker_info_unit { + atomic_long_t nr_deferred[SHRINKER_UNIT_BITS]; + DECLARE_BITMAP(map, SHRINKER_UNIT_BITS); +}; + +struct shrinker_info { + struct rcu_head rcu; + int map_nr_max; + struct shrinker_info_unit *unit[]; +}; /* * This struct is used to pass information from page reclaim to the shrinkers. @@ -70,6 +89,19 @@ struct shrinker { int seeks; /* seeks to recreate an obj */ unsigned flags; + /* + * The reference count of this shrinker. 
A registered shrinker has an + * initial refcount of 1; lookup operations are then allowed + * to use it via shrinker_try_get(). Later in the unregistration step, + * the initial refcount is discarded, and the shrinker is freed + * asynchronously via RCU once its refcount reaches 0. + */ + refcount_t refcount; + struct completion done; /* used to wait for refcount to reach 0 */ + struct rcu_head rcu; + + void *private_data; + /* These are for internal use */ struct list_head list; #ifdef CONFIG_MEMCG @@ -86,48 +118,39 @@ struct shrinker { }; #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ -/* Flags */ -#define SHRINKER_REGISTERED (1 << 0) -#define SHRINKER_NUMA_AWARE (1 << 1) -#define SHRINKER_MEMCG_AWARE (1 << 2) +/* Internal flags */ +#define SHRINKER_REGISTERED BIT(0) +#define SHRINKER_ALLOCATED BIT(1) + +/* Flags for users to use */ +#define SHRINKER_NUMA_AWARE BIT(2) +#define SHRINKER_MEMCG_AWARE BIT(3) /* * It just makes sense when the shrinker is also MEMCG_AWARE for now, * non-MEMCG_AWARE shrinker should not have this flag set. */ -#define SHRINKER_NONSLAB (1 << 3) +#define SHRINKER_NONSLAB BIT(4) -extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker, - const char *fmt, ...); -extern void register_shrinker_prepared(struct shrinker *shrinker); -extern int __printf(2, 3) register_shrinker(struct shrinker *shrinker, - const char *fmt, ...); -extern void unregister_shrinker(struct shrinker *shrinker); -extern void free_prealloced_shrinker(struct shrinker *shrinker); -extern void synchronize_shrinkers(void); +__printf(2, 3) +struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...); +void shrinker_register(struct shrinker *shrinker); +void shrinker_free(struct shrinker *shrinker); -#ifdef CONFIG_SHRINKER_DEBUG -extern int shrinker_debugfs_add(struct shrinker *shrinker); -extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker, - int *debugfs_id); -extern void shrinker_debugfs_remove(struct dentry *debugfs_entry, - int debugfs_id); -extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker, - const char *fmt, ...); -#else /* CONFIG_SHRINKER_DEBUG */ -static inline int shrinker_debugfs_add(struct shrinker *shrinker) -{ - return 0; -} -static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker, - int *debugfs_id) +static inline bool shrinker_try_get(struct shrinker *shrinker) { - *debugfs_id = -1; - return NULL; + return refcount_inc_not_zero(&shrinker->refcount); } -static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry, - int debugfs_id) + +static inline void shrinker_put(struct shrinker *shrinker) { + if (refcount_dec_and_test(&shrinker->refcount)) + complete(&shrinker->done); } + +#ifdef CONFIG_SHRINKER_DEBUG +extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker, + const char *fmt, ...); +#else /* CONFIG_SHRINKER_DEBUG */ static inline __printf(2, 3) int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index ddfe86deb4..763e926440 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3679,6 +3679,9 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l return __skb_put_padto(skb, len, true); } +bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i) + __must_check; + static inline int skb_add_data(struct sk_buff *skb, struct iov_iter *from, int copy) { diff --git a/include/linux/slab.h b/include/linux/slab.h index 8228d1276a..d6d6ffeeb9 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -245,8 +245,9 @@ DEFINE_FREE(kfree, void *, if (_T) kfree(_T)) size_t ksize(const void *objp); #ifdef CONFIG_PRINTK -bool kmem_valid_obj(void *object); -void kmem_dump_obj(void *object); +bool kmem_dump_obj(void *object); +#else +static inline bool kmem_dump_obj(void *object) { return false; } #endif /* @@ -763,6 +764,8 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) __realloc_size(3); extern void kvfree(const void *addr); +DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T)) + extern void kvfree_sensitive(const void *addr, size_t len); unsigned int kmem_cache_size(struct kmem_cache *s); diff --git a/include/linux/smp.h b/include/linux/smp.h index 91ea4a67f8..e87520dc29 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -53,7 +53,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, void *info, bool wait, const struct cpumask *mask); -int smp_call_function_single_async(int cpu, struct __call_single_data *csd); +int smp_call_function_single_async(int cpu, call_single_data_t *csd); /* * Cpus stopping functions in panic. All have default weak definitions. 
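The shrinker rework above replaces the prealloc_shrinker()/register_shrinker()/unregister_shrinker() entry points with an allocate-register-free lifecycle guarded by the new refcount. A minimal sketch of how a user of this header adopts the new API; the demo_* names and the do-nothing callbacks are hypothetical, and error handling is trimmed:

#include <linux/shrinker.h>

static unsigned long demo_count_objects(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	return 0;	/* nothing reclaimable in this sketch */
}

static unsigned long demo_scan_objects(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker *demo_shrinker;

static int demo_init(void)
{
	demo_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "demo");
	if (!demo_shrinker)
		return -ENOMEM;

	demo_shrinker->count_objects = demo_count_objects;
	demo_shrinker->scan_objects = demo_scan_objects;

	/* Publish it; lookups may now pin it with shrinker_try_get(). */
	shrinker_register(demo_shrinker);
	return 0;
}

static void demo_exit(void)
{
	/* Drops the initial refcount; the free waits for concurrent users. */
	shrinker_free(demo_shrinker);
}

Note that shrinker_free() also accepts a shrinker that was allocated but never registered, which is what lets error paths stay symmetric without the old free_prealloced_shrinker() helper.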
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 07f67b3d8e..6c6cccc848 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -2,6 +2,47 @@ #ifndef __SOC_MEDIATEK_INFRACFG_H #define __SOC_MEDIATEK_INFRACFG_H +#define MT8365_INFRA_TOPAXI_PROTECTEN_STA1 0x228 +#define MT8365_INFRA_TOPAXI_PROTECTEN_SET 0x2a0 +#define MT8365_INFRA_TOPAXI_PROTECTEN_CLR 0x2a4 +#define MT8365_INFRA_TOPAXI_PROTECTEN_MM_M0 BIT(1) +#define MT8365_INFRA_TOPAXI_PROTECTEN_MDMCU_M1 BIT(2) +#define MT8365_INFRA_TOPAXI_PROTECTEN_MMAPB_S BIT(6) +#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_0 BIT(10) +#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_1 BIT(11) +#define MT8365_INFRA_TOPAXI_PROTECTEN_AP2CONN_AHB BIT(13) +#define MT8365_INFRA_TOPAXI_PROTECTEN_CONN2INFRA_AHB BIT(14) +#define MT8365_INFRA_TOPAXI_PROTECTEN_MFG_M0 BIT(21) +#define MT8365_INFRA_TOPAXI_PROTECTEN_INFRA2MFG BIT(22) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_STA1 0x258 +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_SET 0x2a8 +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CLR 0x2ac +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU2AP BIT(2) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_0 BIT(16) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_1 BIT(17) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CONN2INFRA_AXI_GALS_MST BIT(18) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CAM2MM_AXI_GALS_MST BIT(19) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU_CBIP_GALS_MST BIT(20) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_INFRA2CONN_AHB_GALS_SLV BIT(21) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_INFRA_GALS_ADB BIT(24) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_MP1_L2C_AFIFO BIT(27) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_AUDIO_M BIT(28) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_M BIT(30) +#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_S BIT(31) + +#define MT8365_INFRA_NAO_TOPAXI_SI0_STA 0x0 +#define MT8365_INFRA_NAO_TOPAXI_SI0_CTRL_UPDATED BIT(24) +#define MT8365_INFRA_NAO_TOPAXI_SI2_STA 0x28 +#define MT8365_INFRA_NAO_TOPAXI_SI2_CTRL_UPDATED BIT(14) +#define MT8365_INFRA_TOPAXI_SI0_CTL 0x200 +#define MT8365_INFRA_TOPAXI_SI0_WAY_EN_MMAPB_S BIT(6) +#define MT8365_INFRA_TOPAXI_SI2_CTL 0x234 +#define MT8365_INFRA_TOPAXI_SI2_WAY_EN_PERI_M1 BIT(5) + +#define MT8365_SMI_COMMON_CLAMP_EN 0x3c0 +#define MT8365_SMI_COMMON_CLAMP_EN_SET 0x3c4 +#define MT8365_SMI_COMMON_CLAMP_EN_CLR 0x3c8 + #define MT8195_TOP_AXI_PROT_EN_STA1 0x228 #define MT8195_TOP_AXI_PROT_EN_1_STA1 0x258 #define MT8195_TOP_AXI_PROT_EN_SET 0x2a0 diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h index b2b28180df..a476648858 100644 --- a/include/linux/soc/mediatek/mtk_wed.h +++ b/include/linux/soc/mediatek/mtk_wed.h @@ -10,6 +10,7 @@ #define MTK_WED_TX_QUEUES 2 #define MTK_WED_RX_QUEUES 2 +#define MTK_WED_RX_PAGE_QUEUES 3 #define WED_WO_STA_REC 0x6 @@ -45,7 +46,7 @@ enum mtk_wed_wo_cmd { MTK_WED_WO_CMD_WED_END }; -struct mtk_rxbm_desc { +struct mtk_wed_bm_desc { __le32 buf0; __le32 token; } __packed __aligned(4); @@ -76,6 +77,11 @@ struct mtk_wed_wo_rx_stats { __le32 rx_drop_cnt; }; +struct mtk_wed_buf { + void *p; + dma_addr_t phy_addr; +}; + struct mtk_wed_device { #ifdef CONFIG_NET_MEDIATEK_SOC_WED const struct mtk_wed_ops *ops; @@ -94,17 +100,20 @@ struct mtk_wed_device { struct mtk_wed_ring txfree_ring; struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES]; + struct 
mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES]; + struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES]; + struct mtk_wed_ring ind_cmd_ring; struct { int size; - void **pages; + struct mtk_wed_buf *pages; struct mtk_wdma_desc *desc; dma_addr_t desc_phys; } tx_buf_ring; struct { int size; - struct mtk_rxbm_desc *desc; + struct mtk_wed_bm_desc *desc; dma_addr_t desc_phys; } rx_buf_ring; @@ -114,6 +123,13 @@ struct mtk_wed_device { dma_addr_t fdbk_phys; } rro; + struct { + int size; + struct mtk_wed_buf *pages; + struct mtk_wed_bm_desc *desc; + dma_addr_t desc_phys; + } hw_rro; + /* filled by driver: */ struct { union { @@ -123,6 +139,7 @@ struct mtk_wed_device { enum mtk_wed_bus_tye bus_type; void __iomem *base; u32 phy_base; + u32 id; u32 wpdma_phys; u32 wpdma_int; @@ -131,18 +148,35 @@ struct mtk_wed_device { u32 wpdma_txfree; u32 wpdma_rx_glo; u32 wpdma_rx; + u32 wpdma_rx_rro[MTK_WED_RX_QUEUES]; + u32 wpdma_rx_pg; bool wcid_512; + bool hw_rro; + bool msi; u16 token_start; unsigned int nbuf; unsigned int rx_nbuf; unsigned int rx_npkt; unsigned int rx_size; + unsigned int amsdu_max_len; u8 tx_tbit[MTK_WED_TX_QUEUES]; u8 rx_tbit[MTK_WED_RX_QUEUES]; + u8 rro_rx_tbit[MTK_WED_RX_QUEUES]; + u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES]; u8 txfree_tbit; + u8 amsdu_max_subframes; + + struct { + u8 se_group_nums; + u16 win_size; + u16 particular_sid; + u32 ack_sn_addr; + dma_addr_t particular_se_phys; + dma_addr_t addr_elem_phys[1024]; + } ind_cmd; u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id); int (*offload_enable)(struct mtk_wed_device *wed); @@ -182,6 +216,14 @@ struct mtk_wed_ops { void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask); int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev, enum tc_setup_type type, void *type_data); + void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask, + bool reset); + void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring, + void __iomem *regs); + void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring, + void __iomem *regs); + int (*ind_rx_ring_setup)(struct mtk_wed_device *dev, + void __iomem *regs); }; extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; @@ -206,16 +248,27 @@ mtk_wed_device_attach(struct mtk_wed_device *dev) return ret; } -static inline bool -mtk_wed_get_rx_capa(struct mtk_wed_device *dev) +static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev) { #ifdef CONFIG_NET_MEDIATEK_SOC_WED + if (dev->version == 3) + return dev->wlan.hw_rro; + return dev->version != 1; #else return false; #endif } +static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev) +{ +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + return dev->version == 3; +#else + return false; +#endif +} + #ifdef CONFIG_NET_MEDIATEK_SOC_WED #define mtk_wed_device_active(_dev) !!(_dev)->ops #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) @@ -242,6 +295,15 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev) #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev) #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \ (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data) +#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \ + (_dev)->ops->start_hw_rro(_dev, _mask, _reset) +#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \ + (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs) +#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \ + (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs) +#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \ + 
(_dev)->ops->ind_rx_ring_setup(_dev, _regs) + #else static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) { @@ -261,6 +323,10 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) #define mtk_wed_device_stop(_dev) do {} while (0) #define mtk_wed_device_dma_reset(_dev) do {} while (0) #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP +#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0) +#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV +#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV +#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV #endif #endif diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 93417ba1ea..1a886666bb 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -30,7 +30,7 @@ #define LLCC_NPU 23 #define LLCC_WLHW 24 #define LLCC_PIMEM 25 -#define LLCC_DRE 26 +#define LLCC_ECC 26 #define LLCC_CVP 28 #define LLCC_MODPE 29 #define LLCC_APTCM 30 diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index bae5e2369b..307961b415 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -55,6 +55,29 @@ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) return copy_from_sockptr_offset(dst, src, 0, size); } +static inline int copy_struct_from_sockptr(void *dst, size_t ksize, + sockptr_t src, size_t usize) +{ + size_t size = min(ksize, usize); + size_t rest = max(ksize, usize) - size; + + if (!sockptr_is_kernel(src)) + return copy_struct_from_user(dst, ksize, src.user, size); + + if (usize < ksize) { + memset(dst + size, 0, rest); + } else if (usize > ksize) { + char *p = src.kernel + size; /* verify the tail beyond ksize is zero */ + + while (rest--) { + if (*p++) + return -E2BIG; + } + } + memcpy(dst, src.kernel, size); + return 0; +} + static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset, const void *src, size_t size) { diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 8cc7a99927..255a0562ae 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -868,6 +868,7 @@ extern int devm_spi_register_controller(struct device *dev, extern void spi_unregister_controller(struct spi_controller *ctlr); #if IS_ENABLED(CONFIG_ACPI) +extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev); extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, struct acpi_device *adev, int index); @@ -1087,8 +1088,6 @@ struct spi_transfer { * @state: for use by whichever driver currently owns the message * @resources: for resource management when the SPI message is processed * @prepared: spi_prepare_message was called for this message - * @t: for use with spi_message_alloc() when message and transfers have - * been allocated together * * A @spi_message is used to execute an atomic sequence of data transfers, * each represented by a struct spi_transfer. 
The sequence is "atomic" @@ -1143,9 +1142,6 @@ struct spi_message { /* List of spi_res resources when the SPI message is processed */ struct list_head resources; - - /* For embedding transfers into the memory of the message */ - struct spi_transfer t[]; }; static inline void spi_message_init_no_memset(struct spi_message *m) @@ -1204,17 +1200,21 @@ struct spi_transfer *xfers, unsigned int num_xfers) */ static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags) { - struct spi_message *m; + struct spi_message_with_transfers { + struct spi_message m; + struct spi_transfer t[]; + } *mwt; + unsigned i; + + mwt = kzalloc(struct_size(mwt, t, ntrans), flags); + if (!mwt) + return NULL; - m = kzalloc(struct_size(m, t, ntrans), flags); - if (m) { - unsigned i; + spi_message_init_no_memset(&mwt->m); + for (i = 0; i < ntrans; i++) + spi_message_add_tail(&mwt->t[i], &mwt->m); - spi_message_init_no_memset(m); - for (i = 0; i < ntrans; i++) - spi_message_add_tail(&m->t[i], m); - } - return m; + return &mwt->m; } static inline void spi_message_free(struct spi_message *m) diff --git a/include/linux/spmi.h b/include/linux/spmi.h index eac1956a87..2a4ce4144f 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h @@ -166,7 +166,7 @@ static inline void spmi_driver_unregister(struct spmi_driver *sdrv) struct device_node; -struct spmi_device *spmi_device_from_of(struct device_node *np); +struct spmi_device *spmi_find_device_by_of_node(struct device_node *np); int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf); int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf, size_t len); diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h index c36e7a3b45..3be2cb5647 100644 --- a/include/linux/stackleak.h +++ b/include/linux/stackleak.h @@ -14,6 +14,7 @@ #ifdef CONFIG_GCC_PLUGIN_STACKLEAK #include <asm/stacktrace.h> +#include <linux/linkage.h> /* * The lowest address on tsk's stack which we can plausibly erase. 
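Returning to the spi_message_alloc() rework above: the flexible spi_transfer array moves out of struct spi_message into a local wrapper struct, but callers still receive a message whose transfers are already chained onto m->transfers. A short usage sketch under that assumption; demo_read_id() and the 0x9f opcode are illustrative, not part of this header:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/spi/spi.h>

static int demo_read_id(struct spi_device *spi, u8 *id, size_t id_len)
{
	static const u8 cmd = 0x9f;
	struct spi_message *m = spi_message_alloc(2, GFP_KERNEL);
	struct spi_transfer *t;
	int ret;

	if (!m)
		return -ENOMEM;

	/* spi_message_alloc() pre-added both transfers, in order. */
	t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
	t->tx_buf = &cmd;
	t->len = 1;

	t = list_next_entry(t, transfer_list);
	t->rx_buf = id;
	t->len = id_len;

	ret = spi_sync(spi, m);
	spi_message_free(m);
	return ret;
}

Because struct spi_message is the first member of the wrapper, the pointer returned by spi_message_alloc() can still be handed straight to spi_message_free(), so existing callers keep working unchanged.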
@@ -76,6 +77,11 @@ static inline void stackleak_task_init(struct task_struct *t) # endif } +asmlinkage void noinstr stackleak_erase(void); +asmlinkage void noinstr stackleak_erase_on_task_stack(void); +asmlinkage void noinstr stackleak_erase_off_task_stack(void); +void __no_caller_saved_registers noinstr stackleak_track_stack(void); + #else /* !CONFIG_GCC_PLUGIN_STACKLEAK */ static inline void stackleak_task_init(struct task_struct *t) { } #endif diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 5acb779689..dee5ad6e48 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -304,7 +304,6 @@ struct plat_stmmacenet_data { unsigned int eee_usecs_rate; struct pci_dev *pdev; int int_snapshot_num; - int ext_snapshot_num; int msi_mac_vec; int msi_wol_vec; int msi_lpi_vec; diff --git a/include/linux/string.h b/include/linux/string.h index 5077776e99..ce137830a0 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -2,6 +2,7 @@ #ifndef _LINUX_STRING_H_ #define _LINUX_STRING_H_ +#include <linux/array_size.h> #include <linux/compiler.h> /* for inline */ #include <linux/types.h> /* for size_t */ #include <linux/stddef.h> /* for NULL */ diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index 9d1f5bb74d..58fb1f90ed 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -24,8 +24,8 @@ enum string_size_units { STRING_UNITS_2, /* use binary powers of 2^10 */ }; -void string_get_size(u64 size, u64 blk_size, enum string_size_units units, - char *buf, int len); +int string_get_size(u64 size, u64 blk_size, enum string_size_units units, + char *buf, int len); int parse_int_array_user(const char __user *from, size_t count, int **array); diff --git a/include/linux/stringify.h b/include/linux/stringify.h index 841cec8ed5..0e84cbe652 100644 --- a/include/linux/stringify.h +++ b/include/linux/stringify.h @@ -9,4 +9,6 @@ #define __stringify_1(x...) #x #define __stringify(x...) 
__stringify_1(x) +#define FILE_LINE __FILE__ ":" __stringify(__LINE__) + #endif /* !__LINUX_STRINGIFY_H */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index dbf5b21fea..b10f987509 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -17,6 +17,7 @@ #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/svcauth.h> +#include <linux/lwq.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/pagevec.h> @@ -33,10 +34,10 @@ */ struct svc_pool { unsigned int sp_id; /* pool id; also node id on NUMA */ - spinlock_t sp_lock; /* protects all fields */ - struct list_head sp_sockets; /* pending sockets */ - unsigned int sp_nrthreads; /* # of threads in pool */ + struct lwq sp_xprts; /* pending transports */ + atomic_t sp_nrthreads; /* # of threads in pool */ struct list_head sp_all_threads; /* all server threads */ + struct llist_head sp_idle_threads; /* idle server threads */ /* statistics on pool operation */ struct percpu_counter sp_messages_arrived; @@ -49,7 +50,8 @@ struct svc_pool { /* bits for sp_flags */ enum { SP_TASK_PENDING, /* still work to do even if no xprt is queued */ - SP_CONGESTED, /* all threads are busy, none idle */ + SP_NEED_VICTIM, /* One thread needs to agree to exit */ + SP_VICTIM_REMAINS, /* One thread needs to actually exit */ }; @@ -88,12 +90,9 @@ struct svc_serv { int (*sv_threadfn)(void *data); #if defined(CONFIG_SUNRPC_BACKCHANNEL) - struct list_head sv_cb_list; /* queue for callback requests + struct lwq sv_cb_list; /* queue for callback requests * that arrive over the same * connection */ - spinlock_t sv_cb_lock; /* protects the svc_cb_list */ - wait_queue_head_t sv_cb_waitq; /* sleep here if there are no - * entries in the svc_cb_list */ bool sv_bc_enabled; /* service uses backchannel */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ }; @@ -186,6 +185,7 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp); */ struct svc_rqst { struct list_head rq_all; /* all threads list */ + struct llist_node rq_idle; /* On the idle list */ struct rcu_head rq_rcu_head; /* for RCU deferred kfree */ struct svc_xprt * rq_xprt; /* transport ptr */ @@ -251,6 +251,7 @@ struct svc_rqst { * net namespace */ void ** rq_lease_breaker; /* The v4 client breaking a lease */ + unsigned int rq_status_counter; /* RPC processing counter */ }; /* bits for rq_flags */ @@ -261,8 +262,7 @@ enum { RQ_DROPME, /* drop current reply */ RQ_SPLICE_OK, /* turned off in gss privacy to prevent * encrypting page cache pages */ - RQ_VICTIM, /* about to be shut down */ - RQ_BUSY, /* request is busy */ + RQ_VICTIM, /* Have agreed to shut down */ RQ_DATA, /* request has data */ }; @@ -301,6 +301,28 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst) return (struct sockaddr *) &rqst->rq_daddr; } +/** + * svc_thread_should_stop - check if this thread should stop + * @rqstp: the thread that might need to stop + * + * To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS + * are set. The first thread which sees SP_NEED_VICTIM clears it, becoming + * the victim using this function. It should then promptly call + * svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS + * so the task waiting for a thread to exit can wake and continue. 
+ * + * Return values: + * %true: caller should invoke svc_exit_thread() + * %false: caller should do nothing + */ +static inline bool svc_thread_should_stop(struct svc_rqst *rqstp) +{ + if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags)) + set_bit(RQ_VICTIM, &rqstp->rq_flags); + + return test_bit(RQ_VICTIM, &rqstp->rq_flags); +} + struct svc_deferred_req { u32 prot; /* protocol (UDP or TCP) */ struct svc_xprt *xprt; @@ -413,8 +435,7 @@ struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); int svc_pool_stats_open(struct svc_serv *serv, struct file *file); void svc_process(struct svc_rqst *rqstp); -int bc_svc_process(struct svc_serv *, struct rpc_rqst *, - struct svc_rqst *); +void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp); int svc_register(const struct svc_serv *, struct net *, const int, const unsigned short, const unsigned short); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index fa55d12dc7..8e20cd60e2 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -54,7 +54,7 @@ struct svc_xprt { const struct svc_xprt_ops *xpt_ops; struct kref xpt_ref; struct list_head xpt_list; - struct list_head xpt_ready; + struct lwq_node xpt_ready; unsigned long xpt_flags; struct svc_serv *xpt_server; /* service for transport */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 4ecc89301e..f85d3a0dac 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -57,6 +57,7 @@ struct xprt_class; struct seq_file; struct svc_serv; struct net; +#include <linux/lwq.h> /* * This describes a complete RPC request @@ -121,7 +122,7 @@ struct rpc_rqst { int rq_ntrans; #if defined(CONFIG_SUNRPC_BACKCHANNEL) - struct list_head rq_bc_list; /* Callback service list */ + struct lwq_node rq_bc_list; /* Callback service list */ unsigned long rq_bc_pa_state; /* Backchannel prealloc state */ struct list_head rq_bc_pa_list; /* Backchannel prealloc list */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h deleted file mode 100644 index 8d33767758..0000000000 --- a/include/linux/superhyway.h +++ /dev/null @@ -1,107 +0,0 @@ -/* - * include/linux/superhyway.h - * - * SuperHyway Bus definitions - * - * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ -#ifndef __LINUX_SUPERHYWAY_H -#define __LINUX_SUPERHYWAY_H - -#include <linux/device.h> - -/* - * SuperHyway IDs - */ -#define SUPERHYWAY_DEVICE_ID_SH5_DMAC 0x0183 - -struct superhyway_vcr_info { - u8 perr_flags; /* P-port Error flags */ - u8 merr_flags; /* Module Error flags */ - u16 mod_vers; /* Module Version */ - u16 mod_id; /* Module ID */ - u8 bot_mb; /* Bottom Memory block */ - u8 top_mb; /* Top Memory block */ -}; - -struct superhyway_ops { - int (*read_vcr)(unsigned long base, struct superhyway_vcr_info *vcr); - int (*write_vcr)(unsigned long base, struct superhyway_vcr_info vcr); -}; - -struct superhyway_bus { - struct superhyway_ops *ops; -}; - -extern struct superhyway_bus superhyway_channels[]; - -struct superhyway_device_id { - unsigned int id; - unsigned long driver_data; -}; - -struct superhyway_device; -extern struct bus_type superhyway_bus_type; - -struct superhyway_driver { - char *name; - - const struct superhyway_device_id *id_table; - struct device_driver drv; - - int (*probe)(struct superhyway_device *dev, const struct superhyway_device_id *id); - void (*remove)(struct superhyway_device *dev); -}; - -#define to_superhyway_driver(d) container_of((d), struct superhyway_driver, drv) - -struct superhyway_device { - char name[32]; - - struct device dev; - - struct superhyway_device_id id; - struct superhyway_driver *drv; - struct superhyway_bus *bus; - - int num_resources; - struct resource *resource; - struct superhyway_vcr_info vcr; -}; - -#define to_superhyway_device(d) container_of((d), struct superhyway_device, dev) - -#define superhyway_get_drvdata(d) dev_get_drvdata(&(d)->dev) -#define superhyway_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) - -static inline int -superhyway_read_vcr(struct superhyway_device *dev, unsigned long base, - struct superhyway_vcr_info *vcr) -{ - return dev->bus->ops->read_vcr(base, vcr); -} - -static inline int -superhyway_write_vcr(struct superhyway_device *dev, unsigned long base, - struct superhyway_vcr_info vcr) -{ - return dev->bus->ops->write_vcr(base, vcr); -} - -extern int superhyway_scan_bus(struct superhyway_bus *); - -/* drivers/sh/superhyway/superhyway.c */ -int superhyway_register_driver(struct superhyway_driver *); -void superhyway_unregister_driver(struct superhyway_driver *); -int superhyway_add_device(unsigned long base, struct superhyway_device *, struct superhyway_bus *); -int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices); - -/* drivers/sh/superhyway/superhyway-sysfs.c */ -extern const struct attribute_group *superhyway_dev_groups[]; - -#endif /* __LINUX_SUPERHYWAY_H */ - diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h index cb79808059..5b67f0f47d 100644 --- a/include/linux/surface_aggregator/controller.h +++ b/include/linux/surface_aggregator/controller.h @@ -44,7 +44,7 @@ struct ssam_event { u8 command_id; u8 instance_id; u16 length; - u8 data[]; + u8 data[] __counted_by(length); }; /** diff --git a/include/linux/swap.h b/include/linux/swap.h index 493487ed7c..e73285e3c8 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -298,6 +298,7 @@ struct swap_info_struct { unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ + struct bdev_handle *bdev_handle;/* open handle of the bdev */ struct block_device *bdev; /* swap 
device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ @@ -552,6 +553,11 @@ static inline int swap_duplicate(swp_entry_t swp) return 0; } +static inline int swapcache_prepare(swp_entry_t swp) +{ + return 0; +} + static inline void swap_free(swp_entry_t swp) { } diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index bbbd6fac3a..59fdd40740 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -356,7 +356,6 @@ asmlinkage long sys_lremovexattr(const char __user *path, const char __user *name); asmlinkage long sys_fremovexattr(int fd, const char __user *name); asmlinkage long sys_getcwd(char __user *buf, unsigned long size); -asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len); asmlinkage long sys_eventfd2(unsigned int count, int flags); asmlinkage long sys_epoll_create1(int flags); asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, @@ -550,6 +549,16 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, asmlinkage long sys_futex_waitv(struct futex_waitv *waiters, unsigned int nr_futexes, unsigned int flags, struct __kernel_timespec __user *timeout, clockid_t clockid); + +asmlinkage long sys_futex_wake(void __user *uaddr, unsigned long mask, int nr, unsigned int flags); + +asmlinkage long sys_futex_wait(void __user *uaddr, unsigned long val, unsigned long mask, + unsigned int flags, struct __kernel_timespec __user *timespec, + clockid_t clockid); + +asmlinkage long sys_futex_requeue(struct futex_waitv __user *waiters, + unsigned int flags, int nr_wake, int nr_requeue); + asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp, struct __kernel_timespec __user *rmtp); asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp, diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index fd3fe5c8c1..b717a70219 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -181,6 +181,8 @@ struct bin_attribute { char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *, + loff_t, int); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, struct vm_area_struct *vma); }; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 3c5efeeb02..b646b574b0 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -152,6 +152,7 @@ struct tcp_request_sock { u64 snt_synack; /* first SYNACK sent time */ bool tfo_listener; bool is_mptcp; + bool req_usec_ts; #if IS_ENABLED(CONFIG_MPTCP) bool drop_req; #endif @@ -165,6 +166,11 @@ struct tcp_request_sock { * after data-in-SYN. 
*/ u8 syn_tos; +#ifdef CONFIG_TCP_AO + u8 ao_keyid; + u8 ao_rcv_next; + bool used_tcp_ao; +#endif }; static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) @@ -172,6 +178,15 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) return (struct tcp_request_sock *)req; } +static inline bool tcp_rsk_used_ao(const struct request_sock *req) +{ +#ifndef CONFIG_TCP_AO + return false; +#else + return tcp_rsk(req)->used_tcp_ao; +#endif +} + #define TCP_RMEM_TO_WIN_SCALE 8 struct tcp_sock { @@ -257,7 +272,8 @@ struct tcp_sock { u8 compressed_ack; u8 dup_ack_counter:2, tlp_retrans:1, /* TLP is a retransmission */ - unused:5; + tcp_usec_ts:1, /* TSval values in usec */ + unused:4; u32 chrono_start; /* Start time in jiffies of a TCP chrono */ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u8 chrono_type:2, /* current chronograph type */ @@ -377,6 +393,14 @@ struct tcp_sock { * Total data bytes retransmitted */ u32 total_retrans; /* Total retransmits for entire connection */ + u32 rto_stamp; /* Start time (ms) of last CA_Loss recovery */ + u16 total_rto; /* Total number of RTO timeouts, including + * SYN/SYN-ACK and recurring timeouts. + */ + u16 total_rto_recoveries; /* Total number of RTO recoveries, + * including any unfinished recovery. + */ + u32 total_rto_time; /* ms spent in (completed) RTO recoveries. */ u32 urg_seq; /* Seq of received urgent pointer */ unsigned int keepalive_time; /* time before keep alive takes place */ @@ -437,13 +461,18 @@ struct tcp_sock { bool syn_smc; /* SYN includes SMC */ #endif -#ifdef CONFIG_TCP_MD5SIG -/* TCP AF-Specific parts; only used by MD5 Signature support so far */ +#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) +/* TCP AF-Specific parts; only used by TCP-AO/MD5 Signature support so far */ const struct tcp_sock_af_ops *af_specific; +#ifdef CONFIG_TCP_MD5SIG /* TCP MD5 Signature Option information */ struct tcp_md5sig_info __rcu *md5sig_info; #endif +#ifdef CONFIG_TCP_AO + struct tcp_ao_info __rcu *ao_info; +#endif +#endif /* TCP fastopen related information */ struct tcp_fastopen_request *fastopen_req; @@ -463,15 +492,17 @@ enum tsq_enum { TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call * tcp_v{4|6}_mtu_reduced() */ + TCP_ACK_DEFERRED, /* TX pure ack is deferred */ }; enum tsq_flags { - TSQF_THROTTLED = (1UL << TSQ_THROTTLED), - TSQF_QUEUED = (1UL << TSQ_QUEUED), - TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED), - TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED), - TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED), - TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED), + TSQF_THROTTLED = BIT(TSQ_THROTTLED), + TSQF_QUEUED = BIT(TSQ_QUEUED), + TCPF_TSQ_DEFERRED = BIT(TCP_TSQ_DEFERRED), + TCPF_WRITE_TIMER_DEFERRED = BIT(TCP_WRITE_TIMER_DEFERRED), + TCPF_DELACK_TIMER_DEFERRED = BIT(TCP_DELACK_TIMER_DEFERRED), + TCPF_MTU_REDUCED_DEFERRED = BIT(TCP_MTU_REDUCED_DEFERRED), + TCPF_ACK_DEFERRED = BIT(TCP_ACK_DEFERRED), }; #define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk) @@ -497,6 +528,9 @@ struct tcp_timewait_sock { #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif +#ifdef CONFIG_TCP_AO + struct tcp_ao_info __rcu *ao_info; +#endif }; static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) @@ -566,4 +600,9 @@ void tcp_sock_set_quickack(struct sock *sk, int val); int tcp_sock_set_syncnt(struct sock *sk, int val); int tcp_sock_set_user_timeout(struct sock *sk, int val); +static 
inline bool dst_tcp_usec_ts(const struct dst_entry *dst) +{ + return dst_feature(dst, RTAX_FEATURE_TCP_USEC_TS); +} + #endif /* _LINUX_TCP_H */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index a5ae4af955..1da1739d75 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -122,7 +122,6 @@ struct thermal_cooling_device { * @devdata: private pointer for device private data * @trips: an array of struct thermal_trip * @num_trips: number of trip points the thermal zone supports - * @trips_disabled; bitmap for disabled trips * @passive_delay_jiffies: number of jiffies to wait between polls when * performing passive cooling. * @polling_delay_jiffies: number of jiffies to wait between polls when @@ -150,6 +149,7 @@ struct thermal_cooling_device { * @node: node in thermal_tz_list (in thermal_core.c) * @poll_queue: delayed work for polling * @notify_event: Last notification event + * @suspended: thermal zone suspend indicator */ struct thermal_zone_device { int id; @@ -163,7 +163,6 @@ struct thermal_zone_device { void *devdata; struct thermal_trip *trips; int num_trips; - unsigned long trips_disabled; /* bitmap for disabled trips */ unsigned long passive_delay_jiffies; unsigned long polling_delay_jiffies; int temperature; @@ -183,6 +182,7 @@ struct thermal_zone_device { struct list_head node; struct delayed_work poll_queue; enum thermal_notify_event notify_event; + bool suspended; }; /** @@ -201,7 +201,8 @@ struct thermal_governor { char name[THERMAL_NAME_LENGTH]; int (*bind_to_tz)(struct thermal_zone_device *tz); void (*unbind_from_tz)(struct thermal_zone_device *tz); - int (*throttle)(struct thermal_zone_device *tz, int trip); + int (*throttle)(struct thermal_zone_device *tz, + const struct thermal_trip *trip); struct list_head governor_list; }; @@ -288,6 +289,9 @@ int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id, int for_each_thermal_trip(struct thermal_zone_device *tz, int (*cb)(struct thermal_trip *, void *), void *data); +int thermal_zone_for_each_trip(struct thermal_zone_device *tz, + int (*cb)(struct thermal_trip *, void *), + void *data); int thermal_zone_get_num_trips(struct thermal_zone_device *tz); int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp); @@ -322,18 +326,22 @@ const char *thermal_zone_device_type(struct thermal_zone_device *tzd); int thermal_zone_device_id(struct thermal_zone_device *tzd); struct device *thermal_zone_device(struct thermal_zone_device *tzd); +int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + unsigned long upper, unsigned long lower, + unsigned int weight); int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *, unsigned long, unsigned long, unsigned int); +int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev); int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); -void thermal_zone_device_exec(struct thermal_zone_device *tz, - void (*cb)(struct thermal_zone_device *, - unsigned long), - unsigned long data); struct thermal_cooling_device *thermal_cooling_device_register(const char *, void *, const struct thermal_cooling_device_ops *); diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 
02333f47c9..6151c210d9 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -175,7 +175,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir); * enum tb_link_width - Thunderbolt/USB4 link width * @TB_LINK_WIDTH_SINGLE: Single lane link * @TB_LINK_WIDTH_DUAL: Dual lane symmetric link - * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 trasmitters + * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters * @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers */ enum tb_link_width { diff --git a/include/linux/tick.h b/include/linux/tick.h index 9459fef5b8..716d17f31c 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -140,14 +140,6 @@ extern unsigned long tick_nohz_get_idle_calls(void); extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); - -static inline void tick_nohz_idle_stop_tick_protected(void) -{ - local_irq_disable(); - tick_nohz_idle_stop_tick(); - local_irq_enable(); -} - #else /* !CONFIG_NO_HZ_COMMON */ #define tick_nohz_enabled (0) static inline int tick_nohz_tick_stopped(void) { return 0; } @@ -170,8 +162,6 @@ static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) } static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } - -static inline void tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ #ifdef CONFIG_NO_HZ_FULL diff --git a/include/linux/timer.h b/include/linux/timer.h index 9162f27581..26a545bb01 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -77,8 +77,7 @@ struct timer_list { .entry = { .next = TIMER_ENTRY_STATIC }, \ .function = (_function), \ .flags = (_flags), \ - __TIMER_LOCKDEP_MAP_INITIALIZER( \ - __FILE__ ":" __stringify(__LINE__)) \ + __TIMER_LOCKDEP_MAP_INITIALIZER(FILE_LINE) \ } #define DEFINE_TIMER(_name, _function) \ diff --git a/include/linux/torture.h b/include/linux/torture.h index 017f0f7108..c98d0c83d1 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -121,10 +121,15 @@ void _torture_stop_kthread(char *m, struct task_struct **tp); #define torture_stop_kthread(n, tp) \ _torture_stop_kthread("Stopping " #n " task", &(tp)) +/* Scheduler-related definitions. 
*/ #ifdef CONFIG_PREEMPTION #define torture_preempt_schedule() __preempt_schedule() #else #define torture_preempt_schedule() do { } while (0) #endif +#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST) +long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask); +#endif + #endif /* __LINUX_TORTURE_H */ diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index cf9f0c6179..d68ff9b124 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -652,7 +652,7 @@ struct trace_event_file { struct list_head list; struct trace_event_call *event_call; struct event_filter __rcu *filter; - struct eventfs_file *ef; + struct eventfs_inode *ei; struct trace_array *tr; struct trace_subsystem_dir *system; struct list_head triggers; @@ -765,7 +765,8 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name); void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, - u64 *probe_offset, u64 *probe_addr); + u64 *probe_offset, u64 *probe_addr, + unsigned long *missed); int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); #else @@ -805,7 +806,7 @@ static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) static inline int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, - u64 *probe_addr) + u64 *probe_addr, unsigned long *missed) { return -EOPNOTSUPP; } @@ -881,6 +882,7 @@ extern void perf_kprobe_destroy(struct perf_event *event); extern int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type, const char **symbol, u64 *probe_offset, u64 *probe_addr, + unsigned long *missed, bool perf_type_tracepoint); #endif #ifdef CONFIG_UPROBE_EVENTS diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 6be92bf559..3691e0e76a 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -14,6 +14,7 @@ struct trace_seq { char buffer[PAGE_SIZE]; struct seq_buf seq; + size_t readpos; int full; }; @@ -22,6 +23,7 @@ trace_seq_init(struct trace_seq *s) { seq_buf_init(&s->seq, s->buffer, PAGE_SIZE); s->full = 0; + s->readpos = 0; } /** diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h index 009072792f..7a5fe17b6b 100644 --- a/include/linux/tracefs.h +++ b/include/linux/tracefs.h @@ -23,26 +23,69 @@ struct file_operations; struct eventfs_file; -struct dentry *eventfs_create_events_dir(const char *name, - struct dentry *parent); - -struct eventfs_file *eventfs_add_subsystem_dir(const char *name, - struct dentry *parent); +/** + * eventfs_callback - A callback function to create dynamic files in eventfs + * @name: The name of the file that is to be created + * @mode: return the file mode for the file (RW access, etc) + * @data: data to pass to the created file ops + * @fops: the file operations of the created file + * + * The eventfs files are dynamically created. The struct eventfs_entry array + * is passed to eventfs_create_dir() or eventfs_create_events_dir() that will + * be used to create the files within those directories. 
When a lookup + * or access to a file within the directory is made, the struct eventfs_entry + * array is used to find a callback() with the matching name that is being + * referenced (for lookups, the entire array is iterated and each callback + * will be called). + * + * The callback will be called with @name for the name of the file to create. + * The callback can return less than 1 to indicate that no file should be + * created. + * + * If a file is to be created, then @mode should be populated with the file + * mode (permissions) with which the file is to be created. This would be + * used to set the created inode i_mode field. + * + * The @data should be set to the data passed to the other file operations + * (read, write, etc). Note, @data will also point to the data passed in + * to eventfs_create_dir() or eventfs_create_events_dir(), but the callback + * can replace the data if it chooses to. Otherwise, the original data + * will be used for the file operation functions. + * + * The @fops should be set to the file operations that will be used to create + * the inode. + * + * NB. This callback is called while holding internal locks of the eventfs + * system. The callback must not call any code that might also call into + * the tracefs or eventfs system or it will risk creating a deadlock. + */ +typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data, + const struct file_operations **fops); -struct eventfs_file *eventfs_add_dir(const char *name, - struct eventfs_file *ef_parent); +/** + * struct eventfs_entry - dynamically created eventfs file callback handler + * @name: The name of the dynamic file in an eventfs directory + * @callback: The callback to get the fops of the file when it is created + * + * See the eventfs_callback() typedef for how to set up @callback. + */ +struct eventfs_entry { + const char *name; + eventfs_callback callback; }; -int eventfs_add_file(const char *name, umode_t mode, - struct eventfs_file *ef_parent, void *data, - const struct file_operations *fops); +struct eventfs_inode; -int eventfs_add_events_file(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops); +struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent, + const struct eventfs_entry *entries, + int size, void *data); -void eventfs_remove(struct eventfs_file *ef); +struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent, + const struct eventfs_entry *entries, + int size, void *data); -void eventfs_remove_events_dir(struct dentry *dentry); +void eventfs_remove_events_dir(struct eventfs_inode *ei); +void eventfs_remove_dir(struct eventfs_inode *ei); struct dentry *tracefs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, diff --git a/include/linux/tsm.h b/include/linux/tsm.h new file mode 100644 index 0000000000..de8324a222 --- /dev/null +++ b/include/linux/tsm.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __TSM_H +#define __TSM_H + +#include <linux/sizes.h> +#include <linux/types.h> + +#define TSM_INBLOB_MAX 64 +#define TSM_OUTBLOB_MAX SZ_32K + +/* + * Privilege level is a nested permission concept to allow confidential + * guests to partition address space; four levels are supported. 
+ */ +#define TSM_PRIVLEVEL_MAX 3 + +/** + * struct tsm_desc - option descriptor for generating tsm report blobs + * @privlevel: optional privilege level to associate with @outblob + * @inblob_len: sizeof @inblob + * @inblob: arbitrary input data + */ +struct tsm_desc { + unsigned int privlevel; + size_t inblob_len; + u8 inblob[TSM_INBLOB_MAX]; +}; + +/** + * struct tsm_report - track state of report generation relative to options + * @desc: input parameters to @report_new() + * @outblob_len: sizeof(@outblob) + * @outblob: generated evidence to provide to the attestation agent + * @auxblob_len: sizeof(@auxblob) + * @auxblob: (optional) auxiliary data to the report (e.g. certificate data) + */ +struct tsm_report { + struct tsm_desc desc; + size_t outblob_len; + u8 *outblob; + size_t auxblob_len; + u8 *auxblob; +}; + +/** + * struct tsm_ops - attributes and operations for tsm instances + * @name: tsm id reflected in /sys/kernel/config/tsm/report/$report/provider + * @privlevel_floor: convey base privlevel for nested scenarios + * @report_new: Populate @report with the report blob and auxblob + * (optional), return 0 on successful population, or -errno otherwise + * + * Implementation specific ops, only one is expected to be registered at + * a time, i.e. only one of "sev-guest", "tdx-guest", etc. + */ +struct tsm_ops { + const char *name; + const unsigned int privlevel_floor; + int (*report_new)(struct tsm_report *report, void *data); +}; + +extern const struct config_item_type tsm_report_default_type; + +/* publish @privlevel, @privlevel_floor, and @auxblob attributes */ +extern const struct config_item_type tsm_report_extra_type; + +int tsm_register(const struct tsm_ops *ops, void *priv, + const struct config_item_type *type); +int tsm_unregister(const struct tsm_ops *ops); +#endif /* __TSM_H */ diff --git a/include/linux/tty.h b/include/linux/tty.h index f002d0f25d..4b6340ac2a 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -390,14 +390,12 @@ int vcs_init(void); extern const struct class tty_class; /** - * tty_kref_get - get a tty reference - * @tty: tty device + * tty_kref_get - get a tty reference + * @tty: tty device * - * Return a new reference to a tty object. The caller must hold - * sufficient locks/counts to ensure that their existing reference cannot - * go away + * Returns: a new reference to a tty object. The caller must hold sufficient + * locks/counts to ensure that their existing reference cannot go away */ - static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) { if (tty) @@ -416,8 +414,8 @@ unsigned int tty_chars_in_buffer(struct tty_struct *tty); unsigned int tty_write_room(struct tty_struct *tty); void tty_driver_flush_buffer(struct tty_struct *tty); void tty_unthrottle(struct tty_struct *tty); -int tty_throttle_safe(struct tty_struct *tty); -int tty_unthrottle_safe(struct tty_struct *tty); +bool tty_throttle_safe(struct tty_struct *tty); +bool tty_unthrottle_safe(struct tty_struct *tty); int tty_do_resize(struct tty_struct *tty, struct winsize *ws); int tty_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); @@ -435,14 +433,13 @@ void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud); /** - * tty_get_baud_rate - get tty bit rates - * @tty: tty to query + * tty_get_baud_rate - get tty bit rates + * @tty: tty to query * - * Returns the baud rate as an integer for this terminal. The - * termios lock must be held by the caller and the terminal bit - * flags may be updated. 
+ * Returns: the baud rate as an integer for this terminal. The termios lock + * must be held by the caller and the terminal bit flags may be updated. * - * Locking: none + * Locking: none */ static inline speed_t tty_get_baud_rate(struct tty_struct *tty) { diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h index cf3ada3e82..c499ae809c 100644 --- a/include/linux/ucs2_string.h +++ b/include/linux/ucs2_string.h @@ -10,6 +10,7 @@ typedef u16 ucs2_char_t; unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength); unsigned long ucs2_strlen(const ucs2_char_t *s); unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); +ssize_t ucs2_strscpy(ucs2_char_t *dst, const ucs2_char_t *src, size_t count); int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); unsigned long ucs2_utf8size(const ucs2_char_t *src); diff --git a/include/linux/uio.h b/include/linux/uio.h index 42bce38a8e..b6214cbf2a 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -21,12 +21,12 @@ struct kvec { enum iter_type { /* iter types */ + ITER_UBUF, ITER_IOVEC, - ITER_KVEC, ITER_BVEC, + ITER_KVEC, ITER_XARRAY, ITER_DISCARD, - ITER_UBUF, }; #define ITER_SOURCE 1 // == WRITE @@ -43,11 +43,7 @@ struct iov_iter { bool copy_mc; bool nofault; bool data_source; - bool user_backed; - union { - size_t iov_offset; - int last_offset; - }; + size_t iov_offset; /* * Hack alert: overlay ubuf_iovec with iovec + count, so * that the members resolve correctly regardless of the type @@ -143,7 +139,7 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i) static inline bool user_backed_iter(const struct iov_iter *i) { - return i->user_backed; + return iter_is_ubuf(i) || iter_is_iovec(i); } /* @@ -342,27 +338,6 @@ iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes) return npages; } -struct csum_state { - __wsum csum; - size_t off; -}; - -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i); -size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); - -static __always_inline __must_check -bool csum_and_copy_from_iter_full(void *addr, size_t bytes, - __wsum *csum, struct iov_iter *i) -{ - size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i); - if (likely(copied == bytes)) - return true; - iov_iter_revert(i, copied); - return false; -} -size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, - struct iov_iter *i); - struct iovec *iovec_from_user(const struct iovec __user *uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_iov, bool compat); @@ -383,7 +358,6 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction, *i = (struct iov_iter) { .iter_type = ITER_UBUF, .copy_mc = false, - .user_backed = true, .data_source = direction, .ubuf = buf, .count = count, diff --git a/include/linux/units.h b/include/linux/units.h index 2793a41e73..45110daaf8 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -2,6 +2,7 @@ #ifndef _LINUX_UNITS_H #define _LINUX_UNITS_H +#include <linux/bits.h> #include <linux/math.h> /* Metric prefixes in accordance with Système international (d'unités) */ @@ -31,6 +32,10 @@ #define MICROWATT_PER_MILLIWATT 1000UL #define MICROWATT_PER_WATT 1000000UL +#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE) +#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE) +#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE) + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long 
milli_kelvin_to_millicelsius(long t) diff --git a/include/linux/usb.h b/include/linux/usb.h index a21074861f..8c61643acd 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1823,22 +1823,6 @@ void *usb_alloc_coherent(struct usb_device *dev, size_t size, void usb_free_coherent(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma); -#if 0 -struct urb *usb_buffer_map(struct urb *urb); -void usb_buffer_dmasync(struct urb *urb); -void usb_buffer_unmap(struct urb *urb); -#endif - -struct scatterlist; -int usb_buffer_map_sg(const struct usb_device *dev, int is_in, - struct scatterlist *sg, int nents); -#if 0 -void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in, - struct scatterlist *sg, int n_hw_ents); -#endif -void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, - struct scatterlist *sg, int n_hw_ents); - /*-------------------------------------------------------------------* * SYNCHRONOUS CALL SUPPORT * *-------------------------------------------------------------------*/ diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index 0b4f2d5faa..5a7f96684e 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -64,6 +64,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_PMQOS BIT(15) #define CI_HDRC_PHY_VBUS_CONTROL BIT(16) #define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17) +#define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 6014340ba9..af3cd2aae4 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -35,6 +35,14 @@ * are ready. The control transfer will then be kept from completing till * all the function drivers that requested USB_GADGET_DELAYED_STATUS * invoke usb_composite_setup_continue(). + * + * NOTE: USB_GADGET_DELAYED_STATUS must not be used in UDC drivers: they + * must delay completing the status stage for 0-length control transfers + * regardless of whether USB_GADGET_DELAYED_STATUS is returned from + * the gadget driver's setup() callback. + * Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS, + * which is a bug. These drivers must be fixed and USB_GADGET_DELAYED_STATUS + * must be contained within the composite framework. */ #define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 75bda07833..6532beb587 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -711,6 +711,15 @@ static inline int usb_gadget_check_config(struct usb_gadget *gadget) * get_interface. Setting a configuration (or interface) is where * endpoints should be activated or (config 0) shut down. + * + * The gadget driver's setup() callback does not have to queue a response to + * ep0 within the setup() call; the driver can do it after setup() returns. + * The UDC driver must wait until such a response is queued before proceeding + * with the data/status stages of the control transfer. + * + * NOTE: Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS + * being returned from the setup() callback, which is a bug. See the comment + * next to USB_GADGET_DELAYED_STATUS for details. + * * (Note that only the default control endpoint is supported. Neither * hosts nor devices generally support control traffic except to ep0.) 
 * diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 61d4f0b793..00724b4f6e 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -484,8 +484,25 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev, extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); +#ifdef CONFIG_USB_PCI_AMD extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev); +static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev, + const struct hc_driver *driver) +{ + if (!usb_hcd_amd_remote_wakeup_quirk(dev)) + return false; + if (driver->flags & (HCD_USB11 | HCD_USB3)) + return true; + return false; +} +#else /* CONFIG_USB_PCI_AMD */ +static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev, + const struct hc_driver *driver) +{ + return false; +} +#endif extern const struct dev_pm_ops usb_hcd_pci_pm_ops; #endif /* CONFIG_USB_PCI */ diff --git a/include/linux/usb/ljca.h b/include/linux/usb/ljca.h new file mode 100644 index 0000000000..47661feda9 --- /dev/null +++ b/include/linux/usb/ljca.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Intel Corporation. All rights reserved. + */ +#ifndef _LINUX_USB_LJCA_H_ +#define _LINUX_USB_LJCA_H_ + +#include <linux/auxiliary_bus.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#define LJCA_MAX_GPIO_NUM 64 + +#define auxiliary_dev_to_ljca_client(auxiliary_dev) \ + container_of(auxiliary_dev, struct ljca_client, auxdev) + +struct ljca_adapter; + +/** + * typedef ljca_event_cb_t - event callback function signature + * + * @context: the execution context of whoever registered this callback + * @cmd: the command from the device for this event + * @evt_data: the event data payload + * @len: the event data payload length + * + * The callback function is called in interrupt context and the data payload is + * only valid during the call. If the user needs to access the data after the + * call, it must copy it. + */ +typedef void (*ljca_event_cb_t)(void *context, u8 cmd, const void *evt_data, int len); + +/** + * struct ljca_client - represents an ljca client device + * + * @type: ljca client type + * @id: ljca client id within the same client type + * @link: ljca client on the same ljca adapter + * @auxdev: auxiliary device object + * @adapter: ljca adapter the ljca client sits on + * @context: the execution context of the event callback + * @event_cb: the ljca client driver registers this callback to get + * firmware asynchronous rx buffer pending notifications + * @event_cb_lock: spinlock to protect event callback + */ +struct ljca_client { + u8 type; + u8 id; + struct list_head link; + struct auxiliary_device auxdev; + struct ljca_adapter *adapter; + + void *context; + ljca_event_cb_t event_cb; + /* lock to protect event_cb */ + spinlock_t event_cb_lock; +}; + +/** + * struct ljca_gpio_info - ljca gpio client device info + * + * @num: ljca gpio client device pin number + * @valid_pin_map: ljca gpio client device valid pin mapping + */ +struct ljca_gpio_info { + unsigned int num; + DECLARE_BITMAP(valid_pin_map, LJCA_MAX_GPIO_NUM); +}; + +/** + * struct ljca_i2c_info - ljca i2c client device info + * + * @id: ljca i2c client device identification number + * @capacity: ljca i2c client device capacity + * @intr_pin: ljca i2c client device interrupt pin number, if any + */ +struct ljca_i2c_info { + u8 id; + u8 capacity; + u8 intr_pin; +}; + +/** + * struct ljca_spi_info - ljca spi client device info + * + * @id: ljca spi client device identification number + * @capacity: ljca spi client device capacity + */ +struct ljca_spi_info { + u8 id; + u8 capacity; +}; + +/** + * ljca_register_event_cb - register a callback function to receive events + * + * @client: ljca client device + * @event_cb: callback function + * @context: execution context of event callback + * + * Return: 0 in case of success, negative value in case of error + */ +int ljca_register_event_cb(struct ljca_client *client, ljca_event_cb_t event_cb, void *context); + +/** + * ljca_unregister_event_cb - unregister the callback function for an event + * + * @client: ljca client device + */ +void ljca_unregister_event_cb(struct ljca_client *client); + +/** + * ljca_transfer - issue an LJCA command and wait for a response + * + * @client: ljca client device + * @cmd: the command to be sent to the device + * @obuf: the buffer to be sent to the device; it can be NULL if the user + * doesn't need to transmit data with this command + * @obuf_len: the size of the buffer to be sent to the device; it should + * be 0 when obuf is NULL + * @ibuf: any data associated with the response will be copied here; it can be + * NULL if the user doesn't need the response data + * @ibuf_len: must be initialized to the input buffer size + * + * Return: the actual length of response data for success, negative value for errors + */ +int ljca_transfer(struct ljca_client *client, u8 cmd, const u8 *obuf, + u8 obuf_len, u8 *ibuf, u8 ibuf_len); + +/** + * ljca_transfer_noack - issue an LJCA command without a response + * + * @client: ljca client device + * @cmd: the command to be sent to the device + * @obuf: the buffer to be sent to the device; it can be NULL if the user + * doesn't need to transmit data with this command + * @obuf_len: the size of the buffer to be sent to the device + * + * Return: 0 for success, negative value for errors + */ +int ljca_transfer_noack(struct ljca_client *client, u8 cmd, const u8 *obuf, + u8 obuf_len); + +#endif
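As a usage illustration of the API above, here is a minimal sketch of an auxiliary driver binding to an LJCA client, registering the event callback, and issuing one synchronous transfer. Only the ljca_* calls, the helper macro, and the callback signature come from this header; the command value and the my_* names are hypothetical.

/* Hedged sketch; MY_CMD_GET_STATE and all my_* identifiers are made up. */
#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/usb/ljca.h>

#define MY_CMD_GET_STATE	0x01	/* hypothetical device command */

static void my_event_cb(void *context, u8 cmd, const void *evt_data, int len)
{
	/* Runs in interrupt context; evt_data is only valid during this call. */
	pr_info("ljca event: cmd %#x, %d byte(s)\n", cmd, len);
}

static int my_probe(struct auxiliary_device *auxdev,
		    const struct auxiliary_device_id *id)
{
	struct ljca_client *client = auxiliary_dev_to_ljca_client(auxdev);
	u8 resp[8];
	int ret;

	ret = ljca_register_event_cb(client, my_event_cb, NULL);
	if (ret)
		return ret;

	/* No payload to send; the response is copied into resp on success. */
	ret = ljca_transfer(client, MY_CMD_GET_STATE, NULL, 0, resp, sizeof(resp));
	if (ret < 0) {
		ljca_unregister_event_cb(client);
		return ret;
	}

	return 0;
}

diff --git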
a/include/linux/usb/pd.h b/include/linux/usb/pd.h index c59fb79a42..eb626af0e4 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -228,6 +228,7 @@ enum pd_pdo_type { #define PDO_FIXED_UNCHUNK_EXT BIT(24) /* Unchunked Extended Message supported (Source) */ #define PDO_FIXED_FRS_CURR_MASK (BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */ #define PDO_FIXED_FRS_CURR_SHIFT 23 +#define PDO_FIXED_PEAK_CURR_SHIFT 20 #define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */ #define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */ diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h index b057250704..3a747938cd 100644 --- a/include/linux/usb/pd_vdo.h +++ b/include/linux/usb/pd_vdo.h @@ -376,6 +376,7 @@ | ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \ | (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7)) +#define VDO_TYPEC_CABLE_SPEED(vdo) ((vdo) & 0x7) #define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3) /* diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index d418c55523..372898d9ee 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -5,16 +5,6 @@ * Copyright (C) 2011 Renesas Solutions Corp. * Copyright (C) 2019 Renesas Electronics Corporation * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * */ #ifndef RENESAS_USB_H #define RENESAS_USB_H diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 8fa7812079..a05d6f6f25 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -202,6 +202,8 @@ struct typec_cable_desc { * @accessory: Audio, Debug or none. * @identity: Discover Identity command data * @pd_revision: USB Power Delivery Specification Revision if supported + * @attach: Notification about attached USB device + * @deattach: Notification about removed USB device * * Details about a partner that is attached to USB Type-C port. If @identity * member exists when partner is registered, a directory named "identity" is @@ -217,6 +219,9 @@ struct typec_partner_desc { enum typec_accessory accessory; struct usb_pd_identity *identity; u16 pd_revision; /* 0300H = "3.0" */ + + void (*attach)(struct typec_partner *partner, struct device *dev); + void (*deattach)(struct typec_partner *partner, struct device *dev); }; /** @@ -335,4 +340,36 @@ int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_ int typec_partner_set_usb_power_delivery(struct typec_partner *partner, struct usb_power_delivery *pd); +/** + * struct typec_connector - Representation of Type-C port for external drivers + * @attach: notification about device connection + * @deattach: notification about device removal + * + * Drivers that control the USB and other ports (DisplayPort, etc.) that are + * connected to the Type-C connectors can use these callbacks to inform the + * Type-C connector class about connections and disconnections.
That information + * can then be used by the typec-port drivers to power on or off parts that are + * needed or not needed - as an example, in USB mode if a USB2 device is + * enumerated, USB3 components (retimers, phys, and what have you) do not need + * to be powered on. + * + * The attached (enumerated) devices will be linked with the typec-partner device. + */ +struct typec_connector { + void (*attach)(struct typec_connector *con, struct device *dev); + void (*deattach)(struct typec_connector *con, struct device *dev); +}; + +static inline void typec_attach(struct typec_connector *con, struct device *dev) +{ + if (con && con->attach) + con->attach(con, dev); +} + +static inline void typec_deattach(struct typec_connector *con, struct device *dev) +{ + if (con && con->deattach) + con->deattach(con, dev); +} + #endif /* __LINUX_USB_TYPEC_H */
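To make the intended flow concrete, a minimal sketch of a port driver consuming these callbacks follows; everything named my_* is hypothetical, and only struct typec_connector and the NULL-safe typec_attach()/typec_deattach() wrappers come from the header above.

#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/usb/typec.h>

/* Hypothetical port-driver state wrapping the connector callbacks. */
struct my_port {
	struct typec_connector conn;
	bool usb3_parts_powered;
};

static void my_port_attach(struct typec_connector *con, struct device *dev)
{
	struct my_port *port = container_of(con, struct my_port, conn);

	/* Power the USB3 retimers/phys only while a device is present. */
	port->usb3_parts_powered = true;
	dev_dbg(dev, "attached to Type-C port\n");
}

static void my_port_deattach(struct typec_connector *con, struct device *dev)
{
	struct my_port *port = container_of(con, struct my_port, conn);

	port->usb3_parts_powered = false;
}

static struct my_port my_port_instance = {
	.conn = {
		.attach = my_port_attach,
		.deattach = my_port_deattach,
	},
};

/*
 * The USB stack (or a DisplayPort driver) holding a pointer to the
 * connector reports enumeration and removal through the wrappers:
 */
static void my_report(struct device *dev, bool connected)
{
	if (connected)
		typec_attach(&my_port_instance.conn, dev);
	else
		typec_deattach(&my_port_instance.conn, dev);
}

diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h index 8d09c2f0a9..1f35809852 100644 --- a/include/linux/usb/typec_dp.h +++ b/include/linux/usb/typec_dp.h @@ -67,8 +67,10 @@ enum { #define DP_CAP_UFP_D 1 #define DP_CAP_DFP_D 2 #define DP_CAP_DFP_D_AND_UFP_D 3 -#define DP_CAP_DP_SIGNALING BIT(2) /* Always set */ -#define DP_CAP_GEN2 BIT(3) /* Reserved after v1.0b */ +#define DP_CAP_DP_SIGNALLING(_cap_) (((_cap_) & GENMASK(5, 2)) >> 2) +#define DP_CAP_SIGNALLING_HBR3 1 +#define DP_CAP_SIGNALLING_UHBR10 2 +#define DP_CAP_SIGNALLING_UHBR20 3 #define DP_CAP_RECEPTACLE BIT(6) #define DP_CAP_USB BIT(7) #define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8) @@ -78,6 +80,13 @@ enum { DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_)) #define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_)) +#define DP_CAP_UHBR_13_5_SUPPORT BIT(26) +#define DP_CAP_CABLE_TYPE(_cap_) (((_cap_) & GENMASK(29, 28)) >> 28) +#define DP_CAP_CABLE_TYPE_PASSIVE 0 +#define DP_CAP_CABLE_TYPE_RE_TIMER 1 +#define DP_CAP_CABLE_TYPE_RE_DRIVER 2 +#define DP_CAP_CABLE_TYPE_OPTICAL 3 +#define DP_CAP_DPAM_VERSION BIT(30) /* DisplayPort Status Update VDO bits */ #define DP_STATUS_CONNECTION(_status_) ((_status_) & 3) @@ -97,13 +106,24 @@ enum { #define DP_CONF_CURRENTLY(_conf_) ((_conf_) & 3) #define DP_CONF_UFP_U_AS_DFP_D BIT(0) #define DP_CONF_UFP_U_AS_UFP_D BIT(1) -#define DP_CONF_SIGNALING_DP BIT(2) -#define DP_CONF_SIGNALING_GEN_2 BIT(3) /* Reserved after v1.0b */ +#define DP_CONF_SIGNALLING_MASK GENMASK(5, 2) +#define DP_CONF_SIGNALLING_SHIFT 2 +#define DP_CONF_SIGNALLING_HBR3 1 +#define DP_CONF_SIGNALLING_UHBR10 2 +#define DP_CONF_SIGNALLING_UHBR20 3 #define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8 #define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8) /* Helper for setting/getting the pin assignment value to the configuration */ #define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8) #define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8) +#define DP_CONF_UHBR13_5_SUPPORT BIT(26) +#define DP_CONF_CABLE_TYPE_MASK GENMASK(29, 28) +#define DP_CONF_CABLE_TYPE_SHIFT 28 +#define DP_CONF_CABLE_TYPE_PASSIVE 0 +#define DP_CONF_CABLE_TYPE_RE_TIMER 1 +#define DP_CONF_CABLE_TYPE_RE_DRIVER 2 +#define DP_CONF_CABLE_TYPE_OPTICAL 3 +#define DP_CONF_DPAM_VERSION BIT(30) #endif /* __USB_TYPEC_DP_H */ diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h index 63dd44b72e..c7a2153bd6 100644 --- a/include/linux/usb/typec_tbt.h +++ b/include/linux/usb/typec_tbt.h @@ -46,6 +46,7 @@ struct typec_thunderbolt_data { #define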
TBT_CABLE_RETIMER BIT(22) #define TBT_CABLE_LINK_TRAINING BIT(23) +#define TBT_CABLE_ACTIVE_PASSIVE BIT(25) #define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16) #define TBT_SET_CABLE_ROUNDED(_g_) (((_g_) & GENMASK(1, 0)) << 19) diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 45f09bec02..6030a82356 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -65,6 +65,10 @@ enum rlimit_type { UCOUNT_RLIMIT_COUNTS, }; +#if IS_ENABLED(CONFIG_BINFMT_MISC) +struct binfmt_misc; +#endif + struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; @@ -102,6 +106,10 @@ struct user_namespace { struct ucounts *ucounts; long ucount_max[UCOUNT_COUNTS]; long rlimit_max[UCOUNT_RLIMIT_COUNTS]; + +#if IS_ENABLED(CONFIG_BINFMT_MISC) + struct binfmt_misc *binfmt_misc; +#endif } __randomize_layout; struct ucounts { diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index ac8c685409..f2dc19f40d 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -161,11 +161,22 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) } static inline bool vma_can_userfault(struct vm_area_struct *vma, - unsigned long vm_flags) + unsigned long vm_flags, + bool wp_async) { + vm_flags &= __VM_UFFD_FLAGS; + if ((vm_flags & VM_UFFD_MINOR) && (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma))) return false; + + /* + * If wp async enabled, and WP is the only mode enabled, allow any + * memory type. + */ + if (wp_async && (vm_flags == VM_UFFD_WP)) + return true; + #ifndef CONFIG_PTE_MARKER_UFFD_WP /* * If user requested uffd-wp but not enabled pte markers for @@ -175,6 +186,8 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma, if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma)) return false; #endif + + /* By default, allow any of anon|shmem|hugetlb */ return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || vma_is_shmem(vma); } @@ -197,6 +210,7 @@ extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, extern void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf); extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma); +extern bool userfaultfd_wp_async(struct vm_area_struct *vma); #else /* CONFIG_USERFAULTFD */ @@ -207,6 +221,13 @@ static inline vm_fault_t handle_userfault(struct vm_fault *vmf, return VM_FAULT_SIGBUS; } +static inline long uffd_wp_range(struct vm_area_struct *vma, + unsigned long start, unsigned long len, + bool enable_wp) +{ + return false; +} + static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, struct vm_userfaultfd_ctx vm_ctx) { @@ -297,6 +318,11 @@ static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma) return false; } +static inline bool userfaultfd_wp_async(struct vm_area_struct *vma) +{ + return false; +} + #endif /* CONFIG_USERFAULTFD */ static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma) diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 0e652026b7..db15ac07f8 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -204,6 +204,16 @@ struct vdpa_map_file { * @vdev: vdpa device * @idx: virtqueue index * Returns u32: group id for this virtqueue + * @get_vq_desc_group: Get the group id for the descriptor table of + * a specific virtqueue (optional) + * @vdev: vdpa device + * @idx: virtqueue index + * Returns u32: group id for the descriptor table + * portion of this virtqueue. 
Could be different + * than the one from @get_vq_group, in which case + * the access to the descriptor table can be + * confined to a separate asid, isolating it from + * the virtqueue's buffer address access. * @get_device_features: Get virtio features supported by the device * @vdev: vdpa device * Returns the virtio features supported by the @@ -242,6 +252,17 @@ struct vdpa_map_file { * @reset: Reset device * @vdev: vdpa device * Returns integer: success (0) or error (< 0) + * @compat_reset: Reset device with compatibility quirks to + * accommodate older userspace. Only needed by + * a parent driver which used to have bogus reset + * behaviour, and has to maintain such behaviour + * for compatibility with older userspace. + * A historically compliant driver only has to + * implement .reset; a historically non-compliant + * driver should implement both. + * @vdev: vdpa device + * @flags: compatibility quirks for reset + * Returns integer: success (0) or error (< 0) * @suspend: Suspend the device (optional) * @vdev: vdpa device * Returns integer: success (0) or error (< 0) @@ -317,6 +338,15 @@ struct vdpa_map_file { * @iova: iova to be unmapped * @size: size of the area * Returns integer: success (0) or error (< 0) + * @reset_map: Reset device memory mapping to the default + * state (optional) + * Needed for devices that use device-specific + * DMA translation and prefer the mapping to be + * decoupled from the virtio life cycle, i.e. the + * device .reset op does not reset the mapping + * @vdev: vdpa device + * @asid: address space identifier + * Returns integer: success (0) or error (< 0) * @get_vq_dma_dev: Get the dma device for a specific * virtqueue (optional) * @vdev: vdpa device @@ -360,6 +390,7 @@ struct vdpa_config_ops { /* Device ops */ u32 (*get_vq_align)(struct vdpa_device *vdev); u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx); + u32 (*get_vq_desc_group)(struct vdpa_device *vdev, u16 idx); u64 (*get_device_features)(struct vdpa_device *vdev); u64 (*get_backend_features)(const struct vdpa_device *vdev); int (*set_driver_features)(struct vdpa_device *vdev, u64 features); @@ -373,6 +404,8 @@ struct vdpa_config_ops { u8 (*get_status)(struct vdpa_device *vdev); void (*set_status)(struct vdpa_device *vdev, u8 status); int (*reset)(struct vdpa_device *vdev); + int (*compat_reset)(struct vdpa_device *vdev, u32 flags); +#define VDPA_RESET_F_CLEAN_MAP 1 int (*suspend)(struct vdpa_device *vdev); int (*resume)(struct vdpa_device *vdev); size_t (*get_config_size)(struct vdpa_device *vdev); @@ -394,6 +427,7 @@ struct vdpa_config_ops { u64 iova, u64 size, u64 pa, u32 perm, void *opaque); int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid, u64 iova, u64 size); + int (*reset_map)(struct vdpa_device *vdev, unsigned int asid); int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group, unsigned int asid); struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx); @@ -485,14 +519,17 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) return vdev->dma_dev; } -static inline int vdpa_reset(struct vdpa_device *vdev) +static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags) { const struct vdpa_config_ops *ops = vdev->config; int ret; down_write(&vdev->cf_lock); vdev->features_valid = false; - ret = ops->reset(vdev); + if (ops->compat_reset && flags) + ret = ops->compat_reset(vdev, flags); + else + ret = ops->reset(vdev); up_write(&vdev->cf_lock); return ret; }
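A minimal sketch of how a parent driver might wire these ops up, assuming hypothetical my_* helpers; only the op signatures and VDPA_RESET_F_CLEAN_MAP come from the header above. A plain .reset leaves the mappings in place, and they are torn down only when the core asks for the compatibility behaviour:

#include <linux/container_of.h>
#include <linux/vdpa.h>

struct my_vdpa {
	struct vdpa_device vdpa;
	/* ... device-specific state ... */
};

/* Hypothetical device-specific helpers, stubbed out for the sketch. */
static int my_hw_reset(struct my_vdpa *my) { return 0; }
static void my_clean_mappings(struct my_vdpa *my, unsigned int asid) { }

static int my_compat_reset(struct vdpa_device *vdev, u32 flags)
{
	struct my_vdpa *my = container_of(vdev, struct my_vdpa, vdpa);
	int ret;

	ret = my_hw_reset(my);		/* virtqueues, status, features */
	if (ret)
		return ret;

	/* Only wipe the DMA mapping when older userspace expects that. */
	if (flags & VDPA_RESET_F_CLEAN_MAP)
		my_clean_mappings(my, 0);

	return 0;
}

static int my_reset(struct vdpa_device *vdev)
{
	return my_compat_reset(vdev, 0);	/* mapping survives .reset */
}

static int my_reset_map(struct vdpa_device *vdev, unsigned int asid)
{
	/* Restore the default mapping state for one address space. */
	my_clean_mappings(container_of(vdev, struct my_vdpa, vdpa), asid);
	return 0;
}

static const struct vdpa_config_ops my_ops = {
	.reset		= my_reset,
	.compat_reset	= my_compat_reset,
	.reset_map	= my_reset_map,
	/* ... remaining mandatory ops ... */
};

diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index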
067ac1d789..a09e13a577 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -5,44 +5,48 @@ #include <linux/pci.h> #include <linux/virtio_pci.h> -struct virtio_pci_modern_common_cfg { - struct virtio_pci_common_cfg cfg; - - __le16 queue_notify_data; /* read-write */ - __le16 queue_reset; /* read-write */ -}; - +/** + * struct virtio_pci_modern_device - info for modern PCI virtio + * @pci_dev: Ptr to the PCI device struct + * @common: Position of the common capability in the PCI config + * @device: Device-specific data (non-legacy mode) + * @notify_base: Base of vq notifications (non-legacy mode) + * @notify_pa: Physical base of vq notifications + * @isr: Where to read and clear interrupt + * @notify_len: So we can sanity-check accesses + * @device_len: So we can sanity-check accesses + * @common_len: So we can sanity-check accesses + * @notify_map_cap: Capability for when we need to map notifications per-vq + * @notify_offset_multiplier: Multiply queue_notify_off by this value + * (non-legacy mode). + * @modern_bars: Bitmask of BARs + * @id: Device and vendor id + * @device_id_check: Callback defined before vp_modern_probe() to be used to + * verify the PCI device is a vendor's expected device rather + * than the standard virtio PCI device + * Returns the found device id or ERRNO + * @dma_mask: Optional mask instead of the traditional DMA_BIT_MASK(64), + * for vendor devices with DMA space address limitations + */ struct virtio_pci_modern_device { struct pci_dev *pci_dev; struct virtio_pci_common_cfg __iomem *common; - /* Device-specific data (non-legacy mode) */ void __iomem *device; - /* Base of vq notifications (non-legacy mode). */ void __iomem *notify_base; - /* Physical base of vq notifications */ resource_size_t notify_pa; - /* Where to read and clear interrupt */ u8 __iomem *isr; - /* So we can sanity-check accesses. */ size_t notify_len; size_t device_len; + size_t common_len; - /* Capability for when we need to map notifications per-vq. */ int notify_map_cap; - /* Multiply queue_notify_off by this value. (non-legacy mode). */ u32 notify_offset_multiplier; - int modern_bars; - struct virtio_device_id id; - /* optional check for vendor virtio device, returns dev_id or -ERRNO */ int (*device_id_check)(struct pci_dev *pdev); - - /* optional mask for devices with limited DMA space */ u64 dma_mask; }; diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index fbf30721ba..c82089dee0 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -12,6 +12,7 @@ struct virtio_vsock_skb_cb { bool reply; bool tap_delivered; + u32 offset; }; #define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb)) @@ -159,6 +160,15 @@ struct virtio_transport { /* Takes ownership of the packet */ int (*send_pkt)(struct sk_buff *skb); + + /* Used in MSG_ZEROCOPY mode. Checks that the provided data + * (number of buffers) can be transmitted with zerocopy + * mode. If this callback is not implemented for the current + * transport, it means that the transport doesn't need + * extra checks and can perform zerocopy transmission by + * default.
+ */ + bool (*can_msgzerocopy)(int bufs_num); }; ssize_t diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h deleted file mode 100644 index e9c0cd36c4..0000000000 --- a/include/linux/vlynq.h +++ /dev/null @@ -1,149 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org> - */ - -#ifndef __VLYNQ_H__ -#define __VLYNQ_H__ - -#include <linux/device.h> -#include <linux/types.h> - -struct module; - -#define VLYNQ_NUM_IRQS 32 - -struct vlynq_mapping { - u32 size; - u32 offset; -}; - -enum vlynq_divisor { - vlynq_div_auto = 0, - vlynq_ldiv1, - vlynq_ldiv2, - vlynq_ldiv3, - vlynq_ldiv4, - vlynq_ldiv5, - vlynq_ldiv6, - vlynq_ldiv7, - vlynq_ldiv8, - vlynq_rdiv1, - vlynq_rdiv2, - vlynq_rdiv3, - vlynq_rdiv4, - vlynq_rdiv5, - vlynq_rdiv6, - vlynq_rdiv7, - vlynq_rdiv8, - vlynq_div_external -}; - -struct vlynq_device_id { - u32 id; - enum vlynq_divisor divisor; - unsigned long driver_data; -}; - -struct vlynq_regs; -struct vlynq_device { - u32 id, dev_id; - int local_irq; - int remote_irq; - enum vlynq_divisor divisor; - u32 regs_start, regs_end; - u32 mem_start, mem_end; - u32 irq_start, irq_end; - int irq; - int enabled; - struct vlynq_regs *local; - struct vlynq_regs *remote; - struct device dev; -}; - -struct vlynq_driver { - char *name; - struct vlynq_device_id *id_table; - int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id); - void (*remove)(struct vlynq_device *dev); - struct device_driver driver; -}; - -struct plat_vlynq_ops { - int (*on)(struct vlynq_device *dev); - void (*off)(struct vlynq_device *dev); -}; - -static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv) -{ - return container_of(drv, struct vlynq_driver, driver); -} - -static inline struct vlynq_device *to_vlynq_device(struct device *device) -{ - return container_of(device, struct vlynq_device, dev); -} - -extern struct bus_type vlynq_bus_type; - -extern int __vlynq_register_driver(struct vlynq_driver *driver, - struct module *owner); - -static inline int vlynq_register_driver(struct vlynq_driver *driver) -{ - return __vlynq_register_driver(driver, THIS_MODULE); -} - -static inline void *vlynq_get_drvdata(struct vlynq_device *dev) -{ - return dev_get_drvdata(&dev->dev); -} - -static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data) -{ - dev_set_drvdata(&dev->dev, data); -} - -static inline u32 vlynq_mem_start(struct vlynq_device *dev) -{ - return dev->mem_start; -} - -static inline u32 vlynq_mem_end(struct vlynq_device *dev) -{ - return dev->mem_end; -} - -static inline u32 vlynq_mem_len(struct vlynq_device *dev) -{ - return dev->mem_end - dev->mem_start + 1; -} - -static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq) -{ - int irq = dev->irq_start + virq; - if ((irq < dev->irq_start) || (irq > dev->irq_end)) - return -EINVAL; - - return irq; -} - -static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq) -{ - if ((irq < dev->irq_start) || (irq > dev->irq_end)) - return -EINVAL; - - return irq - dev->irq_start; -} - -extern void vlynq_unregister_driver(struct vlynq_driver *driver); -extern int vlynq_enable_device(struct vlynq_device *dev); -extern void vlynq_disable_device(struct vlynq_device *dev); -extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset, - struct vlynq_mapping *mapping); -extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset, - struct vlynq_mapping *mapping); -extern int vlynq_set_local_irq(struct vlynq_device *dev, int 
virq); -extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq); - -#endif /* __VLYNQ_H__ */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 5ec7739400..3473b66317 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -19,10 +19,9 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int /* wait_queue_entry::flags */ #define WQ_FLAG_EXCLUSIVE 0x01 #define WQ_FLAG_WOKEN 0x02 -#define WQ_FLAG_BOOKMARK 0x04 -#define WQ_FLAG_CUSTOM 0x08 -#define WQ_FLAG_DONE 0x10 -#define WQ_FLAG_PRIORITY 0x20 +#define WQ_FLAG_CUSTOM 0x04 +#define WQ_FLAG_DONE 0x08 +#define WQ_FLAG_PRIORITY 0x10 /* * A single wait-queue entry structure: @@ -212,8 +211,6 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); -void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head, - unsigned int mode, void *key, wait_queue_entry_t *bookmark); void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h index 45cd42f55d..429c7b6afe 100644 --- a/include/linux/watch_queue.h +++ b/include/linux/watch_queue.h @@ -32,7 +32,7 @@ struct watch_filter { DECLARE_BITMAP(type_filter, WATCH_TYPE__NR); }; u32 nr_filters; /* Number of filters */ - struct watch_type_filter filters[]; + struct watch_type_filter filters[] __counted_by(nr_filters); }; struct watch_queue { diff --git a/include/media/cec.h b/include/media/cec.h index 9c007f8356..53e4b2eb2b 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -275,7 +275,7 @@ struct cec_adapter { u32 sequence; - char input_phys[32]; + char input_phys[40]; }; static inline void *cec_get_drvdata(const struct cec_adapter *adap) diff --git a/include/media/mipi-csi2.h b/include/media/mipi-csi2.h index c3d8f12234..40fc026425 100644 --- a/include/media/mipi-csi2.h +++ b/include/media/mipi-csi2.h @@ -19,6 +19,7 @@ #define MIPI_CSI2_DT_NULL 0x10 #define MIPI_CSI2_DT_BLANKING 0x11 #define MIPI_CSI2_DT_EMBEDDED_8B 0x12 +#define MIPI_CSI2_DT_GENERIC_LONG(n) (0x13 + (n) - 1) /* 1..4 */ #define MIPI_CSI2_DT_YUV420_8B 0x18 #define MIPI_CSI2_DT_YUV420_10B 0x19 #define MIPI_CSI2_DT_YUV420_8B_LEGACY 0x1a diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index e0a13505f8..d82dfdbf6e 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -284,7 +284,7 @@ struct video_device { struct v4l2_prio_state *prio; /* device info */ - char name[32]; + char name[64]; enum vfl_devnode_type vfl_type; enum vfl_devnode_direction vfl_dir; int minor; diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h index 8a8977a33e..f6f111fae3 100644 --- a/include/media/v4l2-device.h +++ b/include/media/v4l2-device.h @@ -13,8 +13,6 @@ #include <media/v4l2-subdev.h> #include <media/v4l2-dev.h> -#define V4L2_DEVICE_NAME_SIZE (20 + 16) - struct v4l2_ctrl_handler; /** @@ -49,7 +47,7 @@ struct v4l2_device { struct media_device *mdev; struct list_head subdevs; spinlock_t lock; - char name[V4L2_DEVICE_NAME_SIZE]; + char name[36]; void (*notify)(struct 
v4l2_subdev *sd, unsigned int notification, void *arg); struct v4l2_ctrl_handler *ctrl_handler; diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h index 4ffa914ade..3a0e258836 100644 --- a/include/media/v4l2-event.h +++ b/include/media/v4l2-event.h @@ -78,7 +78,7 @@ struct v4l2_subscribed_event { unsigned int elems; unsigned int first; unsigned int in_use; - struct v4l2_kevent events[]; + struct v4l2_kevent events[] __counted_by(elems); }; /** diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h index b39586dfba..ed0a44b6ea 100644 --- a/include/media/v4l2-mc.h +++ b/include/media/v4l2-mc.h @@ -143,6 +143,9 @@ int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd, * v4l2_pipeline_pm_get - Increase the use count of a pipeline * @entity: The root entity of a pipeline * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * * Update the use count of all entities in the pipeline and power entities on. * * This function is intended to be called in video node open. It uses @@ -157,6 +160,9 @@ int v4l2_pipeline_pm_get(struct media_entity *entity); * v4l2_pipeline_pm_put - Decrease the use count of a pipeline * @entity: The root entity of a pipeline * + * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM + * ON SUB-DEVICE DRIVERS INSTEAD. + * * Update the use count of all entities in the pipeline and power entities off. * * This function is intended to be called in video node release. It uses diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index d9fca929c1..c1f90c1223 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -446,7 +446,9 @@ enum v4l2_subdev_pre_streamon_flags { * @s_stream: start (enabled == 1) or stop (enabled == 0) streaming on the * sub-device. Failure on stop will remove any resources acquired in * streaming start, while the error code is still returned by the driver. - * Also see call_s_stream wrapper in v4l2-subdev.c. + * The caller shall track the subdev state, and shall not start or stop an + * already started or stopped subdev. Also see call_s_stream wrapper in + * v4l2-subdev.c. * * @g_pixelaspect: callback to return the pixelaspect ratio. * @@ -822,8 +824,9 @@ struct v4l2_subdev_state { * operation shall fail if the pad index it has been called on * is not valid or in case of unrecoverable failures. * - * @set_routing: enable or disable data connection routes described in the - * subdevice routing table. + * @set_routing: Enable or disable data connection routes described in the + * subdevice routing table. Subdevs that implement this operation + * must set the V4L2_SUBDEV_FL_STREAMS flag. * * @enable_streams: Enable the streams defined in streams_mask on the given * source pad. Subdevs that implement this operation must use the active @@ -948,8 +951,6 @@ struct v4l2_subdev_internal_ops { void (*release)(struct v4l2_subdev *sd); }; -#define V4L2_SUBDEV_NAME_SIZE 32 - /* Set this flag if this subdev is a i2c device. */ #define V4L2_SUBDEV_FL_IS_I2C (1U << 0) /* Set this flag if this subdev is a spi device. 
*/ @@ -1059,7 +1060,7 @@ struct v4l2_subdev { const struct v4l2_subdev_ops *ops; const struct v4l2_subdev_internal_ops *internal_ops; struct v4l2_ctrl_handler *ctrl_handler; - char name[V4L2_SUBDEV_NAME_SIZE]; + char name[52]; u32 grp_id; void *dev_priv; void *host_priv; diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h deleted file mode 100644 index 2e01b2e9a1..0000000000 --- a/include/media/videobuf-core.h +++ /dev/null @@ -1,233 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * generic helper functions for handling video4linux capture buffers - * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> - * - * Highly based on video-buf written originally by: - * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> - * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org> - * (c) 2006 Ted Walther and John Sokol - */ - -#ifndef _VIDEOBUF_CORE_H -#define _VIDEOBUF_CORE_H - -#include <linux/poll.h> -#include <linux/videodev2.h> - -#define UNSET (-1U) - - -struct videobuf_buffer; -struct videobuf_queue; - -/* --------------------------------------------------------------------- */ - -/* - * A small set of helper functions to manage video4linux buffers. - * - * struct videobuf_buffer holds the data structures used by the helper - * functions, additionally some commonly used fields for v4l buffers - * (width, height, lists, waitqueue) are in there. That struct should - * be used as first element in the drivers buffer struct. - * - * about the mmap helpers (videobuf_mmap_*): - * - * The mmaper function allows to map any subset of contiguous buffers. - * This includes one mmap() call for all buffers (which the original - * video4linux API uses) as well as one mmap() for every single buffer - * (which v4l2 uses). - * - * If there is a valid mapping for a buffer, buffer->baddr/bsize holds - * userspace address + size which can be fed into the - * videobuf_dma_init_user function listed above. - * - */ - -struct videobuf_mapping { - unsigned int count; - struct videobuf_queue *q; -}; - -enum videobuf_state { - VIDEOBUF_NEEDS_INIT = 0, - VIDEOBUF_PREPARED = 1, - VIDEOBUF_QUEUED = 2, - VIDEOBUF_ACTIVE = 3, - VIDEOBUF_DONE = 4, - VIDEOBUF_ERROR = 5, - VIDEOBUF_IDLE = 6, -}; - -struct videobuf_buffer { - unsigned int i; - u32 magic; - - /* info about the buffer */ - unsigned int width; - unsigned int height; - unsigned int bytesperline; /* use only if != 0 */ - unsigned long size; - enum v4l2_field field; - enum videobuf_state state; - struct list_head stream; /* QBUF/DQBUF list */ - - /* touched by irq handler */ - struct list_head queue; - wait_queue_head_t done; - unsigned int field_count; - u64 ts; - - /* Memory type */ - enum v4l2_memory memory; - - /* buffer size */ - size_t bsize; - - /* buffer offset (mmap + overlay) */ - size_t boff; - - /* buffer addr (userland ptr!) 
*/ - unsigned long baddr; - - /* for mmap'ed buffers */ - struct videobuf_mapping *map; - - /* Private pointer to allow specific methods to store their data */ - int privsize; - void *priv; -}; - -struct videobuf_queue_ops { - int (*buf_setup)(struct videobuf_queue *q, - unsigned int *count, unsigned int *size); - int (*buf_prepare)(struct videobuf_queue *q, - struct videobuf_buffer *vb, - enum v4l2_field field); - void (*buf_queue)(struct videobuf_queue *q, - struct videobuf_buffer *vb); - void (*buf_release)(struct videobuf_queue *q, - struct videobuf_buffer *vb); -}; - -#define MAGIC_QTYPE_OPS 0x12261003 - -/* Helper operations - device type dependent */ -struct videobuf_qtype_ops { - u32 magic; - - struct videobuf_buffer *(*alloc_vb)(size_t size); - void *(*vaddr) (struct videobuf_buffer *buf); - int (*iolock) (struct videobuf_queue *q, - struct videobuf_buffer *vb, - struct v4l2_framebuffer *fbuf); - int (*sync) (struct videobuf_queue *q, - struct videobuf_buffer *buf); - int (*mmap_mapper) (struct videobuf_queue *q, - struct videobuf_buffer *buf, - struct vm_area_struct *vma); -}; - -struct videobuf_queue { - struct mutex vb_lock; - struct mutex *ext_lock; - spinlock_t *irqlock; - struct device *dev; - - wait_queue_head_t wait; /* wait if queue is empty */ - - enum v4l2_buf_type type; - unsigned int msize; - enum v4l2_field field; - enum v4l2_field last; /* for field=V4L2_FIELD_ALTERNATE */ - struct videobuf_buffer *bufs[VIDEO_MAX_FRAME]; - const struct videobuf_queue_ops *ops; - struct videobuf_qtype_ops *int_ops; - - unsigned int streaming:1; - unsigned int reading:1; - - /* capture via mmap() + ioctl(QBUF/DQBUF) */ - struct list_head stream; - - /* capture via read() */ - unsigned int read_off; - struct videobuf_buffer *read_buf; - - /* driver private data */ - void *priv_data; -}; - -static inline void videobuf_queue_lock(struct videobuf_queue *q) -{ - if (!q->ext_lock) - mutex_lock(&q->vb_lock); -} - -static inline void videobuf_queue_unlock(struct videobuf_queue *q) -{ - if (!q->ext_lock) - mutex_unlock(&q->vb_lock); -} - -int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb, - int non_blocking, int intr); -int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb, - struct v4l2_framebuffer *fbuf); - -struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q); - -/* Used on videobuf-dvb */ -void *videobuf_queue_to_vaddr(struct videobuf_queue *q, - struct videobuf_buffer *buf); - -void videobuf_queue_core_init(struct videobuf_queue *q, - const struct videobuf_queue_ops *ops, - struct device *dev, - spinlock_t *irqlock, - enum v4l2_buf_type type, - enum v4l2_field field, - unsigned int msize, - void *priv, - struct videobuf_qtype_ops *int_ops, - struct mutex *ext_lock); -int videobuf_queue_is_busy(struct videobuf_queue *q); -void videobuf_queue_cancel(struct videobuf_queue *q); - -enum v4l2_field videobuf_next_field(struct videobuf_queue *q); -int videobuf_reqbufs(struct videobuf_queue *q, - struct v4l2_requestbuffers *req); -int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b); -int videobuf_qbuf(struct videobuf_queue *q, - struct v4l2_buffer *b); -int videobuf_dqbuf(struct videobuf_queue *q, - struct v4l2_buffer *b, int nonblocking); -int videobuf_streamon(struct videobuf_queue *q); -int videobuf_streamoff(struct videobuf_queue *q); - -void videobuf_stop(struct videobuf_queue *q); - -int videobuf_read_start(struct videobuf_queue *q); -void videobuf_read_stop(struct videobuf_queue *q); -ssize_t 
videobuf_read_stream(struct videobuf_queue *q, - char __user *data, size_t count, loff_t *ppos, - int vbihack, int nonblocking); -ssize_t videobuf_read_one(struct videobuf_queue *q, - char __user *data, size_t count, loff_t *ppos, - int nonblocking); -__poll_t videobuf_poll_stream(struct file *file, - struct videobuf_queue *q, - poll_table *wait); - -int videobuf_mmap_setup(struct videobuf_queue *q, - unsigned int bcount, unsigned int bsize, - enum v4l2_memory memory); -int __videobuf_mmap_setup(struct videobuf_queue *q, - unsigned int bcount, unsigned int bsize, - enum v4l2_memory memory); -int videobuf_mmap_free(struct videobuf_queue *q); -int videobuf_mmap_mapper(struct videobuf_queue *q, - struct vm_area_struct *vma); - -#endif diff --git a/include/media/videobuf-dma-contig.h b/include/media/videobuf-dma-contig.h deleted file mode 100644 index 525883b2c5..0000000000 --- a/include/media/videobuf-dma-contig.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * helper functions for physically contiguous capture buffers - * - * The functions support hardware lacking scatter gather support - * (i.e. the buffers must be linear in physical memory) - * - * Copyright (c) 2008 Magnus Damm - */ -#ifndef _VIDEOBUF_DMA_CONTIG_H -#define _VIDEOBUF_DMA_CONTIG_H - -#include <linux/dma-mapping.h> -#include <media/videobuf-core.h> - -void videobuf_queue_dma_contig_init(struct videobuf_queue *q, - const struct videobuf_queue_ops *ops, - struct device *dev, - spinlock_t *irqlock, - enum v4l2_buf_type type, - enum v4l2_field field, - unsigned int msize, - void *priv, - struct mutex *ext_lock); - -dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf); -void videobuf_dma_contig_free(struct videobuf_queue *q, - struct videobuf_buffer *buf); - -#endif /* _VIDEOBUF_DMA_CONTIG_H */ diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h deleted file mode 100644 index 930ff8d454..0000000000 --- a/include/media/videobuf-dma-sg.h +++ /dev/null @@ -1,102 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * helper functions for SG DMA video4linux capture buffers - * - * The functions expect the hardware being able to scatter gather - * (i.e. the buffers are not linear in physical memory, but fragmented - * into PAGE_SIZE chunks). They also assume the driver does not need - * to touch the video data. - * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> - * - * Highly based on video-buf written originally by: - * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> - * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org> - * (c) 2006 Ted Walther and John Sokol - */ -#ifndef _VIDEOBUF_DMA_SG_H -#define _VIDEOBUF_DMA_SG_H - -#include <media/videobuf-core.h> - -/* --------------------------------------------------------------------- */ - -/* - * A small set of helper functions to manage buffers (both userland - * and kernel) for DMA. - * - * videobuf_dma_init_*() - * creates a buffer. The userland version takes a userspace - * pointer + length. The kernel version just wants the size and - * does memory allocation too using vmalloc_32(). - * - * videobuf_dma_*() - * see Documentation/core-api/dma-api-howto.rst, these functions to - * basically the same. The map function does also build a - * scatterlist for the buffer (and unmap frees it ...) - * - * videobuf_dma_free() - * no comment ... 
- * - */ - -struct videobuf_dmabuf { - u32 magic; - - /* for userland buffer */ - int offset; - size_t size; - struct page **pages; - - /* for kernel buffers */ - void *vaddr; - struct page **vaddr_pages; - dma_addr_t *dma_addr; - struct device *dev; - - /* for overlay buffers (pci-pci dma) */ - dma_addr_t bus_addr; - - /* common */ - struct scatterlist *sglist; - int sglen; - unsigned long nr_pages; - int direction; -}; - -struct videobuf_dma_sg_memory { - u32 magic; - - /* for mmap'ed buffers */ - struct videobuf_dmabuf dma; -}; - -/* - * Scatter-gather DMA buffer API. - * - * These functions provide a simple way to create a page list and a - * scatter-gather list from a kernel, userspace of physical address and map the - * memory for DMA operation. - * - * Despite the name, this is totally unrelated to videobuf, except that - * videobuf-dma-sg uses the same API internally. - */ -int videobuf_dma_free(struct videobuf_dmabuf *dma); - -int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma); -struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf); - -void *videobuf_sg_alloc(size_t size); - -void videobuf_queue_sg_init(struct videobuf_queue *q, - const struct videobuf_queue_ops *ops, - struct device *dev, - spinlock_t *irqlock, - enum v4l2_buf_type type, - enum v4l2_field field, - unsigned int msize, - void *priv, - struct mutex *ext_lock); - -#endif /* _VIDEOBUF_DMA_SG_H */ - diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h deleted file mode 100644 index e930dbb9d7..0000000000 --- a/include/media/videobuf-vmalloc.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * helper functions for vmalloc capture buffers - * - * The functions expect the hardware being able to scatter gather - * (i.e. the buffers are not linear in physical memory, but fragmented - * into PAGE_SIZE chunks). They also assume the driver does not need - * to touch the video data. 
- * - * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> - */ -#ifndef _VIDEOBUF_VMALLOC_H -#define _VIDEOBUF_VMALLOC_H - -#include <media/videobuf-core.h> - -/* --------------------------------------------------------------------- */ - -struct videobuf_vmalloc_memory { - u32 magic; - - void *vaddr; - - /* remap_vmalloc_range seems to need to run - * after mmap() on some cases */ - struct vm_area_struct *vma; -}; - -void videobuf_queue_vmalloc_init(struct videobuf_queue *q, - const struct videobuf_queue_ops *ops, - struct device *dev, - spinlock_t *irqlock, - enum v4l2_buf_type type, - enum v4l2_field field, - unsigned int msize, - void *priv, - struct mutex *ext_lock); - -void *videobuf_to_vmalloc(struct videobuf_buffer *buf); - -void videobuf_vmalloc_free(struct videobuf_buffer *buf); - -#endif diff --git a/include/net/Space.h b/include/net/Space.h index c29f3d51c0..ef42629f42 100644 --- a/include/net/Space.h +++ b/include/net/Space.h @@ -10,4 +10,3 @@ struct net_device *smc_init(int unit); struct net_device *cs89x0_probe(int unit); struct net_device *tc515_probe(int unit); struct net_device *lance_probe(int unit); -struct net_device *cops_probe(int unit); diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 5531dd0806..0754c46322 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -15,6 +15,7 @@ struct key; struct sock; struct socket; struct rxrpc_call; +struct rxrpc_peer; enum rxrpc_abort_reason; enum rxrpc_interruptibility { @@ -41,13 +42,14 @@ void rxrpc_kernel_new_call_notification(struct socket *, rxrpc_notify_new_call_t, rxrpc_discard_new_call_t); struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, - struct sockaddr_rxrpc *srx, + struct rxrpc_peer *peer, struct key *key, unsigned long user_call_ID, s64 tx_total_len, u32 hard_timeout, gfp_t gfp, rxrpc_notify_rx_t notify_rx, + u16 service_id, bool upgrade, enum rxrpc_interruptibility interruptibility, unsigned int debug_id); @@ -60,9 +62,14 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, u32, int, enum rxrpc_abort_reason); void rxrpc_kernel_shutdown_call(struct socket *sock, struct rxrpc_call *call); void rxrpc_kernel_put_call(struct socket *sock, struct rxrpc_call *call); -void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, - struct sockaddr_rxrpc *); -bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *); +struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock, + struct sockaddr_rxrpc *srx, gfp_t gfp); +void rxrpc_kernel_put_peer(struct rxrpc_peer *peer); +struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer); +struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call); +const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer); +const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer); +unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *); int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 49c4640027..afd40dce40 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -46,12 +46,6 @@ struct scm_stat { #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) -#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) -#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) -#define unix_state_lock_nested(s) \ - spin_lock_nested(&unix_sk(s)->lock, \ - 
SINGLE_DEPTH_NESTING) - /* The AF_UNIX socket */ struct unix_sock { /* WARNING: sk has to be the first member */ @@ -77,6 +71,20 @@ struct unix_sock { #define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk) #define unix_peer(sk) (unix_sk(sk)->peer) +#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) +#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) +enum unix_socket_lock_class { + U_LOCK_NORMAL, + U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */ + U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */ +}; + +static inline void unix_state_lock_nested(struct sock *sk, + enum unix_socket_lock_class subclass) +{ + spin_lock_nested(&unix_sk(sk)->lock, subclass); +} + #define peer_wait peer_wq.wait long unix_inq_len(struct sock *sk); diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index dc3cb16835..535701efc1 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -177,6 +177,9 @@ struct vsock_transport { /* Read a single skb */ int (*read_skb)(struct vsock_sock *, skb_read_actor_t); + + /* Zero-copy. */ + bool (*msgzerocopy_allow)(void); }; /**** CORE ****/ @@ -241,4 +244,8 @@ static inline void __init vsock_bpf_build_proto(void) {} #endif +static inline bool vsock_msgzerocopy_allow(const struct vsock_transport *t) +{ + return t->msgzerocopy_allow && t->msgzerocopy_allow(); +} #endif /* __AF_VSOCK_H__ */ diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index aa90adc3b2..7ffa8c192c 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -541,7 +541,7 @@ static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk, return ERR_PTR(-EFAULT); } - skb->priority = sk->sk_priority; + skb->priority = READ_ONCE(sk->sk_priority); return skb; } diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index b83cfcf666..65dd286693 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1296,29 +1296,6 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, return NULL; } -static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *hdev, - __u8 handle) -{ - struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *c; - - rcu_read_lock(); - - list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK) - continue; - - if (handle != BT_ISO_QOS_BIG_UNSET && handle == c->iso_qos.bcast.big) { - rcu_read_unlock(); - return c; - } - } - - rcu_read_unlock(); - - return NULL; -} - static inline struct hci_conn * hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big) { @@ -1406,6 +1383,26 @@ static inline void hci_conn_hash_list_state(struct hci_dev *hdev, rcu_read_unlock(); } +static inline void hci_conn_hash_list_flag(struct hci_dev *hdev, + hci_conn_func_t func, __u8 type, + __u8 flag, void *data) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + if (!func) + return; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == type && test_bit(flag, &c->flags)) + func(c, data); + } + + rcu_read_unlock(); +} + static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index 57eeb07aeb..6efbc21521 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -80,6 +80,8 @@ int hci_start_per_adv_sync(struct hci_dev 
*hdev, u8 instance, u8 data_len, u8 *data, u32 flags, u16 min_interval, u16 max_interval, u16 sync_interval); +int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance); + int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk, u8 instance, bool force); int hci_disable_advertising_sync(struct hci_dev *hdev); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 153a8c3e72..8f2c487618 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -76,6 +76,8 @@ struct wiphy; * @IEEE80211_CHAN_DISABLED: This channel is disabled. * @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes * sending probe requests or beaconing. + * @IEEE80211_CHAN_PSD: Power spectral density (in dBm) is set for this + * channel. * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel. * @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel * is not permitted. @@ -119,7 +121,7 @@ struct wiphy; enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = 1<<0, IEEE80211_CHAN_NO_IR = 1<<1, - /* hole at 1<<2 */ + IEEE80211_CHAN_PSD = 1<<2, IEEE80211_CHAN_RADAR = 1<<3, IEEE80211_CHAN_NO_HT40PLUS = 1<<4, IEEE80211_CHAN_NO_HT40MINUS = 1<<5, @@ -171,6 +173,7 @@ enum ieee80211_channel_flags { * on this channel. * @dfs_state_entered: timestamp (jiffies) when the dfs state was entered. * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels. + * @psd: power spectral density (in dBm) */ struct ieee80211_channel { enum nl80211_band band; @@ -187,6 +190,7 @@ struct ieee80211_channel { enum nl80211_dfs_state dfs_state; unsigned long dfs_state_entered; unsigned int dfs_cac_ms; + s8 psd; }; /** @@ -410,6 +414,19 @@ struct ieee80211_sta_eht_cap { u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN]; }; +/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */ +#ifdef __CHECKER__ +/* + * This is used to mark the sband->iftype_data pointer which is supposed + * to be an array with special access semantics (per iftype), but a lot + * of code got it wrong in the past, so with this marking sparse will be + * noisy when the pointer is used directly. + */ +# define __iftd __attribute__((noderef, address_space(__iftype_data))) +#else +# define __iftd +#endif /* __CHECKER__ */ + /** * struct ieee80211_sband_iftype_data - sband data per interface type * @@ -543,10 +560,48 @@ struct ieee80211_supported_band { struct ieee80211_sta_s1g_cap s1g_cap; struct ieee80211_edmg edmg_cap; u16 n_iftype_data; - const struct ieee80211_sband_iftype_data *iftype_data; + const struct ieee80211_sband_iftype_data __iftd *iftype_data; }; /** + * _ieee80211_set_sband_iftype_data - set sband iftype data array + * @sband: the sband to initialize + * @iftd: the iftype data array pointer + * @n_iftd: the length of the iftype data array + * + * Set the sband iftype data array; use this where the length cannot + * be derived from the ARRAY_SIZE() of the argument, but prefer + * ieee80211_set_sband_iftype_data() where it can be used. 
+ */ +static inline void +_ieee80211_set_sband_iftype_data(struct ieee80211_supported_band *sband, + const struct ieee80211_sband_iftype_data *iftd, + u16 n_iftd) +{ + sband->iftype_data = (const void __iftd __force *)iftd; + sband->n_iftype_data = n_iftd; +} + +/** + * ieee80211_set_sband_iftype_data - set sband iftype data array + * @sband: the sband to initialize + * @iftd: the iftype data array + */ +#define ieee80211_set_sband_iftype_data(sband, iftd) \ + _ieee80211_set_sband_iftype_data(sband, iftd, ARRAY_SIZE(iftd)) + +/** + * for_each_sband_iftype_data - iterate sband iftype data entries + * @sband: the sband whose iftype_data array to iterate + * @i: iterator counter + * @iftd: iftype data pointer to set + */ +#define for_each_sband_iftype_data(sband, i, iftd) \ + for (i = 0, iftd = (const void __force *)&(sband)->iftype_data[i]; \ + i < (sband)->n_iftype_data; \ + i++, iftd = (const void __force *)&(sband)->iftype_data[i]) + +/** * ieee80211_get_sband_iftype_data - return sband data for a given iftype * @sband: the sband to search for the STA on * @iftype: enum nl80211_iftype @@ -557,6 +612,7 @@ static inline const struct ieee80211_sband_iftype_data * ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, u8 iftype) { + const struct ieee80211_sband_iftype_data *data; int i; if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) @@ -565,10 +621,7 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, if (iftype == NL80211_IFTYPE_AP_VLAN) iftype = NL80211_IFTYPE_AP; - for (i = 0; i < sband->n_iftype_data; i++) { - const struct ieee80211_sband_iftype_data *data = - &sband->iftype_data[i]; - + for_each_sband_iftype_data(sband, i, data) { if (data->types_mask & BIT(iftype)) return data; } @@ -954,6 +1007,30 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy, enum nl80211_iftype iftype); /** + * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable and we + * can/need start CAC on such channel + * @wiphy: the wiphy to validate against + * @chandef: the channel definition to check + * + * Return: true if all channels available and at least + * one channel requires CAC (NL80211_DFS_USABLE) + */ +bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy, + const struct cfg80211_chan_def *chandef); + +/** + * cfg80211_chandef_dfs_cac_time - get the DFS CAC time (in ms) for given + * channel definition + * @wiphy: the wiphy to validate against + * @chandef: the channel definition to check + * + * Returns: DFS CAC time (in ms) which applies for this channel definition + */ +unsigned int +cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy, + const struct cfg80211_chan_def *chandef); + +/** * nl80211_send_chandef - sends the channel definition. * @msg: the msg to send channel definition * @chandef: the channel definition to check @@ -1289,6 +1366,7 @@ struct cfg80211_acl_data { * struct cfg80211_fils_discovery - FILS discovery parameters from * IEEE Std 802.11ai-2016, Annex C.3 MIB detail. * + * @update: Set to true if the feature configuration should be updated. * @min_interval: Minimum packet interval in TUs (0 - 10000) * @max_interval: Maximum packet interval in TUs (0 - 10000) * @tmpl_len: Template length @@ -1296,6 +1374,7 @@ struct cfg80211_acl_data { * frame headers. */ struct cfg80211_fils_discovery { + bool update; u32 min_interval; u32 max_interval; size_t tmpl_len; @@ -1306,6 +1385,7 @@ struct cfg80211_fils_discovery { * struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe * response parameters in 6GHz. 
* + * @update: Set to true if the feature configuration should be updated. * @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned * in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive * scanning @@ -1313,6 +1393,7 @@ struct cfg80211_fils_discovery { * @tmpl: Template data for probe response */ struct cfg80211_unsol_bcast_probe_resp { + bool update; u32 interval; size_t tmpl_len; const u8 *tmpl; @@ -1399,6 +1480,22 @@ struct cfg80211_ap_settings { u16 punct_bitmap; }; + +/** + * struct cfg80211_ap_update - AP configuration update + * + * Subset of &struct cfg80211_ap_settings, for updating a running AP. + * + * @beacon: beacon data + * @fils_discovery: FILS discovery transmission parameters + * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters + */ +struct cfg80211_ap_update { + struct cfg80211_beacon_data beacon; + struct cfg80211_fils_discovery fils_discovery; + struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; +}; + /** * struct cfg80211_csa_settings - channel switch settings * @@ -2346,7 +2443,7 @@ struct mesh_config { * @user_mpm: userspace handles all MPM functions * @dtim_period: DTIM period to use * @beacon_interval: beacon interval to use - * @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a] + * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a] * @basic_rates: basic rates to use when creating the mesh * @beacon_rate: bitrate to be used for beacons * @userspace_handles_dfs: whether user space controls DFS operation, i.e. @@ -2487,7 +2584,6 @@ struct cfg80211_scan_6ghz_params { * @n_ssids: number of SSIDs * @channels: channels to scan on. * @n_channels: total number of channels to scan - * @scan_width: channel width for scanning * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets * @duration: how long to listen on each channel, in TUs. If @@ -2517,7 +2613,6 @@ struct cfg80211_scan_request { struct cfg80211_ssid *ssids; int n_ssids; u32 n_channels; - enum nl80211_bss_scan_width scan_width; const u8 *ie; size_t ie_len; u16 duration; @@ -2566,7 +2661,7 @@ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask) * or no match (RSSI only) * @rssi_thold: don't report scan results below this threshold (in s32 dBm) * @per_band_rssi_thold: Minimum rssi threshold for each band to be applied - * for filtering out scan results received. Drivers advertize this support + * for filtering out scan results received. Drivers advertise this support * of band specific rssi based filtering through the feature capability * %NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD. These band * specific rssi thresholds take precedence over rssi_thold, if specified. @@ -2612,14 +2707,13 @@ struct cfg80211_bss_select_adjust { * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans) * @n_ssids: number of SSIDs * @n_channels: total number of channels to scan - * @scan_width: channel width for scanning * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets * @flags: control flags from &enum nl80211_scan_flags * @match_sets: sets of parameters to be matched for a scan result * entry to be considered valid and to be passed to the host * (others are filtered out). - * If ommited, all results are passed. + * If omitted, all results are passed. 
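Pulling the AP-update pieces above together: struct cfg80211_ap_update bundles the beacon data with the FILS discovery and unsolicited-probe-response parameters, and the new @update flags tell the driver which optional blocks userspace actually changed. A hedged sketch of a change_beacon() implementation under the reworked op (see the cfg80211_ops hunk further down; the my_apply_*() helpers are hypothetical):

static int my_change_beacon(struct wiphy *wiphy, struct net_device *dev,
			    struct cfg80211_ap_update *info)
{
	my_apply_beacon(dev, &info->beacon);

	/* only reprogram FILS discovery when userspace asked for it */
	if (info->fils_discovery.update)
		my_apply_fils(dev, &info->fils_discovery);

	if (info->unsol_bcast_probe_resp.update)
		my_apply_unsol_probe_resp(dev, &info->unsol_bcast_probe_resp);

	return 0;
}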
* @n_match_sets: number of match sets * @report_results: indicates that results were reported for this request * @wiphy: the wiphy this was for @@ -2653,14 +2747,13 @@ struct cfg80211_bss_select_adjust { * to the specified band while deciding whether a better BSS is reported * using @relative_rssi. If delta is a negative number, the BSSs that * belong to the specified band will be penalized by delta dB in relative - * comparisions. + * comparisons. */ struct cfg80211_sched_scan_request { u64 reqid; struct cfg80211_ssid *ssids; int n_ssids; u32 n_channels; - enum nl80211_bss_scan_width scan_width; const u8 *ie; size_t ie_len; u32 flags; @@ -2708,7 +2801,6 @@ enum cfg80211_signal_type { /** * struct cfg80211_inform_bss - BSS inform data * @chan: channel the frame was received on - * @scan_width: scan width that was used * @signal: signal strength value, according to the wiphy's * signal type * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was @@ -2728,7 +2820,6 @@ enum cfg80211_signal_type { */ struct cfg80211_inform_bss { struct ieee80211_channel *chan; - enum nl80211_bss_scan_width scan_width; s32 signal; u64 boottime_ns; u64 parent_tsf; @@ -2762,7 +2853,6 @@ struct cfg80211_bss_ies { * for use in scan results and similar. * * @channel: channel this BSS is on - * @scan_width: width of the control channel * @bssid: BSSID of the BSS * @beacon_interval: the beacon interval as from the frame * @capability: the capability field in host byte order @@ -2775,6 +2865,8 @@ struct cfg80211_bss_ies { * own the beacon_ies, but they're just pointers to the ones from the * @hidden_beacon_bss struct) * @proberesp_ies: the information elements from the last Probe Response frame + * @proberesp_ecsa_stuck: ECSA element is stuck in the Probe Response frame, + * cannot rely on it having valid data * @hidden_beacon_bss: in case this BSS struct represents a probe response from * a BSS that hides the SSID in its beacon, this points to the BSS struct * that holds the beacon data. @beacon_ies is still valid, of course, and @@ -2792,7 +2884,6 @@ struct cfg80211_bss_ies { */ struct cfg80211_bss { struct ieee80211_channel *channel; - enum nl80211_bss_scan_width scan_width; const struct cfg80211_bss_ies __rcu *ies; const struct cfg80211_bss_ies __rcu *beacon_ies; @@ -2811,6 +2902,8 @@ struct cfg80211_bss { u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; + u8 proberesp_ecsa_stuck:1; + u8 bssid_index; u8 max_bssid_indicator; @@ -2891,12 +2984,15 @@ struct cfg80211_auth_request { * @elems_len: length of the elements * @disabled: If set this link should be included during association etc. but it * should not be used until enabled by the AP MLD. + * @error: per-link error code, must be <= 0. If there is an error, then the + * operation as a whole must fail. 
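With scan_width gone from struct cfg80211_inform_bss, reporting a received beacon or probe response shrinks to the sketch below; rx_channel, signal_dbm, mgmt and len are assumed to come from the driver's RX path:

struct cfg80211_inform_bss data = {
	.chan = rx_channel,
	.signal = signal_dbm,	/* interpreted per wiphy->signal_type */
	.boottime_ns = ktime_get_boottime_ns(),
};
struct cfg80211_bss *bss;

bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, GFP_ATOMIC);
if (bss)
	cfg80211_put_bss(wiphy, bss);	/* release the returned reference */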
*/ struct cfg80211_assoc_link { struct cfg80211_bss *bss; const u8 *elems; size_t elems_len; bool disabled; + int error; }; /** @@ -3495,7 +3591,7 @@ struct cfg80211_update_ft_ies_params { * This structure provides information needed to transmit a mgmt frame * * @chan: channel to use - * @offchan: indicates wether off channel operation is required + * @offchan: indicates whether off channel operation is required * @wait: duration for ROC * @buf: buffer to transmit * @len: buffer length @@ -3613,7 +3709,7 @@ struct cfg80211_nan_func_filter { * @publish_bcast: if true, the solicited publish should be broadcasted * @subscribe_active: if true, the subscribe is active * @followup_id: the instance ID for follow up - * @followup_reqid: the requestor instance ID for follow up + * @followup_reqid: the requester instance ID for follow up * @followup_dest: MAC address of the recipient of the follow up * @ttl: time to live counter in DW. * @serv_spec_info: Service Specific Info @@ -4450,7 +4546,7 @@ struct cfg80211_ops { int (*start_ap)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *settings); int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_beacon_data *info); + struct cfg80211_ap_update *info); int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id); @@ -4816,6 +4912,8 @@ struct cfg80211_ops { * @WIPHY_FLAG_SUPPORTS_EXT_KCK_32: The device supports 32-byte KCK keys. * @WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER: The device could handle reg notify for * NL80211_REGDOM_SET_BY_DRIVER. + * @WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON: reg_call_notifier() is called if driver + * set this flag to update channels on beacon hints. */ enum wiphy_flags { WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0), @@ -4842,6 +4940,7 @@ enum wiphy_flags { WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22), WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23), WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER = BIT(24), + WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON = BIT(25), }; /** @@ -5882,7 +5981,7 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy, /** * wiphy_delayed_work_flush - flush previously queued delayed work * @wiphy: the wiphy, for debug purposes - * @work: the work to flush + * @dwork: the delayed work to flush * * Flush the work (i.e. run it if pending). This must be called * under the wiphy mutex acquired by wiphy_lock(). @@ -5938,8 +6037,6 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy, * @mgmt_registrations: list of registrations for management frames * @mgmt_registrations_need_update: mgmt registrations were updated, * need to propagate the update to the driver - * @mtx: mutex used to lock data in this struct, may be used by drivers - * and some API functions require it held * @beacon_interval: beacon interval used on this device for transmitting * beacons, 0 when not valid * @address: The address for this device, valid only if @netdev is %NULL @@ -5986,8 +6083,6 @@ struct wireless_dev { struct list_head mgmt_registrations; u8 mgmt_registrations_need_update:1; - struct mutex mtx; - bool use_4addr, is_running, registered, registering; u8 address[ETH_ALEN] __aligned(sizeof(u16)); @@ -6278,13 +6373,11 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband, /** * ieee80211_mandatory_rates - get mandatory rates for a given band * @sband: the band to look for rates in - * @scan_width: width of the control channel * * This function returns a bitmap of the mandatory rates for the given * band, bits are set according to the rate position in the bitrates array. 
*/ -u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband, - enum nl80211_bss_scan_width scan_width); +u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband); /* * Radiotap parsing functions -- for controlled injection support @@ -6625,7 +6718,7 @@ static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) * @ies: data consisting of IEs * @len: length of data * - * Return: %NULL if the etended element could not be found or if + * Return: %NULL if the extended element could not be found or if * the element is invalid (claims to be longer than the given * data) or if the byte array doesn't match; otherwise return the * requested element struct. @@ -6772,7 +6865,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2); /** * regulatory_set_wiphy_regd - set regdom info for self managed drivers * @wiphy: the wireless device we want to process the regulatory domain on - * @rd: the regulatory domain informatoin to use for this wiphy + * @rd: the regulatory domain information to use for this wiphy * * Set the regulatory domain information for self-managed wiphys, only they * may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more @@ -6863,7 +6956,7 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy); * Regulatory self-managed driver can use it to proactively * * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried. - * @freq: the freqency(in MHz) to be queried. + * @freq: the frequency (in MHz) to be queried. * @rule: pointer to store the wmm rule from the regulatory db. * * Self-managed wireless drivers can use this function to query @@ -6946,22 +7039,6 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, gfp_t gfp); static inline struct cfg80211_bss * __must_check -cfg80211_inform_bss_width_frame(struct wiphy *wiphy, - struct ieee80211_channel *rx_channel, - enum nl80211_bss_scan_width scan_width, - struct ieee80211_mgmt *mgmt, size_t len, - s32 signal, gfp_t gfp) -{ - struct cfg80211_inform_bss data = { - .chan = rx_channel, - .scan_width = scan_width, - .signal = signal, - }; - - return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp); -} - -static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_frame(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, struct ieee80211_mgmt *mgmt, size_t len, @@ -6969,7 +7046,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, { struct cfg80211_inform_bss data = { .chan = rx_channel, - .scan_width = NL80211_BSS_CHAN_WIDTH_20, .signal = signal, }; @@ -7072,26 +7148,6 @@ cfg80211_inform_bss_data(struct wiphy *wiphy, gfp_t gfp); static inline struct cfg80211_bss * __must_check -cfg80211_inform_bss_width(struct wiphy *wiphy, - struct ieee80211_channel *rx_channel, - enum nl80211_bss_scan_width scan_width, - enum cfg80211_bss_frame_type ftype, - const u8 *bssid, u64 tsf, u16 capability, - u16 beacon_interval, const u8 *ie, size_t ielen, - s32 signal, gfp_t gfp) -{ - struct cfg80211_inform_bss data = { - .chan = rx_channel, - .scan_width = scan_width, - .signal = signal, - }; - - return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf, - capability, beacon_interval, ie, ielen, - gfp); -} - -static inline struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum cfg80211_bss_frame_type ftype, @@ -7101,7 +7157,6 @@ cfg80211_inform_bss(struct wiphy *wiphy, { struct cfg80211_inform_bss data = { .chan = rx_channel, - .scan_width = NL80211_BSS_CHAN_WIDTH_20, .signal = signal, }; @@ -7186,19 +7241,6 
@@ void cfg80211_bss_iter(struct wiphy *wiphy, void *data), void *iter_data); -static inline enum nl80211_bss_scan_width -cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef) -{ - switch (chandef->width) { - case NL80211_CHAN_WIDTH_5: - return NL80211_BSS_CHAN_WIDTH_5; - case NL80211_CHAN_WIDTH_10: - return NL80211_BSS_CHAN_WIDTH_10; - default: - return NL80211_BSS_CHAN_WIDTH_20; - } -} - /** * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame * @dev: network device @@ -7231,7 +7273,7 @@ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); /** - * struct cfg80211_rx_assoc_resp - association response data + * struct cfg80211_rx_assoc_resp_data - association response data * @bss: the BSS that association was requested with, ownership of the pointer * moves to cfg80211 in the call to cfg80211_rx_assoc_resp() * @buf: (Re)Association Response frame (header + body) @@ -7246,7 +7288,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); * @links.status: Set this (along with a BSS pointer) for links that * were rejected by the AP. */ -struct cfg80211_rx_assoc_resp { +struct cfg80211_rx_assoc_resp_data { const u8 *buf; size_t len; const u8 *req_ies; @@ -7263,7 +7305,7 @@ struct cfg80211_rx_assoc_resp { /** * cfg80211_rx_assoc_resp - notification of processed association response * @dev: network device - * @data: association response data, &struct cfg80211_rx_assoc_resp + * @data: association response data, &struct cfg80211_rx_assoc_resp_data * * After being asked to associate via cfg80211_ops::assoc() the driver must * call either this function or cfg80211_auth_timeout(). @@ -7271,7 +7313,7 @@ struct cfg80211_rx_assoc_resp { * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_rx_assoc_resp(struct net_device *dev, - struct cfg80211_rx_assoc_resp *data); + struct cfg80211_rx_assoc_resp_data *data); /** * struct cfg80211_assoc_failure - association failure data @@ -7990,7 +8032,8 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, * cfg80211_port_authorized - notify cfg80211 of successful security association * * @dev: network device - * @bssid: the BSSID of the AP + * @peer_addr: BSSID of the AP/P2P GO in case of STA/GC or STA/GC MAC address + * in case of AP/P2P GO * @td_bitmap: transition disable policy * @td_bitmap_len: Length of transition disable policy * @gfp: allocation flags @@ -8001,8 +8044,11 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, * should be preceded with a call to cfg80211_connect_result(), * cfg80211_connect_done(), cfg80211_connect_bss() or cfg80211_roamed() to * indicate the 802.11 association. + * This function can also be called by AP/P2P GO driver that supports + * authentication offload. In this case the peer_mac passed is that of + * associated STA/GC. */ -void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid, +void cfg80211_port_authorized(struct net_device *dev, const u8 *peer_addr, const u8* td_bitmap, u8 td_bitmap_len, gfp_t gfp); /** @@ -8591,7 +8637,7 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, * @link_id: the link ID for MLO, must be 0 for non-MLO * @punct_bitmap: the new puncturing bitmap * - * Caller must acquire wdev_lock, therefore must only be called from sleepable + * Caller must hold wiphy mutex, therefore must only be called from sleepable * driver context! 
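Since the locking contract above moved from wdev_lock to the wiphy mutex, a driver notifying a channel switch from its own sleepable context would look roughly like this sketch; new_chandef is assumed to be the post-switch channel definition:

	/* must not already hold the wiphy mutex (e.g. not from wiphy_work) */
	wiphy_lock(wiphy);
	cfg80211_ch_switch_notify(netdev, &new_chandef, /* link_id */ 0,
				  /* punct_bitmap */ 0);
	wiphy_unlock(wiphy);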
*/ void cfg80211_ch_switch_notify(struct net_device *dev, @@ -8831,6 +8877,18 @@ static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen, } /** + * ieee80211_fragment_element - fragment the last element in skb + * @skb: The skbuf that the element was added to + * @len_pos: Pointer to length of the element to fragment + * @frag_id: The element ID to use for fragments + * + * This function fragments all data after @len_pos, adding fragmentation + * elements with the given ID as appropriate. The SKB will grow in size + * accordingly. + */ +void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id); + +/** * cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN * @wdev: the wireless device reporting the wakeup * @wakeup: the wakeup report @@ -9079,9 +9137,9 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype, /** * cfg80211_assoc_comeback - notification of association that was - * temporarly rejected with a comeback + * temporarily rejected with a comeback * @netdev: network device - * @ap_addr: AP (MLD) address that rejected the assocation + * @ap_addr: AP (MLD) address that rejected the association * @timeout: timeout interval value TUs. * * this function may sleep. the caller must hold the corresponding wdev's mutex. @@ -9245,4 +9303,50 @@ bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap, */ void cfg80211_links_removed(struct net_device *dev, u16 link_mask); +#ifdef CONFIG_CFG80211_DEBUGFS +/** + * wiphy_locked_debugfs_read - do a locked read in debugfs + * @wiphy: the wiphy to use + * @file: the file being read + * @buf: the buffer to fill and then read from + * @bufsize: size of the buffer + * @userbuf: the user buffer to copy to + * @count: read count + * @ppos: read position + * @handler: the read handler to call (under wiphy lock) + * @data: additional data to pass to the read handler + */ +ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file, + char *buf, size_t bufsize, + char __user *userbuf, size_t count, + loff_t *ppos, + ssize_t (*handler)(struct wiphy *wiphy, + struct file *file, + char *buf, + size_t bufsize, + void *data), + void *data); + +/** + * wiphy_locked_debugfs_write - do a locked write in debugfs + * @wiphy: the wiphy to use + * @file: the file being written to + * @buf: the buffer to copy the user data to + * @bufsize: size of the buffer + * @userbuf: the user buffer to copy from + * @count: read count + * @handler: the write handler to call (under wiphy lock) + * @data: additional data to pass to the write handler + */ +ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file, + char *buf, size_t bufsize, + const char __user *userbuf, size_t count, + ssize_t (*handler)(struct wiphy *wiphy, + struct file *file, + char *buf, + size_t count, + void *data), + void *data); +#endif + #endif /* __NET_CFG80211_H */ diff --git a/include/net/devlink.h b/include/net/devlink.h index 29fd1b4ee6..9ac394bdfb 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -150,6 +150,7 @@ struct devlink_port { struct devlink_rate *devlink_rate; struct devlink_linecard *linecard; + u32 rel_index; }; struct devlink_port_new_attrs { @@ -1697,6 +1698,8 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 contro void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller, u16 pf, u32 sf, bool external); +int devl_port_fn_devlink_set(struct devlink_port *devlink_port, + struct devlink *fn_devlink); struct devlink_rate * 
devl_rate_node_create(struct devlink *devlink, void *priv, char *node_name, struct devlink_rate *parent); @@ -1717,8 +1720,8 @@ void devlink_linecard_provision_clear(struct devlink_linecard *linecard); void devlink_linecard_provision_fail(struct devlink_linecard *linecard); void devlink_linecard_activate(struct devlink_linecard *linecard); void devlink_linecard_deactivate(struct devlink_linecard *linecard); -void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard, - struct devlink *nested_devlink); +int devlink_linecard_nested_dl_set(struct devlink_linecard *linecard, + struct devlink *nested_devlink); int devl_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, @@ -1851,36 +1854,36 @@ int devlink_info_version_running_put_ext(struct devlink_info_req *req, const char *version_value, enum devlink_info_version_type version_type); -int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg); -int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg); - -int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name); -int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg); - -int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg, - const char *name); -int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg); -int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg, - const char *name); -int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg); - -int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value); -int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value); -int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, - u16 value_len); - -int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name, - bool value); -int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name, - u8 value); -int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name, - u32 value); -int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name, - u64 value); -int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name, - const char *value); -int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name, - const void *value, u32 value_len); +void devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg); +void devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg); + +void devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name); +void devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg); + +void devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg, + const char *name); +void devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg); +void devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg, + const char *name); +void devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg); + +void devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value); +void devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value); +void devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, + u16 value_len); + +void devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name, + bool value); +void devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name, + u8 value); +void devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name, + u32 value); +void devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name, + u64 value); +void devlink_fmsg_string_pair_put(struct 
devlink_fmsg *fmsg, const char *name, + const char *value); +void devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name, + const void *value, u32 value_len); struct devlink_health_reporter * devl_port_health_reporter_create(struct devlink_port *port, @@ -1918,6 +1921,8 @@ devlink_health_reporter_state_update(struct devlink_health_reporter *reporter, void devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter); +int devl_nested_devlink_set(struct devlink *devlink, + struct devlink *nested_devlink); bool devlink_is_reload_failed(const struct devlink *devlink); void devlink_remote_reload_actions_performed(struct devlink *devlink, enum devlink_reload_limit limit, diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index a587e83fc1..3c70ad53a4 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -20,9 +20,14 @@ FN(IP_NOPROTO) \ FN(SOCKET_RCVBUFF) \ FN(PROTO_MEM) \ + FN(TCP_AUTH_HDR) \ FN(TCP_MD5NOTFOUND) \ FN(TCP_MD5UNEXPECTED) \ FN(TCP_MD5FAILURE) \ + FN(TCP_AONOTFOUND) \ + FN(TCP_AOUNEXPECTED) \ + FN(TCP_AOKEYNOTFOUND) \ + FN(TCP_AOFAILURE) \ FN(SOCKET_BACKLOG) \ FN(TCP_FLAGS) \ FN(TCP_ZEROWINDOW) \ @@ -80,6 +85,7 @@ FN(IPV6_NDISC_BAD_OPTIONS) \ FN(IPV6_NDISC_NS_OTHERHOST) \ FN(QUEUE_PURGE) \ + FN(TC_ERROR) \ FNe(MAX) /** @@ -142,6 +148,11 @@ enum skb_drop_reason { */ SKB_DROP_REASON_PROTO_MEM, /** + * @SKB_DROP_REASON_TCP_AUTH_HDR: TCP-MD5 or TCP-AO hashes are met + * twice or set incorrectly. + */ + SKB_DROP_REASON_TCP_AUTH_HDR, + /** * @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash and one expected, * corresponding to LINUX_MIB_TCPMD5NOTFOUND */ @@ -157,6 +168,26 @@ enum skb_drop_reason { */ SKB_DROP_REASON_TCP_MD5FAILURE, /** + * @SKB_DROP_REASON_TCP_AONOTFOUND: no TCP-AO hash and one was expected, + * corresponding to LINUX_MIB_TCPAOREQUIRED + */ + SKB_DROP_REASON_TCP_AONOTFOUND, + /** + * @SKB_DROP_REASON_TCP_AOUNEXPECTED: TCP-AO hash is present and it + * was not expected, corresponding to LINUX_MIB_TCPAOKEYNOTFOUND + */ + SKB_DROP_REASON_TCP_AOUNEXPECTED, + /** + * @SKB_DROP_REASON_TCP_AOKEYNOTFOUND: TCP-AO key is unknown, + * corresponding to LINUX_MIB_TCPAOKEYNOTFOUND + */ + SKB_DROP_REASON_TCP_AOKEYNOTFOUND, + /** + * @SKB_DROP_REASON_TCP_AOFAILURE: TCP-AO hash is wrong, + * corresponding to LINUX_MIB_TCPAOBAD + */ + SKB_DROP_REASON_TCP_AOFAILURE, + /** * @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add skb to socket backlog ( * see LINUX_MIB_TCPBACKLOGDROP) */ @@ -345,6 +376,8 @@ enum skb_drop_reason { SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST, /** @SKB_DROP_REASON_QUEUE_PURGE: bulk free. */ SKB_DROP_REASON_QUEUE_PURGE, + /** @SKB_DROP_REASON_TC_ERROR: generic internal tc error. */ + SKB_DROP_REASON_TC_ERROR, /** * @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which * shouldn't be used as a real 'reason' - only for tracing code gen diff --git a/include/net/dsa.h b/include/net/dsa.h index 0b9c6aa270..82135fbdb1 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -102,11 +102,11 @@ struct dsa_device_ops { const char *name; enum dsa_tag_protocol proto; /* Some tagging protocols either mangle or shift the destination MAC - * address, in which case the DSA master would drop packets on ingress + * address, in which case the DSA conduit would drop packets on ingress * if what it understands out of the destination MAC address is not in * its RX filter. 
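Stepping back to the devlink_fmsg conversion above, where every formatter now returns void: as far as the conversion goes, the first failure is recorded inside the fmsg and surfaced when the message is consumed, so health-reporter callbacks lose their per-put error handling. A sketch of a dump callback under the new API (my_reporter_dump and the values are illustrative):

static int my_reporter_dump(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg, void *priv_ctx,
			    struct netlink_ext_ack *extack)
{
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_string_pair_put(fmsg, "state", "ok");
	devlink_fmsg_u32_pair_put(fmsg, "resets", 0);
	devlink_fmsg_obj_nest_end(fmsg);
	return 0;	/* any sticky fmsg error is reported by the core */
}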
*/ - bool promisc_on_master; + bool promisc_on_conduit; }; struct dsa_lag { @@ -236,12 +236,12 @@ struct dsa_bridge { }; struct dsa_port { - /* A CPU port is physically connected to a master device. - * A user port exposed to userspace has a slave device. + /* A CPU port is physically connected to a conduit device. A user port + * exposes a network device to user-space, called 'user' here. */ union { - struct net_device *master; - struct net_device *slave; + struct net_device *conduit; + struct net_device *user; }; /* Copy of the tagging protocol operations, for quicker access @@ -249,7 +249,7 @@ struct dsa_port { */ const struct dsa_device_ops *tag_ops; - /* Copies for faster access in master receive hot path */ + /* Copies for faster access in conduit receive hot path */ struct dsa_switch_tree *dst; struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); @@ -281,9 +281,9 @@ struct dsa_port { u8 lag_tx_enabled:1; - /* Master state bits, valid only on CPU ports */ - u8 master_admin_up:1; - u8 master_oper_up:1; + /* conduit state bits, valid only on CPU ports */ + u8 conduit_admin_up:1; + u8 conduit_oper_up:1; /* Valid only on user ports */ u8 cpu_port_in_lag:1; @@ -303,7 +303,7 @@ struct dsa_port { struct list_head list; /* - * Original copy of the master netdev ethtool_ops + * Original copy of the conduit netdev ethtool_ops */ const struct ethtool_ops *orig_ethtool_ops; @@ -452,10 +452,10 @@ struct dsa_switch { const struct dsa_switch_ops *ops; /* - * Slave mii_bus and devices for the individual ports. + * User mii_bus and devices for the individual ports. */ u32 phys_mii_mask; - struct mii_bus *slave_mii_bus; + struct mii_bus *user_mii_bus; /* Ageing Time limits in msecs */ unsigned int ageing_time_min; @@ -520,10 +520,10 @@ static inline bool dsa_port_is_unused(struct dsa_port *dp) return dp->type == DSA_PORT_TYPE_UNUSED; } -static inline bool dsa_port_master_is_operational(struct dsa_port *dp) +static inline bool dsa_port_conduit_is_operational(struct dsa_port *dp) { - return dsa_port_is_cpu(dp) && dp->master_admin_up && - dp->master_oper_up; + return dsa_port_is_cpu(dp) && dp->conduit_admin_up && + dp->conduit_oper_up; } static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) @@ -713,12 +713,12 @@ static inline bool dsa_port_offloads_lag(struct dsa_port *dp, return dsa_port_lag_dev_get(dp) == lag->dev; } -static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp) +static inline struct net_device *dsa_port_to_conduit(const struct dsa_port *dp) { if (dp->cpu_port_in_lag) return dsa_port_lag_dev_get(dp->cpu_dp); - return dp->cpu_dp->master; + return dp->cpu_dp->conduit; } static inline @@ -732,7 +732,7 @@ struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp) else if (dp->hsr_dev) return dp->hsr_dev; - return dp->slave; + return dp->user; } static inline struct net_device * @@ -834,9 +834,9 @@ struct dsa_switch_ops { int (*connect_tag_protocol)(struct dsa_switch *ds, enum dsa_tag_protocol proto); - int (*port_change_master)(struct dsa_switch *ds, int port, - struct net_device *master, - struct netlink_ext_ack *extack); + int (*port_change_conduit)(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack); /* Optional switch-wide initialization and destruction methods */ int (*setup)(struct dsa_switch *ds); @@ -969,6 +969,16 @@ struct dsa_switch_ops { struct phy_device *phy); void (*port_disable)(struct dsa_switch *ds, int port); + + /* + * Notification for MAC address changes on user ports. 
Drivers can + * currently only veto operations. They should not use the method to + * program the hardware, since the operation is not rolled back in case + * of other errors. + */ + int (*port_set_mac_address)(struct dsa_switch *ds, int port, + const unsigned char *addr); + /* * Compatibility between device trees defining multiple CPU ports and * drivers which are not OK to use by default the numerically smallest @@ -1198,7 +1208,8 @@ struct dsa_switch_ops { * HSR integration */ int (*port_hsr_join)(struct dsa_switch *ds, int port, - struct net_device *hsr); + struct net_device *hsr, + struct netlink_ext_ack *extack); int (*port_hsr_leave)(struct dsa_switch *ds, int port, struct net_device *hsr); @@ -1222,11 +1233,11 @@ struct dsa_switch_ops { int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid); /* - * DSA master tracking operations + * DSA conduit tracking operations */ - void (*master_state_change)(struct dsa_switch *ds, - const struct net_device *master, - bool operational); + void (*conduit_state_change)(struct dsa_switch *ds, + const struct net_device *conduit, + bool operational); }; #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \ @@ -1363,9 +1374,9 @@ static inline int dsa_switch_resume(struct dsa_switch *ds) #endif /* CONFIG_PM_SLEEP */ #if IS_ENABLED(CONFIG_NET_DSA) -bool dsa_slave_dev_check(const struct net_device *dev); +bool dsa_user_dev_check(const struct net_device *dev); #else -static inline bool dsa_slave_dev_check(const struct net_device *dev) +static inline bool dsa_user_dev_check(const struct net_device *dev) { return false; } diff --git a/include/net/dsa_stubs.h b/include/net/dsa_stubs.h index 361811750a..6f384897f2 100644 --- a/include/net/dsa_stubs.h +++ b/include/net/dsa_stubs.h @@ -13,14 +13,14 @@ extern const struct dsa_stubs *dsa_stubs; struct dsa_stubs { - int (*master_hwtstamp_validate)(struct net_device *dev, - const struct kernel_hwtstamp_config *config, - struct netlink_ext_ack *extack); + int (*conduit_hwtstamp_validate)(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); }; -static inline int dsa_master_hwtstamp_validate(struct net_device *dev, - const struct kernel_hwtstamp_config *config, - struct netlink_ext_ack *extack) +static inline int dsa_conduit_hwtstamp_validate(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { if (!netdev_uses_dsa(dev)) return 0; @@ -29,18 +29,18 @@ static inline int dsa_master_hwtstamp_validate(struct net_device *dev, * netdev_uses_dsa() returns true, the dsa_core module is still * registered, and so, dsa_unregister_stubs() couldn't have run. * For netdev_uses_dsa() to start returning false, it would imply that - * dsa_master_teardown() has executed, which requires rtnl_lock(). + * dsa_conduit_teardown() has executed, which requires rtnl_lock(). 
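As the note above spells out, the new port_set_mac_address op is veto-only for now; a minimal sketch of a conforming implementation (the address policy shown is illustrative, is_valid_ether_addr() comes from <linux/etherdevice.h>):

static int my_port_set_mac_address(struct dsa_switch *ds, int port,
				   const unsigned char *addr)
{
	/* validate only; do not program the hardware here, since the
	 * core does not roll the operation back on later errors
	 */
	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	return 0;
}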
*/ ASSERT_RTNL(); - return dsa_stubs->master_hwtstamp_validate(dev, config, extack); + return dsa_stubs->conduit_hwtstamp_validate(dev, config, extack); } #else -static inline int dsa_master_hwtstamp_validate(struct net_device *dev, - const struct kernel_hwtstamp_config *config, - struct netlink_ext_ack *extack) +static inline int dsa_conduit_hwtstamp_validate(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { return 0; } diff --git a/include/net/dst.h b/include/net/dst.h index 78884429de..f5dfc8fb7b 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -222,13 +222,6 @@ static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metr return msecs_to_jiffies(dst_metric(dst, metric)); } -static inline u32 -dst_allfrag(const struct dst_entry *dst) -{ - int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG); - return ret; -} - static inline int dst_metric_locked(const struct dst_entry *dst, int metric) { @@ -392,10 +385,10 @@ static inline int dst_discard(struct sk_buff *skb) { return dst_discard_out(&init_net, skb->sk, skb); } -void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, +void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_obsolete, unsigned short flags); void dst_init(struct dst_entry *dst, struct dst_ops *ops, - struct net_device *dev, int initial_ref, int initial_obsolete, + struct net_device *dev, int initial_obsolete, unsigned short flags); struct dst_entry *dst_destroy(struct dst_entry *dst); void dst_dev_put(struct dst_entry *dst); diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 9efa9a59e8..314087a5e1 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -333,7 +333,7 @@ struct flow_action_entry { struct flow_action { unsigned int num_entries; - struct flow_action_entry entries[]; + struct flow_action_entry entries[] __counted_by(num_entries); }; static inline bool flow_action_has_entries(const struct flow_action *action) diff --git a/include/net/gro.h b/include/net/gro.h index 88644b3ca6..b435f0ddbf 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -41,7 +41,7 @@ struct napi_gro_cb { /* Number of segments aggregated. 
*/ u16 count; - /* Used in ipv6_gro_receive() and foo-over-udp */ + /* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */ u16 proto; /* Used in napi_gro_cb::free */ diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 2338f8d2a8..925bac726a 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -539,6 +539,12 @@ enum ieee80211_radiotap_eht_usig_common { IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK = 0x00000080, IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER = 0x00007000, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW = 0x00038000, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ = 0, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_40MHZ = 1, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_80MHZ = 2, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_160MHZ = 3, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1 = 4, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_2 = 5, IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL = 0x00040000, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR = 0x01f80000, IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP = 0xfe000000, diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 31bf475eca..f07642264c 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -85,7 +85,7 @@ struct ip6_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct rcu_head rcu; - struct in6_addr sl_addr[]; + struct in6_addr sl_addr[] __counted_by(sl_max); }; #define IP6_SFBLOCK 10 /* allocate this many at once */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 01a73bf74f..9ab4bf704e 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -44,7 +44,6 @@ struct inet_connection_sock_af_ops { struct request_sock *req_unhash, bool *own_req); u16 net_header_len; - u16 net_frag_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); @@ -114,7 +113,10 @@ struct inet_connection_sock { __u8 quick; /* Scheduled number of quick acks */ __u8 pingpong; /* The session is interactive */ __u8 retry; /* Number of attempts */ - __u32 ato; /* Predicted tick of soft clock */ + #define ATO_BITS 8 + __u32 ato:ATO_BITS, /* Predicted tick of soft clock */ + lrcv_flowlabel:20, /* last received ipv6 flowlabel */ + unused:4; unsigned long timeout; /* Currently scheduled timeout */ __u32 lrcvtime; /* timestamp of last received data packet */ __u16 last_seg_size; /* Size of last incoming segment */ @@ -325,11 +327,10 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); -#define TCP_PINGPONG_THRESH 1 - static inline void inet_csk_enter_pingpong_mode(struct sock *sk) { - inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH; + inet_csk(sk)->icsk_ack.pingpong = + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh); } static inline void inet_csk_exit_pingpong_mode(struct sock *sk) @@ -339,7 +340,16 @@ static inline void inet_csk_exit_pingpong_mode(struct sock *sk) static inline bool inet_csk_in_pingpong_mode(struct sock *sk) { - return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; + return inet_csk(sk)->icsk_ack.pingpong >= + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh); +} + +static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + + if (icsk->icsk_ack.pingpong < U8_MAX) + icsk->icsk_ack.pingpong++; } static inline bool inet_csk_has_ulp(const struct sock 
*sk) diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 2790ba58ff..8d5fe15b0f 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -244,7 +244,6 @@ struct inet_sock { }; #define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ -#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */ enum { INET_FLAGS_PKTINFO = 0, @@ -268,6 +267,16 @@ enum { INET_FLAGS_NODEFRAG = 17, INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, INET_FLAGS_DEFER_CONNECT = 19, + INET_FLAGS_MC6_LOOP = 20, + INET_FLAGS_RECVERR6_RFC4884 = 21, + INET_FLAGS_MC6_ALL = 22, + INET_FLAGS_AUTOFLOWLABEL_SET = 23, + INET_FLAGS_AUTOFLOWLABEL = 24, + INET_FLAGS_DONTFRAG = 25, + INET_FLAGS_RECVERR6 = 26, + INET_FLAGS_REPFLOW = 27, + INET_FLAGS_RTALERT_ISOLATE = 28, + INET_FLAGS_SNDFLOW = 29, }; /* cmsg flags for inet */ diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 4a8e578405..b14999ff55 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -67,7 +67,8 @@ struct inet_timewait_sock { /* And these are ours. */ unsigned int tw_transparent : 1, tw_flowlabel : 20, - tw_pad : 3, /* 3 bits hole */ + tw_usec_ts : 1, + tw_pad : 2, /* 2 bits hole */ tw_tos : 8; u32 tw_txhash; u32 tw_priority; diff --git a/include/net/ip.h b/include/net/ip.h index 3489a1cca5..6390dd08da 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -258,7 +258,7 @@ static inline u8 ip_sendmsg_scope(const struct inet_sock *inet, static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet) { - return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos); + return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(READ_ONCE(inet->tos)); } /* datagram.c */ @@ -434,19 +434,22 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst) static inline bool ip_sk_accept_pmtu(const struct sock *sk) { - return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE && - inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT; + u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc); + + return pmtudisc != IP_PMTUDISC_INTERFACE && + pmtudisc != IP_PMTUDISC_OMIT; } static inline bool ip_sk_use_pmtu(const struct sock *sk) { - return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE; + return READ_ONCE(inet_sk(sk)->pmtudisc) < IP_PMTUDISC_PROBE; } static inline bool ip_sk_ignore_df(const struct sock *sk) { - return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO || - inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT; + u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc); + + return pmtudisc < IP_PMTUDISC_DO || pmtudisc == IP_PMTUDISC_OMIT; } static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, @@ -758,7 +761,7 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev); * Functions provided by ip_sockglue.c */ -void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); +void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst); void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, int tlen, int offset); int ip_cmsg_send(struct sock *sk, struct msghdr *msg, diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index b32539bb0f..28b0657902 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -53,13 +53,12 @@ struct route_info { */ static inline int rt6_srcprefs2flags(unsigned int srcprefs) { - /* No need to bitmask because srcprefs have only 3 bits. 
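Several flexible arrays in this update (flow_action.entries, ip6_sf_socklist.sl_addr, fib_info.fib_nh) gain __counted_by() annotations, meaning fortified kernels bounds-check the array against the named counter, so the counter must be written before the array is indexed. An illustrative allocation pattern for flow_action (the core's own allocator is flow_rule_alloc(); this only shows the ordering the annotation enforces):

struct flow_action *act;

act = kzalloc(struct_size(act, entries, n_entries), GFP_KERNEL);
if (!act)
	return -ENOMEM;

/* set the counter first: __counted_by bounds checks key off it */
act->num_entries = n_entries;
act->entries[0].id = FLOW_ACTION_DROP;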
*/ - return srcprefs << 3; + return (srcprefs & IPV6_PREFER_SRC_MASK) << 3; } static inline unsigned int rt6_flags2srcprefs(int flags) { - return (flags >> 3) & 7; + return (flags >> 3) & IPV6_PREFER_SRC_MASK; } static inline bool rt6_need_strict(const struct in6_addr *daddr) @@ -266,7 +265,7 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb) const struct dst_entry *dst = skb_dst(skb); unsigned int mtu; - if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) { + if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) { mtu = READ_ONCE(dst->dev->mtu); mtu -= lwtunnel_headroom(dst->lwtstate, mtu); } else { @@ -277,14 +276,18 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb) static inline bool ip6_sk_accept_pmtu(const struct sock *sk) { - return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE && - inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT; + u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc); + + return pmtudisc != IPV6_PMTUDISC_INTERFACE && + pmtudisc != IPV6_PMTUDISC_OMIT; } static inline bool ip6_sk_ignore_df(const struct sock *sk) { - return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO || - inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; + u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc); + + return pmtudisc < IPV6_PMTUDISC_DO || + pmtudisc == IPV6_PMTUDISC_OMIT; } static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 15de07d365..d4667b7797 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -157,7 +157,7 @@ struct fib_info { bool pfsrc_removed; struct nexthop *nh; struct rcu_head rcu; - struct fib_nh fib_nh[]; + struct fib_nh fib_nh[] __counted_by(fib_nhs); }; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index c6932d1a3f..78d38dd88a 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -373,12 +373,12 @@ static inline void ipcm6_init(struct ipcm6_cookie *ipc6) } static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6, - const struct ipv6_pinfo *np) + const struct sock *sk) { *ipc6 = (struct ipcm6_cookie) { .hlimit = -1, - .tclass = np->tclass, - .dontfrag = np->dontfrag, + .tclass = inet6_sk(sk)->tclass, + .dontfrag = inet6_test_bit(DONTFRAG, sk), }; } @@ -428,7 +428,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, int flags); int ip6_flowlabel_init(void); void ip6_flowlabel_cleanup(void); -bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np); +bool ip6_autoflowlabel(struct net *net, const struct sock *sk); static inline void fl6_sock_release(struct ip6_flowlabel *fl) { @@ -914,9 +914,9 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, int hlimit; if (ipv6_addr_is_multicast(&fl6->daddr)) - hlimit = np->mcast_hops; + hlimit = READ_ONCE(np->mcast_hops); else - hlimit = np->hop_limit; + hlimit = READ_ONCE(np->hop_limit); if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); return hlimit; @@ -1133,12 +1133,6 @@ struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, st struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool connected); -struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb, - struct net_device *dev, - struct net *net, struct socket *sock, - struct in6_addr *saddr, - const struct ip_tunnel_info *info, - u8 protocol, bool use_cache); struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *orig_dst); @@ -1303,15 +1297,16 @@ static inline int 
ip6_sock_set_v6only(struct sock *sk) static inline void ip6_sock_set_recverr(struct sock *sk) { - lock_sock(sk); - inet6_sk(sk)->recverr = true; - release_sock(sk); + inet6_set_bit(RECVERR6, sk); } -static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val) +#define IPV6_PREFER_SRC_MASK (IPV6_PREFER_SRC_TMP | IPV6_PREFER_SRC_PUBLIC | \ + IPV6_PREFER_SRC_COA) + +static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val) { + unsigned int prefmask = ~IPV6_PREFER_SRC_MASK; unsigned int pref = 0; - unsigned int prefmask = ~0; /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */ switch (val & (IPV6_PREFER_SRC_PUBLIC | @@ -1361,20 +1356,11 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val) return -EINVAL; } - inet6_sk(sk)->srcprefs = (inet6_sk(sk)->srcprefs & prefmask) | pref; + WRITE_ONCE(inet6_sk(sk)->srcprefs, + (READ_ONCE(inet6_sk(sk)->srcprefs) & prefmask) | pref); return 0; } -static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val) -{ - int ret; - - lock_sock(sk); - ret = __ip6_sock_set_addr_preferences(sk, val); - release_sock(sk); - return ret; -} - static inline void ip6_sock_set_recvpktinfo(struct sock *sk) { lock_sock(sk); diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index c48186bf47..485c39a898 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -60,6 +60,9 @@ struct ipv6_stub { #if IS_ENABLED(CONFIG_XFRM) void (*xfrm6_local_rxpmtu)(struct sk_buff *skb, u32 mtu); int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb); + struct sk_buff *(*xfrm6_gro_udp_encap_rcv)(struct sock *sk, + struct list_head *head, + struct sk_buff *skb); int (*xfrm6_rcv_encap)(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); #endif @@ -85,6 +88,11 @@ struct ipv6_bpf_stub { sockptr_t optval, unsigned int optlen); int (*ipv6_getsockopt)(struct sock *sk, int level, int optname, sockptr_t optval, sockptr_t optlen); + int (*ipv6_dev_get_saddr)(struct net *net, + const struct net_device *dst_dev, + const struct in6_addr *daddr, + unsigned int prefs, + struct in6_addr *saddr); }; extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 7c707358d1..580781ff9d 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -79,7 +79,7 @@ * helpers for sanity checking. Drivers must ensure all work added onto the * mac80211 workqueue should be cancelled on the driver stop() callback. * - * mac80211 will flushed the workqueue upon interface removal and during + * mac80211 will flush the workqueue upon interface removal and during * suspend. * * All work performed on the mac80211 workqueue must not acquire the RTNL lock. @@ -138,7 +138,7 @@ * field to the frame RX timestamp and report the ack TX timestamp in the * ieee80211_rx_status struct. * - * Similarly, To report hardware timestamps for Timing Measurement or Fine + * Similarly, to report hardware timestamps for Timing Measurement or Fine * Timing Measurement frame TX, the driver should set the SKB's hwtstamp field * to the frame TX timestamp and report the ack RX timestamp in the * ieee80211_tx_status struct. @@ -341,6 +341,7 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response * status changed. * @BSS_CHANGED_EHT_PUNCTURING: The channel puncturing bitmap changed. + * @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed. 
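Also worth noting from the ipv6.h hunk above: ip6_sock_set_addr_preferences() no longer takes the socket lock, masking and updating srcprefs with READ_ONCE()/WRITE_ONCE() instead, so callers now invoke it directly:

	/* sketch: prefer temporary (privacy) source addresses */
	int err = ip6_sock_set_addr_preferences(sk, IPV6_PREFER_SRC_TMP);
	if (err)
		return err;	/* conflicting preference bits */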
*/ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -376,6 +377,7 @@ enum ieee80211_bss_change { BSS_CHANGED_FILS_DISCOVERY = 1<<30, BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31, BSS_CHANGED_EHT_PUNCTURING = BIT_ULL(32), + BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33), /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -643,9 +645,7 @@ struct ieee80211_fils_discovery { * @pwr_reduction: power constraint of BSS. * @eht_support: does this BSS support EHT * @eht_puncturing: bitmap to indicate which channels are punctured in this BSS - * @csa_active: marks whether a channel switch is going on. Internally it is - * write-protected by sdata_lock and local->mtx so holding either is fine - * for read access. + * @csa_active: marks whether a channel switch is going on. * @csa_punct_bitmap: new puncturing bitmap for channel switch * @mu_mimo_owner: indicates interface owns MU-MIMO capability * @chanctx_conf: The channel context this interface is assigned to, or %NULL @@ -653,9 +653,7 @@ struct ieee80211_fils_discovery { * path needing to access it; even though the netdev carrier will always * be off when it is %NULL there can still be races and packets could be * processed after it switches back to %NULL. - * @color_change_active: marks whether a color change is ongoing. Internally it is - * write-protected by sdata_lock and local->mtx so holding either is fine - * for read access. + * @color_change_active: marks whether a color change is ongoing. * @color_change_color: the bss color that will be used after the change. * @ht_ldpc: in AP mode, indicates interface has HT LDPC capability. * @vht_ldpc: in AP mode, indicates interface has VHT LDPC capability. @@ -1082,6 +1080,11 @@ struct ieee80211_tx_rate { #define IEEE80211_MAX_TX_RETRY 31 +static inline bool ieee80211_rate_valid(struct ieee80211_tx_rate *rate) +{ + return rate->idx >= 0 && rate->count > 0; +} + static inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *rate, u8 mcs, u8 nss) { @@ -1115,7 +1118,9 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate) * not valid if the interface is an MLD since we won't know which * link the frame will be transmitted on * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC - * @ack_frame_id: internal frame ID for TX status, used internally + * @status_data: internal data for TX status handling, assigned privately, + * see also &enum ieee80211_status_data for the internal documentation + * @status_data_idr: indicates status data is IDR allocated ID for ack frame * @tx_time_est: TX time estimate in units of 4us, used internally * @control: union part for control data * @control.rates: TX rates array to try @@ -1155,10 +1160,11 @@ struct ieee80211_tx_info { /* common information */ u32 flags; u32 band:3, - ack_frame_id:13, + status_data_idr:1, + status_data:13, hw_queue:4, tx_time_est:10; - /* 2 free bits */ + /* 1 free bit */ union { struct { @@ -1172,7 +1178,11 @@ struct ieee80211_tx_info { u8 use_cts_prot:1; u8 short_preamble:1; u8 skip_table:1; - /* 2 bytes free */ + + /* for injection only (bitmap) */ + u8 antennas:2; + + /* 14 bits free */ }; /* only needed before rate control */ unsigned long jiffies; @@ -1757,15 +1767,15 @@ struct ieee80211_channel_switch { * @IEEE80211_VIF_GET_NOA_UPDATE: request to handle NOA attributes * and send P2P_PS notification to the driver if NOA changed, even * this is not pure P2P vif. - * @IEEE80211_VIF_DISABLE_SMPS_OVERRIDE: disable user configuration of - * SMPS mode via debugfs. 
+ * @IEEE80211_VIF_EML_ACTIVE: The driver indicates that EML operation is + * enabled for the interface. */ enum ieee80211_vif_flags { IEEE80211_VIF_BEACON_FILTER = BIT(0), IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1), IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2), IEEE80211_VIF_GET_NOA_UPDATE = BIT(3), - IEEE80211_VIF_DISABLE_SMPS_OVERRIDE = BIT(4), + IEEE80211_VIF_EML_ACTIVE = BIT(4), }; @@ -1938,7 +1948,7 @@ static inline bool ieee80211_vif_is_mld(const struct ieee80211_vif *vif) for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \ if ((!(vif)->active_links || \ (vif)->active_links & BIT(link_id)) && \ - (link = rcu_dereference((vif)->link_conf[link_id]))) + (link = link_conf_dereference_check(vif, link_id))) static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) { @@ -1971,22 +1981,18 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); */ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif); -/** - * lockdep_vif_mutex_held - for lockdep checks on link poiners - * @vif: the interface to check - */ -static inline bool lockdep_vif_mutex_held(struct ieee80211_vif *vif) +static inline bool lockdep_vif_wiphy_mutex_held(struct ieee80211_vif *vif) { - return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->mtx); + return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->wiphy->mtx); } #define link_conf_dereference_protected(vif, link_id) \ rcu_dereference_protected((vif)->link_conf[link_id], \ - lockdep_vif_mutex_held(vif)) + lockdep_vif_wiphy_mutex_held(vif)) #define link_conf_dereference_check(vif, link_id) \ rcu_dereference_check((vif)->link_conf[link_id], \ - lockdep_vif_mutex_held(vif)) + lockdep_vif_wiphy_mutex_held(vif)) /** * enum ieee80211_key_flags - key flags @@ -2393,7 +2399,7 @@ static inline bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta) for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) \ if ((!(vif)->active_links || \ (vif)->active_links & BIT(link_id)) && \ - ((link_sta) = link_sta_dereference_protected(sta, link_id))) + ((link_sta) = link_sta_dereference_check(sta, link_id))) /** * enum sta_notify_cmd - sta notify command @@ -3056,7 +3062,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * The set_key() call for the %SET_KEY command should return 0 if * the key is now in use, -%EOPNOTSUPP or -%ENOSPC if it couldn't be * added; if you return 0 then hw_key_idx must be assigned to the - * hardware key index, you are free to use the full u8 range. + * hardware key index. You are free to use the full u8 range. * * Note that in the case that the @IEEE80211_HW_SW_CRYPTO_CONTROL flag is * set, mac80211 will not automatically fall back to software crypto if @@ -3066,7 +3072,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * When the cmd is %DISABLE_KEY then it must succeed. * * Note that it is permissible to not decrypt a frame even if a key - * for it has been uploaded to hardware, the stack will not make any + * for it has been uploaded to hardware. The stack will not make any * decision based on whether a key has been uploaded or not but rather * based on the receive flags. * @@ -3081,7 +3087,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * The update_tkip_key() call updates the driver with the new phase 1 key. * This happens every time the iv16 wraps around (every 65536 packets). The * set_key() call will happen only once for each key (unless the AP did - * rekeying), it will not include a valid phase 1 key. 
The valid phase 1 key is + * rekeying); it will not include a valid phase 1 key. The valid phase 1 key is * provided by update_tkip_key only. The trigger that makes mac80211 call this * handler is software decryption with wrap around of iv16. * @@ -3108,7 +3114,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * * mac80211 has support for various powersave implementations. * - * First, it can support hardware that handles all powersaving by itself, + * First, it can support hardware that handles all powersaving by itself; * such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS hardware * flag. In that case, it will be told about the desired powersave mode * with the %IEEE80211_CONF_PS flag depending on the association status. @@ -3133,12 +3139,12 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still * required to pass up beacons. The hardware is still required to handle * waking up for multicast traffic; if it cannot the driver must handle that - * as best as it can, mac80211 is too slow to do that. + * as best as it can; mac80211 is too slow to do that. * * Dynamic powersave is an extension to normal powersave in which the * hardware stays awake for a user-specified period of time after sending a * frame so that reply frames need not be buffered and therefore delayed to - * the next wakeup. It's compromise of getting good enough latency when + * the next wakeup. It's a compromise of getting good enough latency when * there's data traffic and still saving significantly power in idle * periods. * @@ -3203,7 +3209,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * Note that change, for the sake of simplification, also includes information * elements appearing or disappearing from the beacon. * - * Some hardware supports an "ignore list" instead, just make sure nothing + * Some hardware supports an "ignore list" instead. Just make sure nothing * that was requested is on the ignore list, and include commonly changing * information element IDs in the ignore list, for example 11 (BSS load) and * the various vendor-assigned IEs with unknown contents (128, 129, 133-136, @@ -3214,7 +3220,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * In addition to these capabilities, hardware should support notifying the * host of changes in the beacon RSSI. This is relevant to implement roaming * when no traffic is flowing (when traffic is flowing we see the RSSI of - * the received data packets). This can consist in notifying the host when + * the received data packets). This can consist of notifying the host when * the RSSI changes significantly or when it drops below or rises above * configurable thresholds. In the future these thresholds will also be * configured by mac80211 (which gets them from userspace) to implement @@ -3361,8 +3367,8 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * period starts for any reason, @release_buffered_frames is called * with the number of frames to be released and which TIDs they are * to come from. In this case, the driver is responsible for setting - * the EOSP (for uAPSD) and MORE_DATA bits in the released frames, - * to help the @more_data parameter is passed to tell the driver if + * the EOSP (for uAPSD) and MORE_DATA bits in the released frames. 
+ * To help the @more_data parameter is passed to tell the driver if * there is more data on other TIDs -- the TIDs to release frames * from are ignored since mac80211 doesn't know how many frames the * buffers for those TIDs contain. @@ -3411,7 +3417,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * Additionally, the driver has to then use these HW queue IDs for the queue * management functions (ieee80211_stop_queue() et al.) * - * The driver is free to set up the queue mappings as needed, multiple virtual + * The driver is free to set up the queue mappings as needed; multiple virtual * interfaces may map to the same hardware queues if needed. The setup has to * happen during add_interface or change_interface callbacks. For example, a * driver supporting station+station and station+AP modes might decide to have @@ -3635,11 +3641,14 @@ enum ieee80211_reconfig_type { * @success: whether the frame exchange was successful, only * used with the mgd_complete_tx() method, and then only * valid for auth and (re)assoc. + * @link_id: the link id on which the frame will be TX'ed. + * Only used with the mgd_prepare_tx() method. */ struct ieee80211_prep_tx_info { u16 duration; u16 subtype; u8 success:1; + int link_id; }; /** @@ -3863,6 +3872,10 @@ struct ieee80211_prep_tx_info { * the station. See @sta_pre_rcu_remove if needed. * This callback can sleep. * + * @vif_add_debugfs: Drivers can use this callback to add a debugfs vif + * directory with its files. This callback should be within a + * CONFIG_MAC80211_DEBUGFS conditional. This callback can sleep. + * * @link_add_debugfs: Drivers can use this callback to add debugfs files * when a link is added to a mac80211 vif. This callback should be within * a CONFIG_MAC80211_DEBUGFS conditional. This callback can sleep. @@ -4067,11 +4080,15 @@ struct ieee80211_prep_tx_info { * This callback must be atomic. * * @get_et_sset_count: Ethtool API to get string-set count. + * Note that the wiphy mutex is not held for this callback since it's + * expected to return a static value. * * @get_et_stats: Ethtool API to get a set of u64 stats. * * @get_et_strings: Ethtool API to get a set of strings to describe stats * and perhaps other supported types of ethtool data-sets. + * Note that the wiphy mutex is not held for this callback since it's + * expected to return a static value. * * @mgd_prepare_tx: Prepare for transmitting a management frame for association * before associated. 
In multi-channel scenarios, a virtual interface is @@ -4358,6 +4375,8 @@ struct ieee80211_ops { int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); #ifdef CONFIG_MAC80211_DEBUGFS + void (*vif_add_debugfs)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); void (*link_add_debugfs)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, @@ -4506,7 +4525,8 @@ struct ieee80211_ops { struct ieee80211_prep_tx_info *info); void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + unsigned int link_id); int (*add_chanctx)(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx); @@ -4544,7 +4564,8 @@ struct ieee80211_ops { struct ieee80211_channel_switch *ch_switch); int (*post_channel_switch)(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); void (*abort_channel_switch)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*channel_switch_rx_beacon)(struct ieee80211_hw *hw, @@ -4890,7 +4911,7 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw); * for a single hardware must be synchronized against each other. Calls to * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be * mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * ieee80211_tx_status_skb() or ieee80211_tx_status_ni(). * * This function must be called with BHs disabled and RCU read lock * @@ -4915,7 +4936,7 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *sta, * for a single hardware must be synchronized against each other. Calls to * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be * mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * ieee80211_tx_status_skb() or ieee80211_tx_status_ni(). * * This function must be called with BHs disabled. * @@ -4940,7 +4961,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta, * for a single hardware must be synchronized against each other. Calls to * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be * mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * ieee80211_tx_status_skb() or ieee80211_tx_status_ni(). * * In process context use instead ieee80211_rx_ni(). * @@ -4960,7 +4981,7 @@ static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) * * Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not * be mixed for a single hardware.Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * ieee80211_tx_status_skb() or ieee80211_tx_status_ni(). * * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac80211 after this call @@ -4975,7 +4996,7 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb); * * Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may * not be mixed for a single hardware. Must not run concurrently with - * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * ieee80211_tx_status_skb() or ieee80211_tx_status_ni(). 
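The receive-path documentation touched above keeps one long-standing rule intact while updating the TX-status symbol name: ieee80211_rx(), ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be mixed for a single hardware, and only the irqsafe variant is callable from hard-IRQ context. A minimal sketch of an interrupt handler written against that contract; the mydrv structure and the mydrv_pop_rx() helper are invented for illustration:

static irqreturn_t mydrv_isr(int irq, void *data)
{
	struct mydrv *mp = data;	/* hypothetical driver state */
	struct sk_buff *skb;

	/* Hard-IRQ context: only ieee80211_rx_irqsafe() is permitted
	 * here; mac80211 internally defers processing to a tasklet.
	 */
	while ((skb = mydrv_pop_rx(mp)) != NULL)
		ieee80211_rx_irqsafe(mp->hw, skb);

	return IRQ_HANDLED;
}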
* * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac80211 after this call @@ -5151,7 +5172,7 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw, struct ieee80211_tx_info *info); /** - * ieee80211_tx_status - transmit status callback + * ieee80211_tx_status_skb - transmit status callback * * Call this function for all transmitted frames after they have been * transmitted. It is permissible to not call this function for @@ -5166,13 +5187,13 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw, * @hw: the hardware the frame was transmitted by * @skb: the frame that was transmitted, owned by mac80211 after this call */ -void ieee80211_tx_status(struct ieee80211_hw *hw, - struct sk_buff *skb); +void ieee80211_tx_status_skb(struct ieee80211_hw *hw, + struct sk_buff *skb); /** * ieee80211_tx_status_ext - extended transmit status callback * - * This function can be used as a replacement for ieee80211_tx_status + * This function can be used as a replacement for ieee80211_tx_status_skb() * in drivers that may want to provide extra information that does not * fit into &struct ieee80211_tx_info. * @@ -5189,7 +5210,7 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, /** * ieee80211_tx_status_noskb - transmit status callback without skb * - * This function can be used as a replacement for ieee80211_tx_status + * This function can be used as a replacement for ieee80211_tx_status_skb() * in drivers that cannot reliably map tx status information back to * specific skbs. * @@ -5217,9 +5238,9 @@ static inline void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, /** * ieee80211_tx_status_ni - transmit status callback (in process context) * - * Like ieee80211_tx_status() but can be called in process context. + * Like ieee80211_tx_status_skb() but can be called in process context. * - * Calls to this function, ieee80211_tx_status() and + * Calls to this function, ieee80211_tx_status_skb() and * ieee80211_tx_status_irqsafe() may not be mixed * for a single hardware. * @@ -5230,17 +5251,17 @@ static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw, struct sk_buff *skb) { local_bh_disable(); - ieee80211_tx_status(hw, skb); + ieee80211_tx_status_skb(hw, skb); local_bh_enable(); } /** * ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback * - * Like ieee80211_tx_status() but can be called in IRQ context + * Like ieee80211_tx_status_skb() but can be called in IRQ context * (internally defers to a tasklet.) * - * Calls to this function, ieee80211_tx_status() and + * Calls to this function, ieee80211_tx_status_skb() and * ieee80211_tx_status_ni() may not be mixed for a single hardware. * * @hw: the hardware the frame was transmitted by @@ -6542,11 +6563,14 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw); * ieee80211_chswitch_done - Complete channel switch process * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @success: make the channel switch successful or not + * @link_id: the link_id on which the switch was done. Ignored if success is + * false. * * Complete the channel switch post-process: set the new operational channel * and wake up the suspended queues. 
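The string of documentation hunks above tracks the rename of ieee80211_tx_status() to ieee80211_tx_status_skb(); the calling contract (softirq context, no mixing with the _ni()/_irqsafe() variants for one hardware) is unchanged. A hedged sketch of a completion handler using the renamed API, with invented driver names:

static void mydrv_tx_complete(struct ieee80211_hw *hw, struct sk_buff *skb,
			      bool acked)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	ieee80211_tx_info_clear_status(info);
	if (acked)
		info->flags |= IEEE80211_TX_STAT_ACK;

	/* Called from the driver's tasklet, i.e. with BHs disabled; a
	 * process-context caller would use ieee80211_tx_status_ni()
	 * instead, and the two must not be mixed for one hardware.
	 */
	ieee80211_tx_status_skb(hw, skb);
}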
*/ -void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success); +void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success, + unsigned int link_id); /** * ieee80211_channel_switch_disconnect - disconnect due to channel switch error @@ -7242,7 +7266,7 @@ ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, * * This function is used to check whether given txq is allowed to transmit by * the airtime scheduler, and can be used by drivers to access the airtime - * fairness accounting without going using the scheduling order enfored by + * fairness accounting without using the scheduling order enforced by * next_txq(). * * Returns %true if the airtime scheduler thinks the TXQ should be allowed to diff --git a/include/net/mana/hw_channel.h b/include/net/mana/hw_channel.h index 3d3b5c881b..158b125692 100644 --- a/include/net/mana/hw_channel.h +++ b/include/net/mana/hw_channel.h @@ -121,7 +121,7 @@ struct hwc_dma_buf { u32 gpa_mkey; u32 num_reqs; - struct hwc_work_request reqs[]; + struct hwc_work_request reqs[] __counted_by(num_reqs); }; typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id, diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 4d43adf186..6e3e9c1363 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -339,7 +339,7 @@ struct mana_rxq { /* MUST BE THE LAST MEMBER: * Each receive buffer has an associated mana_recv_buf_oob. */ - struct mana_recv_buf_oob rx_oobs[]; + struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf); }; struct mana_tx_qp { diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index eb6cd43b17..13b3a4e29f 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -368,21 +368,30 @@ static inline void put_net_track(struct net *net, netns_tracker *tracker) typedef struct { #ifdef CONFIG_NET_NS - struct net *net; + struct net __rcu *net; #endif } possible_net_t; static inline void write_pnet(possible_net_t *pnet, struct net *net) { #ifdef CONFIG_NET_NS - pnet->net = net; + rcu_assign_pointer(pnet->net, net); #endif } static inline struct net *read_pnet(const possible_net_t *pnet) { #ifdef CONFIG_NET_NS - return pnet->net; + return rcu_dereference_protected(pnet->net, true); +#else + return &init_net; +#endif +} + +static inline struct net *read_pnet_rcu(possible_net_t *pnet) +{ +#ifdef CONFIG_NET_NS + return rcu_dereference(pnet->net); #else return &init_net; #endif diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 4085765c33..cba3ccf03f 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -160,10 +160,6 @@ static inline struct net *nf_ct_net(const struct nf_conn *ct) return read_pnet(&ct->ct_net); } -/* Alter reply tuple (maybe alter helper). */ -void nf_conntrack_alter_reply(struct nf_conn *ct, - const struct nf_conntrack_tuple *newreply); - /* Is this tuple taken? (ignoring any belonging to the given conntrack). 
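With possible_net_t now carrying an __rcu annotation (net_namespace.h hunk above), lockless readers get the new read_pnet_rcu() accessor instead of the lockdep-checked read_pnet(). A small sketch of a reader under RCU protection; the wrapper function itself is hypothetical:

static bool dev_in_init_net(struct net_device *dev)
{
	struct net *net;
	bool ret;

	rcu_read_lock();
	/* read_pnet_rcu() wraps rcu_dereference(), so it must run
	 * inside a read-side critical section.
	 */
	net = read_pnet_rcu(&dev->nd_net);
	ret = net_eq(net, &init_net);
	rcu_read_unlock();

	return ret;
}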
*/ int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, @@ -284,6 +280,16 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb) return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK; } +static inline void nf_conntrack_alter_reply(struct nf_conn *ct, + const struct nf_conntrack_tuple *newreply) +{ + /* Must be unconfirmed, so not in hash table yet */ + if (WARN_ON(nf_ct_is_confirmed(ct))) + return; + + ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; +} + #define nfct_time_stamp ((u32)(jiffies)) /* jiffies until ct expires, 0 if already expired */ diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index fcb19a4e8f..6903f72bcc 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h @@ -39,7 +39,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct) #ifdef CONFIG_NF_CONNTRACK_LABELS struct net *net = nf_ct_net(ct); - if (net->ct.labels_used == 0) + if (atomic_read(&net->ct.labels_used) == 0) return NULL; return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC); diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 692d595591..4a767b3d20 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -275,7 +275,7 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table, } void flow_offload_route_init(struct flow_offload *flow, - const struct nf_flow_route *route); + struct nf_flow_route *route); int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow); void flow_offload_refresh(struct nf_flowtable *flow_table, diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 75972e211b..62013d0184 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -274,6 +274,9 @@ struct nft_userdata { unsigned char data[]; }; +/* placeholder structure for opaque set element backend representation. 
*/ +struct nft_elem_priv { }; + /** * struct nft_set_elem - generic representation of set elements * @@ -294,9 +297,14 @@ struct nft_set_elem { u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)]; struct nft_data val; } data; - void *priv; + struct nft_elem_priv *priv; }; +static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv) +{ + return (void *)priv; +} + struct nft_set; struct nft_set_iter { u8 genmask; @@ -306,7 +314,7 @@ struct nft_set_iter { int (*fn)(const struct nft_ctx *ctx, struct nft_set *set, const struct nft_set_iter *iter, - struct nft_set_elem *elem); + struct nft_elem_priv *elem_priv); }; /** @@ -430,7 +438,8 @@ struct nft_set_ops { const struct nft_set_ext **ext); bool (*update)(struct nft_set *set, const u32 *key, - void *(*new)(struct nft_set *, + struct nft_elem_priv * + (*new)(struct nft_set *, const struct nft_expr *, struct nft_regs *), const struct nft_expr *expr, @@ -442,27 +451,27 @@ struct nft_set_ops { int (*insert)(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, - struct nft_set_ext **ext); + struct nft_elem_priv **priv); void (*activate)(const struct net *net, const struct nft_set *set, - const struct nft_set_elem *elem); - void * (*deactivate)(const struct net *net, + struct nft_elem_priv *elem_priv); + struct nft_elem_priv * (*deactivate)(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem); - bool (*flush)(const struct net *net, + void (*flush)(const struct net *net, const struct nft_set *set, - void *priv); + struct nft_elem_priv *priv); void (*remove)(const struct net *net, const struct nft_set *set, - const struct nft_set_elem *elem); + struct nft_elem_priv *elem_priv); void (*walk)(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_iter *iter); - void * (*get)(const struct net *net, + struct nft_elem_priv * (*get)(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, unsigned int flags); - void (*commit)(const struct nft_set *set); + void (*commit)(struct nft_set *set); void (*abort)(const struct nft_set *set); u64 (*privsize)(const struct nlattr * const nla[], const struct nft_set_desc *desc); @@ -789,16 +798,22 @@ static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ex return nft_set_ext(ext, NFT_SET_EXT_EXPRESSIONS); } -static inline bool nft_set_elem_expired(const struct nft_set_ext *ext) +static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext, + u64 tstamp) { return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) && - time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext)); + time_after_eq64(tstamp, *nft_set_ext_expiration(ext)); +} + +static inline bool nft_set_elem_expired(const struct nft_set_ext *ext) +{ + return __nft_set_elem_expired(ext, get_jiffies_64()); } static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set, - void *elem) + const struct nft_elem_priv *elem_priv) { - return elem + set->ops->elemsize; + return (void *)elem_priv + set->ops->elemsize; } static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext) @@ -810,16 +825,19 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx, const struct nft_set *set, const struct nlattr *attr); -void *nft_set_elem_init(const struct nft_set *set, - const struct nft_set_ext_tmpl *tmpl, - const u32 *key, const u32 *key_end, const u32 *data, - u64 timeout, u64 expiration, gfp_t gfp); +struct nft_elem_priv *nft_set_elem_init(const struct nft_set *set, + const struct nft_set_ext_tmpl 
*tmpl, + const u32 *key, const u32 *key_end, + const u32 *data, + u64 timeout, u64 expiration, gfp_t gfp); int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set, struct nft_expr *expr_array[]); -void nft_set_elem_destroy(const struct nft_set *set, void *elem, +void nft_set_elem_destroy(const struct nft_set *set, + const struct nft_elem_priv *elem_priv, bool destroy_expr); void nf_tables_set_elem_destroy(const struct nft_ctx *ctx, - const struct nft_set *set, void *elem); + const struct nft_set *set, + const struct nft_elem_priv *elem_priv); struct nft_expr_ops; /** @@ -1061,7 +1079,7 @@ struct nft_chain { int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain); int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set, const struct nft_set_iter *iter, - struct nft_set_elem *elem); + struct nft_elem_priv *elem_priv); int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set); int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain); void nf_tables_unbind_chain(const struct nft_ctx *ctx, struct nft_chain *chain); @@ -1198,10 +1216,13 @@ static inline void nft_use_inc_restore(u32 *use) * @hgenerator: handle generator state * @handle: table handle * @use: number of chain references to this table + * @family:address family * @flags: table flag (see enum nft_table_flags) * @genmask: generation mask - * @afinfo: address family info + * @nlpid: netlink port ID * @name: name of the table + * @udlen: length of the user data + * @udata: user data * @validate_state: internal, set when transaction adds jumps */ struct nft_table { @@ -1307,6 +1328,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table, * @type: stateful object numeric type * @owner: module owner * @maxattr: maximum netlink attribute + * @family: address family for AF-specific object types * @policy: netlink attribute policy */ struct nft_object_type { @@ -1316,6 +1338,7 @@ struct nft_object_type { struct list_head list; u32 type; unsigned int maxattr; + u8 family; struct module *owner; const struct nla_policy *policy; }; @@ -1635,14 +1658,14 @@ struct nft_trans_table { struct nft_trans_elem { struct nft_set *set; - struct nft_set_elem elem; + struct nft_elem_priv *elem_priv; bool bound; }; #define nft_trans_elem_set(trans) \ (((struct nft_trans_elem *)trans->data)->set) -#define nft_trans_elem(trans) \ - (((struct nft_trans_elem *)trans->data)->elem) +#define nft_trans_elem_priv(trans) \ + (((struct nft_trans_elem *)trans->data)->elem_priv) #define nft_trans_elem_set_bound(trans) \ (((struct nft_trans_elem *)trans->data)->bound) @@ -1683,7 +1706,7 @@ struct nft_trans_gc { struct nft_set *set; u32 seq; u16 count; - void *priv[NFT_TRANS_GC_BATCHCOUNT]; + struct nft_elem_priv *priv[NFT_TRANS_GC_BATCHCOUNT]; struct rcu_head rcu; }; @@ -1706,7 +1729,7 @@ struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc); void nft_setelem_data_deactivate(const struct net *net, const struct nft_set *set, - struct nft_set_elem *elem); + struct nft_elem_priv *elem_priv); int __init nft_chain_filter_init(void); void nft_chain_filter_fini(void); @@ -1733,6 +1756,7 @@ struct nftables_pernet { struct list_head notify_list; struct mutex commit_mutex; u64 table_handle; + u64 tstamp; unsigned int base_seq; unsigned int gc_seq; u8 validate_state; @@ -1745,6 +1769,11 @@ static inline struct nftables_pernet *nft_pernet(const struct net *net) return net_generic(net, nf_tables_net_id); } +static inline u64 nft_net_tstamp(const struct net 
*net) +{ + return nft_pernet(net)->tstamp; +} + #define __NFT_REDUCE_READONLY 1UL #define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY diff --git a/include/net/netkit.h b/include/net/netkit.h new file mode 100644 index 0000000000..9ec0163739 --- /dev/null +++ b/include/net/netkit.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023 Isovalent */ +#ifndef __NET_NETKIT_H +#define __NET_NETKIT_H + +#include <linux/bpf.h> + +#ifdef CONFIG_NETKIT +int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog); +int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); +INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev)); +#else +static inline int netkit_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int netkit_link_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int netkit_prog_detach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int netkit_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} + +static inline struct net_device *netkit_peer_dev(struct net_device *dev) +{ + return NULL; +} +#endif /* CONFIG_NETKIT */ +#endif /* __NET_NETKIT_H */ diff --git a/include/net/netlink.h b/include/net/netlink.h index 8a7cd1170e..83bdf787ae 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -128,6 +128,8 @@ * nla_len(nla) length of attribute payload * * Attribute Payload Access for Basic Types: + * nla_get_uint(nla) get payload for a uint attribute + * nla_get_sint(nla) get payload for a sint attribute * nla_get_u8(nla) get payload for a u8 attribute * nla_get_u16(nla) get payload for a u16 attribute * nla_get_u32(nla) get payload for a u32 attribute @@ -183,6 +185,8 @@ enum { NLA_REJECT, NLA_BE16, NLA_BE32, + NLA_SINT, + NLA_UINT, __NLA_TYPE_MAX, }; @@ -229,6 +233,7 @@ enum nla_policy_validation { * nested header (or empty); len field is used if * nested_policy is also used, for the max attr * number in the nested policy. + * NLA_SINT, NLA_UINT, * NLA_U8, NLA_U16, * NLA_U32, NLA_U64, * NLA_S8, NLA_S16, @@ -260,12 +265,14 @@ enum nla_policy_validation { * while an array has the nested attributes at another * level down and the attribute types directly in the * nesting don't matter. + * NLA_UINT, * NLA_U8, * NLA_U16, * NLA_U32, * NLA_U64, * NLA_BE16, * NLA_BE32, + * NLA_SINT, * NLA_S8, * NLA_S16, * NLA_S32, @@ -280,6 +287,7 @@ enum nla_policy_validation { * or NLA_POLICY_FULL_RANGE_SIGNED() macros instead. * Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and * NLA_POLICY_RANGE() macros. + * NLA_UINT, * NLA_U8, * NLA_U16, * NLA_U32, @@ -288,6 +296,7 @@ enum nla_policy_validation { * to a struct netlink_range_validation that indicates * the min/max values. * Use NLA_POLICY_FULL_RANGE(). 
+ * NLA_SINT, * NLA_S8, * NLA_S16, * NLA_S32, @@ -351,8 +360,8 @@ struct nla_policy { const u32 mask; const char *reject_message; const struct nla_policy *nested_policy; - struct netlink_range_validation *range; - struct netlink_range_validation_signed *range_signed; + const struct netlink_range_validation *range; + const struct netlink_range_validation_signed *range_signed; struct { s16 min, max; }; @@ -377,9 +386,11 @@ struct nla_policy { #define __NLA_IS_UINT_TYPE(tp) \ (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || \ - tp == NLA_U64 || tp == NLA_BE16 || tp == NLA_BE32) + tp == NLA_U64 || tp == NLA_UINT || \ + tp == NLA_BE16 || tp == NLA_BE32) #define __NLA_IS_SINT_TYPE(tp) \ - (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64) + (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64 || \ + tp == NLA_SINT) #define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition)) #define NLA_ENSURE_UINT_TYPE(tp) \ @@ -1358,6 +1369,22 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) } /** + * nla_put_uint - Add a variable-size unsigned int to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_uint(struct sk_buff *skb, int attrtype, u64 value) +{ + u64 tmp64 = value; + u32 tmp32 = value; + + if (tmp64 == tmp32) + return nla_put_u32(skb, attrtype, tmp32); + return nla_put(skb, attrtype, sizeof(u64), &tmp64); +} + +/** * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -1512,6 +1539,22 @@ static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value, } /** + * nla_put_sint - Add a variable-size signed int to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_sint(struct sk_buff *skb, int attrtype, s64 value) +{ + s64 tmp64 = value; + s32 tmp32 = value; + + if (tmp64 == tmp32) + return nla_put_s32(skb, attrtype, tmp32); + return nla_put(skb, attrtype, sizeof(s64), &tmp64); +} + +/** * nla_put_string - Add a string netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -1668,6 +1711,17 @@ static inline u64 nla_get_u64(const struct nlattr *nla) } /** + * nla_get_uint - return payload of uint attribute + * @nla: uint netlink attribute + */ +static inline u64 nla_get_uint(const struct nlattr *nla) +{ + if (nla_len(nla) == sizeof(u32)) + return nla_get_u32(nla); + return nla_get_u64(nla); +} + +/** * nla_get_be64 - return payload of __be64 attribute * @nla: __be64 netlink attribute */ @@ -1730,6 +1784,17 @@ static inline s64 nla_get_s64(const struct nlattr *nla) } /** + * nla_get_sint - return payload of uint attribute + * @nla: uint netlink attribute + */ +static inline s64 nla_get_sint(const struct nlattr *nla) +{ + if (nla_len(nla) == sizeof(s32)) + return nla_get_s32(nla); + return nla_get_s64(nla); +} + +/** * nla_get_flag - return payload of flag attribute * @nla: flag netlink attribute */ diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 1f463b3957..bae914815a 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -107,7 +107,7 @@ struct netns_ct { struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; struct nf_ip_net nf_ct_proto; #if defined(CONFIG_NF_CONNTRACK_LABELS) - unsigned int labels_used; + atomic_t labels_used; #endif }; #endif 
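The netlink.h hunks above add NLA_UINT and NLA_SINT, variable-size integer attribute types: nla_put_uint() emits a u32 when the value fits and a u64 otherwise, and nla_get_uint() accepts either encoding (likewise for the signed pair). A hedged sketch of how a hypothetical family might use them; all EX_* names are invented:

enum {
	EX_ATTR_UNSPEC,
	EX_ATTR_BYTES,			/* NLA_UINT */
	EX_ATTR_OFFSET,			/* NLA_SINT */
	__EX_ATTR_MAX,
};

static const struct nla_policy ex_policy[__EX_ATTR_MAX] = {
	[EX_ATTR_BYTES]	 = { .type = NLA_UINT },
	[EX_ATTR_OFFSET] = NLA_POLICY_MIN(NLA_SINT, -4096),
};

static int ex_fill(struct sk_buff *skb, u64 bytes, s64 offset)
{
	/* The wire size is chosen per value, so small values stay compact. */
	if (nla_put_uint(skb, EX_ATTR_BYTES, bytes) ||
	    nla_put_sint(skb, EX_ATTR_OFFSET, offset))
		return -EMSGSIZE;
	return 0;
}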
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 7a41c47915..73f43f6991 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -132,6 +132,9 @@ struct netns_ipv4 { u8 sysctl_tcp_syncookies; u8 sysctl_tcp_migrate_req; u8 sysctl_tcp_comp_sack_nr; + u8 sysctl_tcp_backlog_ack_defer; + u8 sysctl_tcp_pingpong_thresh; + int sysctl_tcp_reordering; u8 sysctl_tcp_retries1; u8 sysctl_tcp_retries2; diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 2b12725de9..d92046a4a0 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -92,7 +92,7 @@ struct nh_res_table { u32 unbalanced_timer; u16 num_nh_buckets; - struct nh_res_bucket nh_buckets[]; + struct nh_res_bucket nh_buckets[] __counted_by(num_nh_buckets); }; struct nh_grp_entry { @@ -126,7 +126,7 @@ struct nh_group { bool has_v4; struct nh_res_table __rcu *res_table; - struct nh_grp_entry nh_entries[]; + struct nh_grp_entry nh_entries[] __counted_by(num_nh); }; struct nexthop { @@ -187,7 +187,7 @@ struct nh_notifier_grp_entry_info { struct nh_notifier_grp_info { u16 num_nh; bool is_fdb; - struct nh_notifier_grp_entry_info nh_entries[]; + struct nh_notifier_grp_entry_info nh_entries[] __counted_by(num_nh); }; struct nh_notifier_res_bucket_info { @@ -200,7 +200,7 @@ struct nh_notifier_res_bucket_info { struct nh_notifier_res_table_info { u16 num_nh_buckets; - struct nh_notifier_single_info nhs[]; + struct nh_notifier_single_info nhs[] __counted_by(num_nh_buckets); }; struct nh_notifier_info { diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h index 8e7751464f..4ebd544ae9 100644 --- a/include/net/page_pool/helpers.h +++ b/include/net/page_pool/helpers.h @@ -8,23 +8,46 @@ /** * DOC: page_pool allocator * - * The page_pool allocator is optimized for the XDP mode that - * uses one frame per-page, but it can fallback on the - * regular page allocator APIs. - * - * Basic use involves replacing alloc_pages() calls with the - * page_pool_alloc_pages() call. Drivers should use - * page_pool_dev_alloc_pages() replacing dev_alloc_pages(). - * - * The API keeps track of in-flight pages, in order to let API users know - * when it is safe to free a page_pool object. Thus, API users - * must call page_pool_put_page() to free the page, or attach - * the page to a page_pool-aware object like skbs marked with + * The page_pool allocator is optimized for recycling page or page fragment used + * by skb packet and xdp frame. + * + * Basic use involves replacing and alloc_pages() calls with page_pool_alloc(), + * which allocate memory with or without page splitting depending on the + * requested memory size. + * + * If the driver knows that it always requires full pages or its allocations are + * always smaller than half a page, it can use one of the more specific API + * calls: + * + * 1. page_pool_alloc_pages(): allocate memory without page splitting when + * driver knows that the memory it need is always bigger than half of the page + * allocated from page pool. There is no cache line dirtying for 'struct page' + * when a page is recycled back to the page pool. + * + * 2. page_pool_alloc_frag(): allocate memory with page splitting when driver + * knows that the memory it need is always smaller than or equal to half of the + * page allocated from page pool. 
Page splitting enables memory saving and thus + * avoids TLB/cache miss for data access, but there also is some cost to + * implement page splitting, mainly some cache line dirtying/bouncing for + * 'struct page' and atomic operation for page->pp_frag_count. + * + * The API keeps track of in-flight pages, in order to let API users know when + * it is safe to free a page_pool object, the API users must call + * page_pool_put_page() or page_pool_free_va() to free the page_pool object, or + * attach the page_pool object to a page_pool-aware object like skbs marked with * skb_mark_for_recycle(). * - * API users must call page_pool_put_page() once on a page, as it - * will either recycle the page, or in case of refcnt > 1, it will - * release the DMA mapping and in-flight state accounting. + * page_pool_put_page() may be called multi times on the same page if a page is + * split into multi fragments. For the last fragment, it will either recycle the + * page, or in case of page->_refcount > 1, it will release the DMA mapping and + * in-flight state accounting. + * + * dma_sync_single_range_for_device() is only called for the last fragment when + * page_pool is created with PP_FLAG_DMA_SYNC_DEV flag, so it depends on the + * last freed fragment to do the sync_for_device operation for all fragments in + * the same page when a page is split, the API user must setup pool->p.max_len + * and pool->p.offset correctly and ensure that page_pool_put_page() is called + * with dma_sync_size being -1 for fragment API. */ #ifndef _NET_PAGE_POOL_HELPERS_H #define _NET_PAGE_POOL_HELPERS_H @@ -73,6 +96,17 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) return page_pool_alloc_pages(pool, gfp); } +/** + * page_pool_dev_alloc_frag() - allocate a page fragment. + * @pool: pool from which to allocate + * @offset: offset to the allocated page + * @size: requested size + * + * Get a page fragment from the page allocator or page_pool caches. + * + * Return: + * Return allocated page fragment, otherwise return NULL. + */ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, unsigned int *offset, unsigned int size) @@ -82,6 +116,91 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, return page_pool_alloc_frag(pool, offset, size, gfp); } +static inline struct page *page_pool_alloc(struct page_pool *pool, + unsigned int *offset, + unsigned int *size, gfp_t gfp) +{ + unsigned int max_size = PAGE_SIZE << pool->p.order; + struct page *page; + + if ((*size << 1) > max_size) { + *size = max_size; + *offset = 0; + return page_pool_alloc_pages(pool, gfp); + } + + page = page_pool_alloc_frag(pool, offset, *size, gfp); + if (unlikely(!page)) + return NULL; + + /* There is very likely not enough space for another fragment, so append + * the remaining size to the current fragment to avoid truesize + * underestimate problem. + */ + if (pool->frag_offset + *size > max_size) { + *size = max_size - *offset; + pool->frag_offset = max_size; + } + + return page; +} + +/** + * page_pool_dev_alloc() - allocate a page or a page fragment. + * @pool: pool from which to allocate + * @offset: offset to the allocated page + * @size: in as the requested size, out as the allocated size + * + * Get a page or a page fragment from the page allocator or page_pool caches + * depending on the requested size in order to allocate memory with least memory + * utilization and performance penalty. + * + * Return: + * Return allocated page or page fragment, otherwise return NULL. 
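Around these hunks, page_pool gains size-adaptive allocation (page_pool_alloc() and the page_pool_dev_alloc*() wrappers) plus va-based variants. A minimal usage sketch under stated assumptions; the mydrv/MYDRV names are invented, and the pool is assumed to have been created with a suitable order:

static int mydrv_refill_one(struct page_pool *pool,
			    struct mydrv_rx_slot *slot)
{
	/* In: requested length; out: the length actually granted, which
	 * page_pool_alloc() may round up to the rest of the fragment.
	 */
	unsigned int size = MYDRV_RX_BUF_LEN;
	void *va;

	va = page_pool_dev_alloc_va(pool, &size);
	if (unlikely(!va))
		return -ENOMEM;

	slot->va = va;
	slot->len = size;
	return 0;
}

/* Teardown side: page_pool_free_va(pool, slot->va, true) resolves the
 * head page via virt_to_head_page() and recycles it into the pool.
 */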
+ */ +static inline struct page *page_pool_dev_alloc(struct page_pool *pool, + unsigned int *offset, + unsigned int *size) +{ + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + return page_pool_alloc(pool, offset, size, gfp); +} + +static inline void *page_pool_alloc_va(struct page_pool *pool, + unsigned int *size, gfp_t gfp) +{ + unsigned int offset; + struct page *page; + + /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */ + page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM); + if (unlikely(!page)) + return NULL; + + return page_address(page) + offset; +} + +/** + * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its + * va. + * @pool: pool from which to allocate + * @size: in as the requested size, out as the allocated size + * + * This is just a thin wrapper around the page_pool_alloc() API, and + * it returns va of the allocated page or page fragment. + * + * Return: + * Return the va for the allocated page or page fragment, otherwise return NULL. + */ +static inline void *page_pool_dev_alloc_va(struct page_pool *pool, + unsigned int *size) +{ + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + return page_pool_alloc_va(pool, size, gfp); +} + /** * page_pool_get_dma_dir() - Retrieve the stored DMA direction. * @pool: pool from which page was allocated @@ -115,28 +234,49 @@ static inline long page_pool_defrag_page(struct page *page, long nr) long ret; /* If nr == pp_frag_count then we have cleared all remaining - * references to the page. No need to actually overwrite it, instead - * we can leave this to be overwritten by the calling function. + * references to the page: + * 1. 'n == 1': no need to actually overwrite it. + * 2. 'n != 1': overwrite it with one, which is the rare case + * for pp_frag_count draining. * - * The main advantage to doing this is that an atomic_read is - * generally a much cheaper operation than an atomic update, - * especially when dealing with a page that may be partitioned - * into only 2 or 3 pieces. + * The main advantage to doing this is that not only we avoid a atomic + * update, as an atomic_read is generally a much cheaper operation than + * an atomic update, especially when dealing with a page that may be + * partitioned into only 2 or 3 pieces; but also unify the pp_frag_count + * handling by ensuring all pages have partitioned into only 1 piece + * initially, and only overwrite it when the page is partitioned into + * more than one piece. */ - if (atomic_long_read(&page->pp_frag_count) == nr) + if (atomic_long_read(&page->pp_frag_count) == nr) { + /* As we have ensured nr is always one for constant case using + * the BUILD_BUG_ON(), only need to handle the non-constant case + * here for pp_frag_count draining, which is a rare case. + */ + BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1); + if (!__builtin_constant_p(nr)) + atomic_long_set(&page->pp_frag_count, 1); + return 0; + } ret = atomic_long_sub_return(nr, &page->pp_frag_count); WARN_ON(ret < 0); + + /* We are the last user here too, reset pp_frag_count back to 1 to + * ensure all pages have been partitioned into 1 piece initially, + * this should be the rare case when the last two fragment users call + * page_pool_defrag_page() currently. 
+ */ + if (unlikely(!ret)) + atomic_long_set(&page->pp_frag_count, 1); + return ret; } -static inline bool page_pool_is_last_frag(struct page_pool *pool, - struct page *page) +static inline bool page_pool_is_last_frag(struct page *page) { - /* If fragments aren't enabled or count is 0 we were the last user */ - return !(pool->p.flags & PP_FLAG_PAGE_FRAG) || - (page_pool_defrag_page(page, 1) == 0); + /* If page_pool_defrag_page() returns 0, we were the last user */ + return page_pool_defrag_page(page, 1) == 0; } /** @@ -161,7 +301,7 @@ static inline void page_pool_put_page(struct page_pool *pool, * allow registering MEM_TYPE_PAGE_POOL, but shield linker. */ #ifdef CONFIG_PAGE_POOL - if (!page_pool_is_last_frag(pool, page)) + if (!page_pool_is_last_frag(page)) return; page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct); @@ -197,10 +337,24 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, page_pool_put_full_page(pool, page, true); } -#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \ +#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \ (sizeof(dma_addr_t) > sizeof(unsigned long)) /** + * page_pool_free_va() - free a va into the page_pool + * @pool: pool from which va was allocated + * @va: va to be freed + * @allow_direct: freed by the consumer, allow lockless caching + * + * Free a va allocated from page_pool_allo_va(). + */ +static inline void page_pool_free_va(struct page_pool *pool, void *va, + bool allow_direct) +{ + page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct); +} + +/** * page_pool_get_dma_addr() - Retrieve the stored DMA address. * @page: page allocated from a page pool * @@ -211,17 +365,25 @@ static inline dma_addr_t page_pool_get_dma_addr(struct page *page) { dma_addr_t ret = page->dma_addr; - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) - ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16; + if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) + ret <<= PAGE_SHIFT; return ret; } -static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr) +static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr) { + if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) { + page->dma_addr = addr >> PAGE_SHIFT; + + /* We assume page alignment to shave off bottom bits, + * if this "compression" doesn't work we need to drop. 
+ */ + return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT; + } + page->dma_addr = addr; - if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) - page->dma_addr_upper = upper_32_bits(addr); + return false; } static inline bool page_pool_put(struct page_pool *pool) diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index 887e7946a5..6fc5134095 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -17,10 +17,8 @@ * Please note DMA-sync-for-CPU is still * device driver responsibility */ -#define PP_FLAG_PAGE_FRAG BIT(2) /* for page frag feature */ #define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\ - PP_FLAG_DMA_SYNC_DEV |\ - PP_FLAG_PAGE_FRAG) + PP_FLAG_DMA_SYNC_DEV) /* * Fast allocation side cache array/stack @@ -45,7 +43,7 @@ struct pp_alloc_cache { /** * struct page_pool_params - page pool parameters - * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG + * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV * @order: 2^order pages on allocation * @pool_size: size of the ptr_ring * @nid: NUMA node id to allocate from pages from diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index f308e82686..a76c9171db 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -154,6 +154,12 @@ __cls_set_class(unsigned long *clp, unsigned long cl) return xchg(clp, cl); } +static inline void tcf_set_drop_reason(struct tcf_result *res, + enum skb_drop_reason reason) +{ + res->drop_reason = reason; +} + static inline void __tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base) { diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 15960564e0..9fa1d0794d 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -20,10 +20,10 @@ struct qdisc_walker { int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *); }; -static inline void *qdisc_priv(struct Qdisc *q) -{ - return &q->privdata; -} +#define qdisc_priv(q) \ + _Generic(q, \ + const struct Qdisc * : (const void *)&q->privdata, \ + struct Qdisc * : (void *)&q->privdata) static inline struct Qdisc *qdisc_from_priv(void *priv) { diff --git a/include/net/regulatory.h b/include/net/regulatory.h index b2cb4a9eb0..ebf9e028d1 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -213,6 +213,7 @@ struct ieee80211_reg_rule { u32 flags; u32 dfs_cac_ms; bool has_wmm; + s8 psd; }; struct ieee80211_regdomain { diff --git a/include/net/route.h b/include/net/route.h index 51a45b1887..980ab474ea 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -37,7 +37,7 @@ #define RTO_ONLINK 0x01 -#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE)) +#define RT_CONN_FLAGS(sk) (RT_TOS(READ_ONCE(inet_sk(sk)->tos)) | sock_flag(sk, SOCK_LOCALROUTE)) #define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE)) static inline __u8 ip_sock_rt_scope(const struct sock *sk) @@ -50,7 +50,7 @@ static inline __u8 ip_sock_rt_scope(const struct sock *sk) static inline __u8 ip_sock_rt_tos(const struct sock *sk) { - return RT_TOS(inet_sk(sk)->tos); + return RT_TOS(READ_ONCE(inet_sk(sk)->tos)); } struct ip_tunnel_info; @@ -136,12 +136,6 @@ static inline struct rtable *__ip_route_output_key(struct net *net, struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, const struct sock *sk); -struct rtable *ip_route_output_tunnel(struct sk_buff *skb, - struct net_device *dev, - struct net *net, __be32 *saddr, - const struct ip_tunnel_info *info, - u8 protocol, bool use_cache); - struct dst_entry 
*ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index e940debac4..959a7725c2 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -324,7 +324,6 @@ struct Qdisc_ops { struct module *owner; }; - struct tcf_result { union { struct { @@ -332,8 +331,8 @@ struct tcf_result { u32 classid; }; const struct tcf_proto *goto_tp; - }; + enum skb_drop_reason drop_reason; }; struct tcf_chain; @@ -591,6 +590,7 @@ static inline void sch_tree_unlock(struct Qdisc *q) extern struct Qdisc noop_qdisc; extern struct Qdisc_ops noop_qdisc_ops; extern struct Qdisc_ops pfifo_fast_ops; +extern const u8 sch_default_prio2band[TC_PRIO_MAX + 1]; extern struct Qdisc_ops mq_qdisc_ops; extern struct Qdisc_ops noqueue_qdisc_ops; extern const struct Qdisc_ops *default_qdisc_ops; diff --git a/include/net/sock.h b/include/net/sock.h index e70c903b04..f9a9f61fa1 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1821,12 +1821,11 @@ static inline bool sock_owned_by_user_nocheck(const struct sock *sk) static inline void sock_release_ownership(struct sock *sk) { - if (sock_owned_by_user_nocheck(sk)) { - sk->sk_lock.owned = 0; + DEBUG_NET_WARN_ON_ONCE(!sock_owned_by_user_nocheck(sk)); + sk->sk_lock.owned = 0; - /* The sk_lock has mutex_unlock() semantics: */ - mutex_release(&sk->sk_lock.dep_map, _RET_IP_); - } + /* The sk_lock has mutex_unlock() semantics: */ + mutex_release(&sk->sk_lock.dep_map, _RET_IP_); } /* no reclassification while locks are held */ @@ -1865,11 +1864,13 @@ int sk_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int sock_setsockopt(struct socket *sock, int level, int op, sockptr_t optval, unsigned int optlen); +int do_sock_setsockopt(struct socket *sock, bool compat, int level, + int optname, sockptr_t optval, int optlen); +int do_sock_getsockopt(struct socket *sock, bool compat, int level, + int optname, sockptr_t optval, sockptr_t optlen); int sk_getsockopt(struct sock *sk, int level, int optname, sockptr_t optval, sockptr_t optlen); -int sock_getsockopt(struct socket *sock, int level, int op, - char __user *optval, int __user *optlen); int sock_gettstamp(struct socket *sock, void __user *userstamp, bool timeval, bool time32); struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, @@ -2249,7 +2250,7 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n) } } -bool sk_mc_loop(struct sock *sk); +bool sk_mc_loop(const struct sock *sk); static inline bool sk_can_gso(const struct sock *sk) { diff --git a/include/net/switchdev.h b/include/net/switchdev.h index a43062d4c7..8346b0d295 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -308,6 +308,9 @@ void switchdev_deferred_process(void); int switchdev_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct netlink_ext_ack *extack); +bool switchdev_port_obj_act_is_deferred(struct net_device *dev, + enum switchdev_notifier_type nt, + const struct switchdev_obj *obj); int switchdev_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct netlink_ext_ack *extack); diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h index 1dc2f827d0..77f87c622a 100644 --- a/include/net/tc_act/tc_ct.h +++ b/include/net/tc_act/tc_ct.h @@ -22,6 +22,7 @@ struct tcf_ct_params { struct nf_nat_range2 range; bool ipv4_range; + bool put_labels; u16 ct_action; diff --git a/include/net/tcp.h 
b/include/net/tcp.h index 9cbcfb3c95..2ac9475be1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -37,6 +37,7 @@ #include <net/snmp.h> #include <net/ip.h> #include <net/tcp_states.h> +#include <net/tcp_ao.h> #include <net/inet_ecn.h> #include <net/dst.h> #include <net/mptcp.h> @@ -131,6 +132,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */ #define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */ +static_assert((1 << ATO_BITS) > TCP_DELACK_MAX); + #if HZ >= 100 #define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */ #define TCP_ATO_MIN ((unsigned)(HZ/25)) @@ -164,7 +167,12 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_KEEPCNT 127 #define MAX_TCP_SYNCNT 127 -#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24) +/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds + * to avoid overflows. This assumes a clock smaller than 1 Mhz. + * Default clock is 1 Khz, tcp_usec_ts uses 1 Mhz. + */ +#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC) + #define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated * after this time. It should be equal * (or greater than) TCP_TIMEWAIT_LEN @@ -187,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCPOPT_SACK 5 /* SACK Block */ #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ #define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */ +#define TCPOPT_AO 29 /* Authentication Option (RFC5925) */ #define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */ #define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */ #define TCPOPT_EXP 254 /* Experimental */ @@ -429,7 +438,6 @@ int tcp_mmap(struct file *file, struct socket *sock, void tcp_parse_options(const struct net *net, const struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab, struct tcp_fastopen_cookie *foc); -const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); /* * BPF SKB-less helpers @@ -726,7 +734,7 @@ static inline void tcp_fast_path_check(struct sock *sk) u32 tcp_delack_max(const struct sock *sk); /* Compute the actual rto_min value */ -static inline u32 tcp_rto_min(struct sock *sk) +static inline u32 tcp_rto_min(const struct sock *sk) { const struct dst_entry *dst = __sk_dst_get(sk); u32 rto_min = inet_csk(sk)->icsk_rto_min; @@ -736,7 +744,7 @@ static inline u32 tcp_rto_min(struct sock *sk) return rto_min; } -static inline u32 tcp_rto_min_us(struct sock *sk) +static inline u32 tcp_rto_min_us(const struct sock *sk) { return jiffies_to_usecs(tcp_rto_min(sk)); } @@ -796,22 +804,31 @@ static inline u64 tcp_clock_us(void) return div_u64(tcp_clock_ns(), NSEC_PER_USEC); } -/* This should only be used in contexts where tp->tcp_mstamp is up to date */ -static inline u32 tcp_time_stamp(const struct tcp_sock *tp) +static inline u64 tcp_clock_ms(void) +{ + return div_u64(tcp_clock_ns(), NSEC_PER_MSEC); +} + +/* TCP Timestamp included in TS option (RFC 1323) can either use ms + * or usec resolution. Each socket carries a flag to select one or other + * resolution, as the route attribute could change anytime. + * Each flow must stick to initial resolution. + */ +static inline u32 tcp_clock_ts(bool usec_ts) { - return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ); + return usec_ts ? 
tcp_clock_us() : tcp_clock_ms(); } -/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */ -static inline u64 tcp_ns_to_ts(u64 ns) +static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp) { - return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ); + return div_u64(tp->tcp_mstamp, USEC_PER_MSEC); } -/* Could use tcp_clock_us() / 1000, but this version uses a single divide */ -static inline u32 tcp_time_stamp_raw(void) +static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp) { - return tcp_ns_to_ts(tcp_clock_ns()); + if (tp->tcp_usec_ts) + return tp->tcp_mstamp; + return tcp_time_stamp_ms(tp); } void tcp_mstamp_refresh(struct tcp_sock *tp); @@ -821,17 +838,30 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) return max_t(s64, t1 - t0, 0); } -static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) -{ - return tcp_ns_to_ts(skb->skb_mstamp_ns); -} - /* provide the departure time in us unit */ static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb) { return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC); } +/* Provide skb TSval in usec or ms unit */ +static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb) +{ + if (usec_ts) + return tcp_skb_timestamp_us(skb); + + return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC); +} + +static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw) +{ + return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset; +} + +static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq) +{ + return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off; +} #define tcp_flag_byte(th) (((u_int8_t *)th)[13]) @@ -1460,13 +1490,15 @@ static inline int tcp_space_from_win(const struct sock *sk, int win) return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win); } +/* Assume a conservative default of 1200 bytes of payload per 4K page. + * This may be adjusted later in tcp_measure_rcv_mss(). + */ +#define TCP_DEFAULT_SCALING_RATIO ((1200 << TCP_RMEM_TO_WIN_SCALE) / \ + SKB_TRUESIZE(4096)) + static inline void tcp_scaling_ratio_init(struct sock *sk) { - /* Assume a conservative default of 1200 bytes of payload per 4K page. - * This may be adjusted later in tcp_measure_rcv_mss(). 
- */ - tcp_sk(sk)->scaling_ratio = (1200 << TCP_RMEM_TO_WIN_SCALE) / - SKB_TRUESIZE(4096); + tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; } /* Note: caller must be prepared to deal with negative returns */ @@ -1602,7 +1634,7 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) return true; if (unlikely(!time_before32(ktime_get_seconds(), - rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))) + rx_opt->ts_recent_stamp + TCP_PAWS_WRAP))) return true; /* * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, @@ -1662,12 +1694,7 @@ static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) tp->retransmit_skb_hint = NULL; } -union tcp_md5_addr { - struct in_addr a4; -#if IS_ENABLED(CONFIG_IPV6) - struct in6_addr a6; -#endif -}; +#define tcp_md5_addr tcp_ao_addr /* - key database */ struct tcp_md5sig_key { @@ -1711,12 +1738,39 @@ union tcp_md5sum_block { #endif }; -/* - pool: digest algorithm, hash description and scratch buffer */ -struct tcp_md5sig_pool { - struct ahash_request *md5_req; - void *scratch; +/* + * struct tcp_sigpool - per-CPU pool of ahash_requests + * @scratch: per-CPU temporary area, that can be used between + * tcp_sigpool_start() and tcp_sigpool_end() to perform + * crypto request + * @req: pre-allocated ahash request + */ +struct tcp_sigpool { + void *scratch; + struct ahash_request *req; }; +int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size); +void tcp_sigpool_get(unsigned int id); +void tcp_sigpool_release(unsigned int id); +int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp, + const struct sk_buff *skb, + unsigned int header_len); + +/** + * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash + * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash() + * @c: returned tcp_sigpool for usage (uninitialized on failure) + * + * Returns 0 on success, error otherwise. 
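The tcp_sigpool API introduced above replaces the old tcp_md5sig_pool: a per-CPU pool of ahash requests is allocated once per algorithm, then each computation is bracketed by start/end. A hedged sketch of that lifecycle; only the tcp_sigpool_* calls come from the patch, the surrounding names are invented:

static int example_hash_skb(unsigned int pool_id, const struct sk_buff *skb,
			    unsigned int header_len)
{
	struct tcp_sigpool hp;
	int err;

	err = tcp_sigpool_start(pool_id, &hp);	/* disables BHs on success */
	if (err)
		return err;

	err = tcp_sigpool_hash_skb_data(&hp, skb, header_len);
	tcp_sigpool_end(&hp);			/* re-enables BHs */
	return err;
}

/* pool_id is obtained once, e.g. from
 * tcp_sigpool_alloc_ahash("md5", scratch_size), and dropped again with
 * tcp_sigpool_release(pool_id).
 */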
+ */ +int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c); +/** + * tcp_sigpool_end - enable bh and stop using tcp_sigpool + * @c: tcp_sigpool context that was returned by tcp_sigpool_start() + */ +void tcp_sigpool_end(struct tcp_sigpool *c); +size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len); /* - functions */ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, const struct sock *sk, const struct sk_buff *skb); @@ -1729,28 +1783,36 @@ int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr, int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family, u8 prefixlen, int l3index, u8 flags); +void tcp_clear_md5_list(struct sock *sk); struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, const struct sock *addr_sk); #ifdef CONFIG_TCP_MD5SIG -#include <linux/jump_label.h> -extern struct static_key_false_deferred tcp_md5_needed; struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index, const union tcp_md5_addr *addr, - int family); + int family, bool any_l3index); static inline struct tcp_md5sig_key * tcp_md5_do_lookup(const struct sock *sk, int l3index, const union tcp_md5_addr *addr, int family) { if (!static_branch_unlikely(&tcp_md5_needed.key)) return NULL; - return __tcp_md5_do_lookup(sk, l3index, addr, family); + return __tcp_md5_do_lookup(sk, l3index, addr, family, false); +} + +static inline struct tcp_md5sig_key * +tcp_md5_do_lookup_any_l3index(const struct sock *sk, + const union tcp_md5_addr *addr, int family) +{ + if (!static_branch_unlikely(&tcp_md5_needed.key)) + return NULL; + return __tcp_md5_do_lookup(sk, 0, addr, family, true); } enum skb_drop_reason tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, const void *saddr, const void *daddr, - int family, int dif, int sdif); + int family, int l3index, const __u8 *hash_location); #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) @@ -1762,27 +1824,29 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index, return NULL; } +static inline struct tcp_md5sig_key * +tcp_md5_do_lookup_any_l3index(const struct sock *sk, + const union tcp_md5_addr *addr, int family) +{ + return NULL; +} + static inline enum skb_drop_reason tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, const void *saddr, const void *daddr, - int family, int dif, int sdif) + int family, int l3index, const __u8 *hash_location) { return SKB_NOT_DROPPED_YET; } #define tcp_twsk_md5_key(twsk) NULL #endif -bool tcp_alloc_md5sig_pool(void); +int tcp_md5_alloc_sigpool(void); +void tcp_md5_release_sigpool(void); +void tcp_md5_add_sigpool(void); +extern int tcp_md5_sigpool_id; -struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); -static inline void tcp_put_md5sig_pool(void) -{ - local_bh_enable(); -} - -int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, - unsigned int header_len); -int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, +int tcp_md5_hash_key(struct tcp_sigpool *hp, const struct tcp_md5sig_key *key); /* From tcp_fastopen.c */ @@ -2087,7 +2151,11 @@ INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff)) INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)); +#ifdef CONFIG_INET void tcp_gro_complete(struct sk_buff *skb); +#else +static 
inline void tcp_gro_complete(struct sk_buff *skb) { } +#endif void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); @@ -2127,6 +2195,18 @@ struct tcp_sock_af_ops { sockptr_t optval, int optlen); #endif +#ifdef CONFIG_TCP_AO + int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen); + struct tcp_ao_key *(*ao_lookup)(const struct sock *sk, + struct sock *addr_sk, + int sndid, int rcvid); + int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key, + const struct sock *sk, + __be32 sisn, __be32 disn, bool send); + int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao, + const struct sock *sk, const struct sk_buff *skb, + const u8 *tkey, int hash_offset, u32 sne); +#endif }; struct tcp_request_sock_ops { @@ -2139,6 +2219,15 @@ struct tcp_request_sock_ops { const struct sock *sk, const struct sk_buff *skb); #endif +#ifdef CONFIG_TCP_AO + struct tcp_ao_key *(*ao_lookup)(const struct sock *sk, + struct request_sock *req, + int sndid, int rcvid); + int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk); + int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt, + struct request_sock *req, const struct sk_buff *skb, + int hash_offset, u32 sne); +#endif #ifdef CONFIG_SYN_COOKIES __u32 (*cookie_init_seq)(const struct sk_buff *skb, __u16 *mss); @@ -2179,6 +2268,76 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, } #endif +struct tcp_key { + union { + struct { + struct tcp_ao_key *ao_key; + char *traffic_key; + u32 sne; + u8 rcv_next; + }; + struct tcp_md5sig_key *md5_key; + }; + enum { + TCP_KEY_NONE = 0, + TCP_KEY_MD5, + TCP_KEY_AO, + } type; +}; + +static inline void tcp_get_current_key(const struct sock *sk, + struct tcp_key *out) +{ +#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG) + const struct tcp_sock *tp = tcp_sk(sk); +#endif + +#ifdef CONFIG_TCP_AO + if (static_branch_unlikely(&tcp_ao_needed.key)) { + struct tcp_ao_info *ao; + + ao = rcu_dereference_protected(tp->ao_info, + lockdep_sock_is_held(sk)); + if (ao) { + out->ao_key = READ_ONCE(ao->current_key); + out->type = TCP_KEY_AO; + return; + } + } +#endif +#ifdef CONFIG_TCP_MD5SIG + if (static_branch_unlikely(&tcp_md5_needed.key) && + rcu_access_pointer(tp->md5sig_info)) { + out->md5_key = tp->af_specific->md5_lookup(sk, sk); + if (out->md5_key) { + out->type = TCP_KEY_MD5; + return; + } + } +#endif + out->type = TCP_KEY_NONE; +} + +static inline bool tcp_key_is_md5(const struct tcp_key *key) +{ +#ifdef CONFIG_TCP_MD5SIG + if (static_branch_unlikely(&tcp_md5_needed.key) && + key->type == TCP_KEY_MD5) + return true; +#endif + return false; +} + +static inline bool tcp_key_is_ao(const struct tcp_key *key) +{ +#ifdef CONFIG_TCP_AO + if (static_branch_unlikely(&tcp_ao_needed.key) && + key->type == TCP_KEY_AO) + return true; +#endif + return false; +} + int tcpv4_offload_init(void); void tcp_v4_init(void); @@ -2343,7 +2502,7 @@ struct tcp_ulp_ops { /* cleanup ulp */ void (*release)(struct sock *sk); /* diagnostic */ - int (*get_info)(const struct sock *sk, struct sk_buff *skb); + int (*get_info)(struct sock *sk, struct sk_buff *skb); size_t (*get_info_size)(const struct sock *sk); /* clone ulp */ void (*clone)(const struct request_sock *req, struct sock *newsk, @@ -2537,4 +2696,116 @@ static inline u64 tcp_transmit_time(const struct sock *sk) return 0; } +static inline int tcp_parse_auth_options(const struct tcphdr *th, + const u8 **md5_hash, const struct tcp_ao_hdr **aoh) +{ + const u8 *md5_tmp, *ao_tmp; + int ret; + + ret = 
tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp); + if (ret) + return ret; + + if (md5_hash) + *md5_hash = md5_tmp; + + if (aoh) { + if (!ao_tmp) + *aoh = NULL; + else + *aoh = (struct tcp_ao_hdr *)(ao_tmp - 2); + } + + return 0; +} + +static inline bool tcp_ao_required(struct sock *sk, const void *saddr, + int family, int l3index, bool stat_inc) +{ +#ifdef CONFIG_TCP_AO + struct tcp_ao_info *ao_info; + struct tcp_ao_key *ao_key; + + if (!static_branch_unlikely(&tcp_ao_needed.key)) + return false; + + ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info, + lockdep_sock_is_held(sk)); + if (!ao_info) + return false; + + ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1); + if (ao_info->ao_required || ao_key) { + if (stat_inc) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED); + atomic64_inc(&ao_info->counters.ao_required); + } + return true; + } +#endif + return false; +} + +/* Called with rcu_read_lock() */ +static inline enum skb_drop_reason +tcp_inbound_hash(struct sock *sk, const struct request_sock *req, + const struct sk_buff *skb, + const void *saddr, const void *daddr, + int family, int dif, int sdif) +{ + const struct tcphdr *th = tcp_hdr(skb); + const struct tcp_ao_hdr *aoh; + const __u8 *md5_location; + int l3index; + + /* Invalid option or two times meet any of auth options */ + if (tcp_parse_auth_options(th, &md5_location, &aoh)) { + tcp_hash_fail("TCP segment has incorrect auth options set", + family, skb, ""); + return SKB_DROP_REASON_TCP_AUTH_HDR; + } + + if (req) { + if (tcp_rsk_used_ao(req) != !!aoh) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD); + tcp_hash_fail("TCP connection can't start/end using TCP-AO", + family, skb, "%s", + !aoh ? "missing AO" : "AO signed"); + return SKB_DROP_REASON_TCP_AOFAILURE; + } + } + + /* sdif set, means packet ingressed via a device + * in an L3 domain and dif is set to the l3mdev + */ + l3index = sdif ? dif : 0; + + /* Fast path: unsigned segments */ + if (likely(!md5_location && !aoh)) { + /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid + * for the remote peer. On TCP-AO established connection + * the last key is impossible to remove, so there's + * always at least one current_key. 
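The (ao_tmp - 2) adjustment in tcp_parse_auth_options() above rewinds from the option payload to the option's kind octet, so the cast overlays struct tcp_ao_hdr on the wire bytes. A self-contained illustration (kind 29 is the TCP-AO option per RFC 5925; the sketch struct is a stand-in for the real header):

#include <stdint.h>
#include <stdio.h>

struct tcp_ao_hdr_sketch {	/* mirrors struct tcp_ao_hdr */
	uint8_t kind, length, keyid, rnext_keyid;
};

int main(void)
{
	/* kind=29, length=16, keyid=1, rnext_keyid=2; the 12 MAC bytes are elided */
	uint8_t opt[] = { 29, 16, 1, 2 };
	const uint8_t *ao_tmp = opt + 2;	/* the parser returns the payload pointer */
	const struct tcp_ao_hdr_sketch *aoh = (const void *)(ao_tmp - 2);

	printf("kind=%u len=%u keyid=%u rnext=%u\n",
	       aoh->kind, aoh->length, aoh->keyid, aoh->rnext_keyid);
	return 0;
}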
+ */ + if (tcp_ao_required(sk, saddr, family, l3index, true)) { + tcp_hash_fail("AO hash is required, but not found", + family, skb, "L3 index %d", l3index); + return SKB_DROP_REASON_TCP_AONOTFOUND; + } + if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); + tcp_hash_fail("MD5 Hash not found", + family, skb, "L3 index %d", l3index); + return SKB_DROP_REASON_TCP_MD5NOTFOUND; + } + return SKB_NOT_DROPPED_YET; + } + + if (aoh) + return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh); + + return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family, + l3index, md5_location); +} + #endif /* _TCP_H */ diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h new file mode 100644 index 0000000000..b04afced4c --- /dev/null +++ b/include/net/tcp_ao.h @@ -0,0 +1,389 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _TCP_AO_H +#define _TCP_AO_H + +#define TCP_AO_KEY_ALIGN 1 +#define __tcp_ao_key_align __aligned(TCP_AO_KEY_ALIGN) + +union tcp_ao_addr { + struct in_addr a4; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr a6; +#endif +}; + +struct tcp_ao_hdr { + u8 kind; + u8 length; + u8 keyid; + u8 rnext_keyid; +}; + +struct tcp_ao_counters { + atomic64_t pkt_good; + atomic64_t pkt_bad; + atomic64_t key_not_found; + atomic64_t ao_required; + atomic64_t dropped_icmp; +}; + +struct tcp_ao_key { + struct hlist_node node; + union tcp_ao_addr addr; + u8 key[TCP_AO_MAXKEYLEN] __tcp_ao_key_align; + unsigned int tcp_sigpool_id; + unsigned int digest_size; + int l3index; + u8 prefixlen; + u8 family; + u8 keylen; + u8 keyflags; + u8 sndid; + u8 rcvid; + u8 maclen; + struct rcu_head rcu; + atomic64_t pkt_good; + atomic64_t pkt_bad; + u8 traffic_keys[]; +}; + +static inline u8 *rcv_other_key(struct tcp_ao_key *key) +{ + return key->traffic_keys; +} + +static inline u8 *snd_other_key(struct tcp_ao_key *key) +{ + return key->traffic_keys + key->digest_size; +} + +static inline int tcp_ao_maclen(const struct tcp_ao_key *key) +{ + return key->maclen; +} + +/* Use tcp_ao_len_aligned() for TCP header calculations */ +static inline int tcp_ao_len(const struct tcp_ao_key *key) +{ + return tcp_ao_maclen(key) + sizeof(struct tcp_ao_hdr); +} + +static inline int tcp_ao_len_aligned(const struct tcp_ao_key *key) +{ + return round_up(tcp_ao_len(key), 4); +} + +static inline unsigned int tcp_ao_digest_size(struct tcp_ao_key *key) +{ + return key->digest_size; +} + +static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key) +{ + return sizeof(struct tcp_ao_key) + (key->digest_size << 1); +} + +struct tcp_ao_info { + /* List of tcp_ao_key's */ + struct hlist_head head; + /* current_key and rnext_key aren't maintained on listen sockets. + * Their purpose is to cache keys on established connections, + * saving needless lookups. Never dereference any of them from + * listen sockets. + * ::current_key may change in RX to the key that was requested by + * the peer, please use READ_ONCE()/WRITE_ONCE() in order to avoid + * load/store tearing. + * Do the same for ::rnext_key, if you don't hold socket lock + * (it's changed only by userspace request in setsockopt()). + */ + struct tcp_ao_key *current_key; + struct tcp_ao_key *rnext_key; + struct tcp_ao_counters counters; + u32 ao_required :1, + accept_icmps :1, + __unused :30; + __be32 lisn; + __be32 risn; + /* Sequence Number Extension (SNE) are upper 4 bytes for SEQ, + * that protect TCP-AO connection from replayed old TCP segments. + * See RFC5925 (6.2). 
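The SNE bookkeeping described above extends the 32-bit sequence space with an epoch counter so pre- and post-rollover segments hash differently. A hedged model of the usual selection trick, written as a standalone sketch rather than as the patch's tcp_ao_compute_sne():

#include <stdint.h>

static int before32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;	/* "a logically precedes b" on the 2^32 ring */
}

static uint32_t compute_sne(uint32_t basis_sne, uint32_t basis_seq, uint32_t seq)
{
	if (before32(seq, basis_seq)) {
		/* logically older yet numerically larger: from before the wrap */
		if (seq > basis_seq)
			return basis_sne - 1;
	} else {
		/* logically newer yet numerically smaller: the wrap has happened */
		if (seq < basis_seq)
			return basis_sne + 1;
	}
	return basis_sne;
}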
+ * In order to get correct SNE, there's a helper tcp_ao_compute_sne(). + * It needs SEQ basis to understand whereabouts are lower SEQ numbers. + * According to that basis vector, it can provide incremented SNE + * when SEQ rolls over or provide decremented SNE when there's + * a retransmitted segment from before-rolling over. + * - for request sockets such basis is rcv_isn/snt_isn, which seems + * good enough as it's unexpected to receive 4 Gbytes on reqsk. + * - for full sockets the basis is rcv_nxt/snd_una. snd_una is + * taken instead of snd_nxt as currently it's easier to track + * in tcp_snd_una_update(), rather than updating SNE in all + * WRITE_ONCE(tp->snd_nxt, ...) + * - for time-wait sockets the basis is tw_rcv_nxt/tw_snd_nxt. + * tw_snd_nxt is not expected to change, while tw_rcv_nxt may. + */ + u32 snd_sne; + u32 rcv_sne; + refcount_t refcnt; /* Protects twsk destruction */ + struct rcu_head rcu; +}; + +#ifdef CONFIG_TCP_MD5SIG +#include <linux/jump_label.h> +extern struct static_key_false_deferred tcp_md5_needed; +#define static_branch_tcp_md5() static_branch_unlikely(&tcp_md5_needed.key) +#else +#define static_branch_tcp_md5() false +#endif +#ifdef CONFIG_TCP_AO +/* TCP-AO structures and functions */ +#include <linux/jump_label.h> +extern struct static_key_false_deferred tcp_ao_needed; +#define static_branch_tcp_ao() static_branch_unlikely(&tcp_ao_needed.key) +#else +#define static_branch_tcp_ao() false +#endif + +static inline bool tcp_hash_should_produce_warnings(void) +{ + return static_branch_tcp_md5() || static_branch_tcp_ao(); +} + +#define tcp_hash_fail(msg, family, skb, fmt, ...) \ +do { \ + const struct tcphdr *th = tcp_hdr(skb); \ + char hdr_flags[6]; \ + char *f = hdr_flags; \ + \ + if (!tcp_hash_should_produce_warnings()) \ + break; \ + if (th->fin) \ + *f++ = 'F'; \ + if (th->syn) \ + *f++ = 'S'; \ + if (th->rst) \ + *f++ = 'R'; \ + if (th->psh) \ + *f++ = 'P'; \ + if (th->ack) \ + *f++ = '.'; \ + *f = 0; \ + if ((family) == AF_INET) { \ + net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \ + msg, &ip_hdr(skb)->saddr, ntohs(th->source), \ + &ip_hdr(skb)->daddr, ntohs(th->dest), \ + hdr_flags, ##__VA_ARGS__); \ + } else { \ + net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s]" fmt "\n", \ + msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \ + &ipv6_hdr(skb)->daddr, ntohs(th->dest), \ + hdr_flags, ##__VA_ARGS__); \ + } \ +} while (0) + +#ifdef CONFIG_TCP_AO +/* TCP-AO structures and functions */ +struct tcp4_ao_context { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + __be32 sisn; + __be32 disn; +}; + +struct tcp6_ao_context { + struct in6_addr saddr; + struct in6_addr daddr; + __be16 sport; + __be16 dport; + __be32 sisn; + __be32 disn; +}; + +struct tcp_sigpool; +#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \ + TCPF_CLOSE | TCPF_CLOSE_WAIT | \ + TCPF_LAST_ACK | TCPF_CLOSING) + +int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb, + struct tcp_ao_key *key, struct tcphdr *th, + __u8 *hash_location); +int tcp_ao_hash_skb(unsigned short int family, + char *ao_hash, struct tcp_ao_key *key, + const struct sock *sk, const struct sk_buff *skb, + const u8 *tkey, int hash_offset, u32 sne); +int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family, + sockptr_t optval, int optlen); +struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao, + int sndid, int rcvid); +int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk, + struct request_sock *req, struct 
sk_buff *skb, + int family); +int tcp_ao_calc_traffic_key(struct tcp_ao_key *mkt, u8 *key, void *ctx, + unsigned int len, struct tcp_sigpool *hp); +void tcp_ao_destroy_sock(struct sock *sk, bool twsk); +void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp); +bool tcp_ao_ignore_icmp(const struct sock *sk, int family, int type, int code); +int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen); +int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen); +int tcp_ao_get_repair(struct sock *sk, sockptr_t optval, sockptr_t optlen); +int tcp_ao_set_repair(struct sock *sk, sockptr_t optval, unsigned int optlen); +enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk, + const struct sk_buff *skb, unsigned short int family, + const struct request_sock *req, int l3index, + const struct tcp_ao_hdr *aoh); +u32 tcp_ao_compute_sne(u32 next_sne, u32 next_seq, u32 seq); +struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, int l3index, + const union tcp_ao_addr *addr, + int family, int sndid, int rcvid); +int tcp_ao_hash_hdr(unsigned short family, char *ao_hash, + struct tcp_ao_key *key, const u8 *tkey, + const union tcp_ao_addr *daddr, + const union tcp_ao_addr *saddr, + const struct tcphdr *th, u32 sne); +int tcp_ao_prepare_reset(const struct sock *sk, struct sk_buff *skb, + const struct tcp_ao_hdr *aoh, int l3index, u32 seq, + struct tcp_ao_key **key, char **traffic_key, + bool *allocated_traffic_key, u8 *keyid, u32 *sne); + +/* ipv4 specific functions */ +int tcp_v4_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen); +struct tcp_ao_key *tcp_v4_ao_lookup(const struct sock *sk, struct sock *addr_sk, + int sndid, int rcvid); +int tcp_v4_ao_synack_hash(char *ao_hash, struct tcp_ao_key *mkt, + struct request_sock *req, const struct sk_buff *skb, + int hash_offset, u32 sne); +int tcp_v4_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key, + const struct sock *sk, + __be32 sisn, __be32 disn, bool send); +int tcp_v4_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key, + struct request_sock *req); +struct tcp_ao_key *tcp_v4_ao_lookup_rsk(const struct sock *sk, + struct request_sock *req, + int sndid, int rcvid); +int tcp_v4_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key, + const struct sock *sk, const struct sk_buff *skb, + const u8 *tkey, int hash_offset, u32 sne); +/* ipv6 specific functions */ +int tcp_v6_ao_hash_pseudoheader(struct tcp_sigpool *hp, + const struct in6_addr *daddr, + const struct in6_addr *saddr, int nbytes); +int tcp_v6_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key, + const struct sk_buff *skb, __be32 sisn, __be32 disn); +int tcp_v6_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key, + const struct sock *sk, __be32 sisn, + __be32 disn, bool send); +int tcp_v6_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key, + struct request_sock *req); +struct tcp_ao_key *tcp_v6_ao_lookup(const struct sock *sk, + struct sock *addr_sk, int sndid, int rcvid); +struct tcp_ao_key *tcp_v6_ao_lookup_rsk(const struct sock *sk, + struct request_sock *req, + int sndid, int rcvid); +int tcp_v6_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key, + const struct sock *sk, const struct sk_buff *skb, + const u8 *tkey, int hash_offset, u32 sne); +int tcp_v6_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen); +int tcp_v6_ao_synack_hash(char *ao_hash, struct tcp_ao_key *ao_key, + struct request_sock *req, const struct sk_buff *skb, + int hash_offset, u32 sne); +void tcp_ao_established(struct sock *sk); +void tcp_ao_finish_connect(struct 
sock *sk, struct sk_buff *skb); +void tcp_ao_connect_init(struct sock *sk); +void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb, + struct tcp_request_sock *treq, + unsigned short int family, int l3index); +#else /* CONFIG_TCP_AO */ + +static inline int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb, + struct tcp_ao_key *key, struct tcphdr *th, + __u8 *hash_location) +{ + return 0; +} + +static inline void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb, + struct tcp_request_sock *treq, + unsigned short int family, int l3index) +{ +} + +static inline bool tcp_ao_ignore_icmp(const struct sock *sk, int family, + int type, int code) +{ + return false; +} + +static inline enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk, + const struct sk_buff *skb, unsigned short int family, + const struct request_sock *req, int l3index, + const struct tcp_ao_hdr *aoh) +{ + return SKB_NOT_DROPPED_YET; +} + +static inline struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, + int l3index, const union tcp_ao_addr *addr, + int family, int sndid, int rcvid) +{ + return NULL; +} + +static inline void tcp_ao_destroy_sock(struct sock *sk, bool twsk) +{ +} + +static inline void tcp_ao_established(struct sock *sk) +{ +} + +static inline void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb) +{ +} + +static inline void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, + struct tcp_sock *tp) +{ +} + +static inline void tcp_ao_connect_init(struct sock *sk) +{ +} + +static inline int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen) +{ + return -ENOPROTOOPT; +} + +static inline int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen) +{ + return -ENOPROTOOPT; +} + +static inline int tcp_ao_get_repair(struct sock *sk, + sockptr_t optval, sockptr_t optlen) +{ + return -ENOPROTOOPT; +} + +static inline int tcp_ao_set_repair(struct sock *sk, + sockptr_t optval, unsigned int optlen) +{ + return -ENOPROTOOPT; +} +#endif + +#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) +int tcp_do_parse_auth_options(const struct tcphdr *th, + const u8 **md5_hash, const u8 **ao_hash); +#else +static inline int tcp_do_parse_auth_options(const struct tcphdr *th, + const u8 **md5_hash, const u8 **ao_hash) +{ + *md5_hash = NULL; + *ao_hash = NULL; + return 0; +} +#endif + +#endif /* _TCP_AO_H */ diff --git a/include/net/tcx.h b/include/net/tcx.h index 264f147953..04be937778 100644 --- a/include/net/tcx.h +++ b/include/net/tcx.h @@ -38,16 +38,11 @@ static inline struct tcx_entry *tcx_entry(struct bpf_mprog_entry *entry) return container_of(bundle, struct tcx_entry, bundle); } -static inline struct tcx_link *tcx_link(struct bpf_link *link) +static inline struct tcx_link *tcx_link(const struct bpf_link *link) { return container_of(link, struct tcx_link, link); } -static inline const struct tcx_link *tcx_link_const(const struct bpf_link *link) -{ - return tcx_link((struct bpf_link *)link); -} - void tcx_inc(void); void tcx_dec(void); diff --git a/include/net/tls.h b/include/net/tls.h index a2b44578dc..340ad43971 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -61,7 +61,8 @@ struct tls_rec; #define TLS_AAD_SPACE_SIZE 13 -#define MAX_IV_SIZE 16 +#define TLS_MAX_IV_SIZE 16 +#define TLS_MAX_SALT_SIZE 4 #define TLS_TAG_SIZE 16 #define TLS_MAX_REC_SEQ_SIZE 8 #define TLS_MAX_AAD_SIZE TLS_AAD_SPACE_SIZE @@ -96,9 +97,6 @@ struct tls_sw_context_tx { struct tls_rec *open_rec; struct list_head tx_list; atomic_t encrypt_pending; - /* protect 
crypto_wait with encrypt_pending */ - spinlock_t encrypt_compl_lock; - int async_notify; u8 async_capable:1; #define BIT_TX_SCHEDULED 0 @@ -135,8 +133,6 @@ struct tls_sw_context_rx { struct tls_strparser strp; atomic_t decrypt_pending; - /* protect crypto_wait with decrypt_pending*/ - spinlock_t decrypt_compl_lock; struct sk_buff_head async_hold; struct wait_queue_head wq; }; @@ -149,6 +145,7 @@ struct tls_record_info { skb_frag_t frags[MAX_SKB_FRAGS]; }; +#define TLS_DRIVER_STATE_SIZE_TX 16 struct tls_offload_context_tx { struct crypto_aead *aead_send; spinlock_t lock; /* protects records list */ @@ -162,17 +159,13 @@ struct tls_offload_context_tx { void (*sk_destruct)(struct sock *sk); struct work_struct destruct_work; struct tls_context *ctx; - u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state * Currently the belief is that there is not enough * driver specific state to justify another layer of indirection */ -#define TLS_DRIVER_STATE_SIZE_TX 16 + u8 driver_state[TLS_DRIVER_STATE_SIZE_TX] __aligned(8); }; -#define TLS_OFFLOAD_CONTEXT_SIZE_TX \ - (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX) - enum tls_context_flags { /* tls_device_down was called after the netdev went down, device state * was released, and kTLS works in software, even though rx_conf is @@ -193,8 +186,8 @@ enum tls_context_flags { }; struct cipher_context { - char *iv; - char *rec_seq; + char iv[TLS_MAX_IV_SIZE + TLS_MAX_SALT_SIZE]; + char rec_seq[TLS_MAX_REC_SEQ_SIZE]; }; union tls_crypto_context { @@ -302,6 +295,7 @@ struct tls_offload_resync_async { u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX]; }; +#define TLS_DRIVER_STATE_SIZE_RX 8 struct tls_offload_context_rx { /* sw must be the first member of tls_offload_context_rx */ struct tls_sw_context_rx sw; @@ -325,17 +319,13 @@ struct tls_offload_context_rx { struct tls_offload_resync_async *resync_async; }; }; - u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state * Currently the belief is that there is not enough * driver specific state to justify another layer of indirection */ -#define TLS_DRIVER_STATE_SIZE_RX 8 + u8 driver_state[TLS_DRIVER_STATE_SIZE_RX] __aligned(8); }; -#define TLS_OFFLOAD_CONTEXT_SIZE_RX \ - (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX) - struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, u32 seq, u64 *p_record_sn); diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 29251c3519..d716214fe0 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -154,13 +154,30 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, - struct net_device *dev, struct in6_addr *saddr, - struct in6_addr *daddr, + struct net_device *dev, + const struct in6_addr *saddr, + const struct in6_addr *daddr, __u8 prio, __u8 ttl, __be32 label, __be16 src_port, __be16 dst_port, bool nocheck); void udp_tunnel_sock_release(struct socket *sock); +struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb, + struct net_device *dev, + struct net *net, int oif, + __be32 *saddr, + const struct ip_tunnel_key *key, + __be16 sport, __be16 dport, u8 tos, + struct dst_cache *dst_cache); +struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb, + struct net_device *dev, + struct net *net, + struct socket *sock, int oif, + struct in6_addr *saddr, + const struct ip_tunnel_key *key, + __be16 sport, __be16 
dport, u8 dsfield, + struct dst_cache *dst_cache); + struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, __be16 flags, __be64 tunnel_id, int md_size); diff --git a/include/net/xdp.h b/include/net/xdp.h index de08c8e0d1..349c36fb5f 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -383,14 +383,25 @@ void xdp_attachment_setup(struct xdp_attachment_info *info, #define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE +/* Define the relationship between xdp-rx-metadata kfunc and + * various other entities: + * - xdp_rx_metadata enum + * - netdev netlink enum (Documentation/netlink/specs/netdev.yaml) + * - kfunc name + * - xdp_metadata_ops field + */ #define XDP_METADATA_KFUNC_xxx \ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \ - bpf_xdp_metadata_rx_timestamp) \ + NETDEV_XDP_RX_METADATA_TIMESTAMP, \ + bpf_xdp_metadata_rx_timestamp, \ + xmo_rx_timestamp) \ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \ - bpf_xdp_metadata_rx_hash) \ + NETDEV_XDP_RX_METADATA_HASH, \ + bpf_xdp_metadata_rx_hash, \ + xmo_rx_hash) \ -enum { -#define XDP_METADATA_KFUNC(name, _) name, +enum xdp_rx_metadata { +#define XDP_METADATA_KFUNC(name, _, __, ___) name, XDP_METADATA_KFUNC_xxx #undef XDP_METADATA_KFUNC MAX_XDP_METADATA_KFUNC, diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 69b472604b..f83128007f 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -63,6 +63,13 @@ struct xdp_sock { struct xsk_queue *tx ____cacheline_aligned_in_smp; struct list_head tx_list; + /* record the number of tx descriptors sent by this xsk and + * when it exceeds MAX_PER_SOCKET_BUDGET, an opportunity needs + * to be given to other xsks for sending tx descriptors, thereby + * preventing other XSKs from being starved. + */ + u32 tx_budget_spent; + /* Protects generic receive. 
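The tx_budget_spent counter above gives each XDP socket a send quantum so one busy socket cannot monopolize the shared Tx loop. A schematic model of that rule; the budget constant and the helper are assumptions for illustration, not code from this patch:

#define MAX_PER_SOCKET_BUDGET_GUESS	32	/* assumed per-socket quantum */

struct xsk_sketch {
	u32 tx_budget_spent;
	/* queue state elided */
};

static bool xsk_may_send(struct xsk_sketch *xs)
{
	if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET_GUESS)
		return false;	/* yield so other sockets on the queue get a turn */
	xs->tx_budget_spent++;
	return true;
}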
*/ spinlock_t rx_lock; @@ -109,4 +116,13 @@ static inline void __xsk_map_flush(void) #endif /* CONFIG_XDP_SOCKETS */ +#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET) +bool xsk_map_check_flush(void); +#else +static inline bool xsk_map_check_flush(void) +{ + return false; +} +#endif + #endif /* _LINUX_XDP_SOCK_H */ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 363c7d5105..c9bb0f892f 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1207,20 +1207,20 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1); } -int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, +int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, unsigned int family, int reverse); -static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, +static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, unsigned int family) { - return __xfrm_decode_session(skb, fl, family, 0); + return __xfrm_decode_session(net, skb, fl, family, 0); } -static inline int xfrm_decode_session_reverse(struct sk_buff *skb, +static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb, struct flowi *fl, unsigned int family) { - return __xfrm_decode_session(skb, fl, family, 1); + return __xfrm_decode_session(net, skb, fl, family, 1); } int __xfrm_route_forward(struct sk_buff *skb, unsigned short family); @@ -1296,7 +1296,7 @@ static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *sk { return 1; } -static inline int xfrm_decode_session_reverse(struct sk_buff *skb, +static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb, struct flowi *fl, unsigned int family) { @@ -1669,7 +1669,6 @@ int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb); #endif void xfrm_local_error(struct sk_buff *skb, int mtu); -int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm4_transport_finish(struct sk_buff *skb, int async); @@ -1689,7 +1688,6 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char prot int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); void xfrm4_local_error(struct sk_buff *skb, u32 mtu); -int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, struct ip6_tnl *t); int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, @@ -1712,6 +1710,10 @@ int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb); void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu); int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); +struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, + struct sk_buff *skb); +struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, + struct sk_buff *skb); int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen); #else @@ -2166,7 +2168,7 @@ static inline bool xfrm6_local_dontfrag(const struct sock *sk) proto = sk->sk_protocol; if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) - return inet6_sk(sk)->dontfrag; + return inet6_test_bit(DONTFRAG, sk); return false; } diff --git 
a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 2e3843b761..3f1b58d8b4 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -277,6 +277,8 @@ enum ib_port_capability_mask2_bits { IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4, IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5, IB_PORT_LINK_SPEED_NDR_SUP = 1 << 10, + IB_PORT_EXTENDED_SPEEDS2_SUP = 1 << 11, + IB_PORT_LINK_SPEED_XDR_SUP = 1 << 12, }; #define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 62f9d126a7..b7b6b58dd3 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -561,6 +561,7 @@ enum ib_port_speed { IB_SPEED_EDR = 32, IB_SPEED_HDR = 64, IB_SPEED_NDR = 128, + IB_SPEED_XDR = 256, }; enum ib_stat_flag { @@ -607,7 +608,7 @@ struct rdma_hw_stats { const struct rdma_stat_desc *descs; unsigned long *is_disabled; int num_counters; - u64 value[]; + u64 value[] __counted_by(num_counters); }; #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10 @@ -840,6 +841,7 @@ enum ib_rate { IB_RATE_50_GBPS = 20, IB_RATE_400_GBPS = 21, IB_RATE_600_GBPS = 22, + IB_RATE_800_GBPS = 23, }; /** @@ -1094,7 +1096,7 @@ struct ib_qp_cap { /* * Maximum number of rdma_rw_ctx structures in flight at a time. - * ib_create_qp() will calculate the right amount of neededed WRs + * ib_create_qp() will calculate the right amount of needed WRs * and MRs based on this. */ u32 max_rdma_ctxs; @@ -2608,6 +2610,8 @@ struct ib_device_ops { int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp); int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp); int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id); + int (*fill_res_srq_entry)(struct sk_buff *msg, struct ib_srq *ib_srq); + int (*fill_res_srq_entry_raw)(struct sk_buff *msg, struct ib_srq *ib_srq); /* Device lifecycle callbacks */ /* diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 8a43534eea..f5257103fd 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -404,8 +404,6 @@ cmd_to_domain_dev(struct scsi_cmnd *cmd) return sdev_to_domain_dev(cmd->device); } -void sas_hash_addr(u8 *hashed, const u8 *sas_addr); - /* Before calling a notify event, LLDD should use this function * when the link is severed (possibly from its tasklet). 
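The __counted_by(num_counters) annotation added to rdma_hw_stats above lets the compiler and FORTIFY bounds-check the flexible array against the live count. The one usage rule is that the count member must be valid before the array is indexed, as in this hedged allocation sketch (the names are placeholders, not rdma core code):

struct stats_sketch {
	int num_counters;
	u64 value[] __counted_by(num_counters);
};

static struct stats_sketch *stats_alloc(int n)
{
	struct stats_sketch *s = kzalloc(struct_size(s, value, n), GFP_KERNEL);

	if (!s)
		return NULL;
	s->num_counters = n;	/* set the bound before any value[i] access */
	return s;
}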
* The idea is that the Class only reads those, while the LLDD, @@ -681,7 +679,6 @@ extern void sas_resume_ha(struct sas_ha_struct *sas_ha); extern void sas_resume_ha_no_sync(struct sas_ha_struct *sas_ha); extern void sas_suspend_ha(struct sas_ha_struct *sas_ha); -int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates); int sas_phy_reset(struct sas_phy *phy, int hard_reset); int sas_phy_enable(struct sas_phy *phy, int enable); extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); @@ -699,20 +696,6 @@ extern struct scsi_transport_template * sas_domain_attach_transport(struct sas_domain_function_template *); extern struct device_attribute dev_attr_phy_event_threshold; -int sas_discover_root_expander(struct domain_device *); - -int sas_ex_revalidate_domain(struct domain_device *); - -void sas_unregister_domain_devices(struct asd_sas_port *port, int gone); -void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *); -void sas_discover_event(struct asd_sas_port *, enum discover_event ev); - -int sas_discover_end_dev(struct domain_device *); - -void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *); - -void sas_init_dev(struct domain_device *); - void sas_task_abort(struct sas_task *); int sas_eh_abort_handler(struct scsi_cmnd *cmd); int sas_eh_device_reset_handler(struct scsi_cmnd *cmd); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 8fa1153f37..c38f4fe5e6 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -100,10 +100,6 @@ struct scsi_vpd { unsigned char data[]; }; -enum scsi_vpd_parameters { - SCSI_VPD_HEADER_SIZE = 4, -}; - struct scsi_device { struct Scsi_Host *host; struct request_queue *request_queue; @@ -208,6 +204,7 @@ struct scsi_device { unsigned use_10_for_rw:1; /* first try 10-byte read / write */ unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */ unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */ + unsigned read_before_ms:1; /* perform a READ before MODE SENSE */ unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */ unsigned no_write_same:1; /* no WRITE SAME command */ unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */ @@ -219,7 +216,6 @@ struct scsi_device { unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */ unsigned no_start_on_add:1; /* do not issue start on add */ unsigned allow_restart:1; /* issue START_UNIT in error handler */ - unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */ unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */ unsigned no_uld_attach:1; /* disable connecting to upper level drivers */ unsigned select_no_atn:1; diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 4c2dc8150c..3b907fc2ef 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -245,6 +245,9 @@ struct scsi_host_template { * midlayer calls this point so that the driver may deallocate * and terminate any references to the target. * + * Note: This callback is called with the host lock held and hence + * must not sleep. + * * Status: OPTIONAL */ void (* target_destroy)(struct scsi_target *); diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index eb5079904c..af793f2a0e 100644 --- a/include/soc/fsl/qe/qe.h +++ b/include/soc/fsl/qe/qe.h @@ -258,7 +258,7 @@ static inline int qe_alive_during_sleep(void) /* Structure that defines QE firmware binary files. 
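The new note on target_destroy() above means the callback runs under the host lock, i.e. in atomic context, so any teardown that can sleep has to be deferred. One common shape, sketched with hypothetical driver types rather than any in-tree code:

struct example_tgt {
	bool gone;
	struct work_struct teardown_work;	/* sleeping cleanup runs here */
};

static void example_target_destroy(struct scsi_target *starget)
{
	struct example_tgt *tgt = starget->hostdata;

	/* host lock held: no sleeping, only flag and defer */
	tgt->gone = true;
	schedule_work(&tgt->teardown_work);
}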
* - * See Documentation/powerpc/qe_firmware.rst for a description of these + * See Documentation/arch/powerpc/qe_firmware.rst for a description of these * fields. */ struct qe_firmware { diff --git a/include/soc/rockchip/rk3399_grf.h b/include/soc/rockchip/rk3399_grf.h index 3eebabcb28..39cd44cec9 100644 --- a/include/soc/rockchip/rk3399_grf.h +++ b/include/soc/rockchip/rk3399_grf.h @@ -11,11 +11,8 @@ /* PMU GRF Registers */ #define RK3399_PMUGRF_OS_REG2 0x308 -#define RK3399_PMUGRF_DDRTYPE_SHIFT 13 -#define RK3399_PMUGRF_DDRTYPE_MASK 7 -#define RK3399_PMUGRF_DDRTYPE_DDR3 3 -#define RK3399_PMUGRF_DDRTYPE_LPDDR2 5 -#define RK3399_PMUGRF_DDRTYPE_LPDDR3 6 -#define RK3399_PMUGRF_DDRTYPE_LPDDR4 7 +#define RK3399_PMUGRF_OS_REG2_DDRTYPE GENMASK(15, 13) +#define RK3399_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2) +#define RK3399_PMUGRF_OS_REG2_BW_CH1 GENMASK(19, 18) #endif diff --git a/include/soc/rockchip/rk3568_grf.h b/include/soc/rockchip/rk3568_grf.h new file mode 100644 index 0000000000..52853efd67 --- /dev/null +++ b/include/soc/rockchip/rk3568_grf.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef __SOC_RK3568_GRF_H +#define __SOC_RK3568_GRF_H + +#define RK3568_PMUGRF_OS_REG2 0x208 +#define RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO GENMASK(15, 13) +#define RK3568_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2) + +#define RK3568_PMUGRF_OS_REG3 0x20c +#define RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12) +#define RK3568_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28) + +#endif /* __SOC_RK3568_GRF_H */ diff --git a/include/soc/rockchip/rk3588_grf.h b/include/soc/rockchip/rk3588_grf.h new file mode 100644 index 0000000000..630b35a550 --- /dev/null +++ b/include/soc/rockchip/rk3588_grf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef __SOC_RK3588_GRF_H +#define __SOC_RK3588_GRF_H + +#define RK3588_PMUGRF_OS_REG2 0x208 +#define RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO GENMASK(15, 13) +#define RK3588_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2) +#define RK3588_PMUGRF_OS_REG2_BW_CH1 GENMASK(19, 18) +#define RK3588_PMUGRF_OS_REG2_CH_INFO GENMASK(29, 28) + +#define RK3588_PMUGRF_OS_REG3 0x20c +#define RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12) +#define RK3588_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28) + +#define RK3588_PMUGRF_OS_REG4 0x210 +#define RK3588_PMUGRF_OS_REG5 0x214 + +#endif /* __SOC_RK3588_GRF_H */ diff --git a/include/soc/rockchip/rockchip_grf.h b/include/soc/rockchip/rockchip_grf.h new file mode 100644 index 0000000000..e46fd72aea --- /dev/null +++ b/include/soc/rockchip/rockchip_grf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Rockchip General Register Files definitions + */ + +#ifndef __SOC_ROCKCHIP_GRF_H +#define __SOC_ROCKCHIP_GRF_H + +/* Rockchip DDRTYPE defines */ +enum { + ROCKCHIP_DDRTYPE_DDR3 = 3, + ROCKCHIP_DDRTYPE_LPDDR2 = 5, + ROCKCHIP_DDRTYPE_LPDDR3 = 6, + ROCKCHIP_DDRTYPE_LPDDR4 = 7, + ROCKCHIP_DDRTYPE_LPDDR4X = 8, +}; + +#endif /* __SOC_ROCKCHIP_GRF_H */ diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h index ecefcaec7e..6b995a8f0f 100644 --- a/include/soc/tegra/bpmp-abi.h +++ b/include/soc/tegra/bpmp-abi.h @@ -1194,7 +1194,7 @@ struct cmd_clk_is_enabled_request { */ struct cmd_clk_is_enabled_response { /** - * @brief The state of the clock that has been succesfully + * @brief The state of the clock that has been successfully * requested with CMD_CLK_ENABLE or CMD_CLK_DISABLE by the * master invoking the command earlier. 
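The Rockchip conversions above replace open-coded shift/mask pairs with GENMASK() fields, which pair with FIELD_GET() at the read site. A sketch of the intended pattern, assuming the usual bitfield helpers and the ROCKCHIP_DDRTYPE_* values introduced in rockchip_grf.h:

#include <linux/bitfield.h>

static const char *rk3399_ddrtype_name(u32 os_reg2)
{
	/* old form: (os_reg2 >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
	 *	     RK3399_PMUGRF_DDRTYPE_MASK
	 */
	switch (FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, os_reg2)) {
	case ROCKCHIP_DDRTYPE_DDR3:	return "DDR3";
	case ROCKCHIP_DDRTYPE_LPDDR3:	return "LPDDR3";
	case ROCKCHIP_DDRTYPE_LPDDR4:	return "LPDDR4";
	default:			return "unknown";
	}
}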
* diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index a5ef84944a..71ae37d3be 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -96,7 +96,6 @@ struct tegra_smmu_soc { struct tegra_mc; struct tegra_smmu; -struct gart_device; #ifdef CONFIG_TEGRA_IOMMU_SMMU struct tegra_smmu *tegra_smmu_probe(struct device *dev, @@ -116,28 +115,6 @@ static inline void tegra_smmu_remove(struct tegra_smmu *smmu) } #endif -#ifdef CONFIG_TEGRA_IOMMU_GART -struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc); -int tegra_gart_suspend(struct gart_device *gart); -int tegra_gart_resume(struct gart_device *gart); -#else -static inline struct gart_device * -tegra_gart_probe(struct device *dev, struct tegra_mc *mc) -{ - return ERR_PTR(-ENODEV); -} - -static inline int tegra_gart_suspend(struct gart_device *gart) -{ - return -ENODEV; -} - -static inline int tegra_gart_resume(struct gart_device *gart) -{ - return -ENODEV; -} -#endif - struct tegra_mc_reset { const char *name; unsigned long id; @@ -185,8 +162,6 @@ struct tegra_mc_ops { */ int (*probe)(struct tegra_mc *mc); void (*remove)(struct tegra_mc *mc); - int (*suspend)(struct tegra_mc *mc); - int (*resume)(struct tegra_mc *mc); irqreturn_t (*handle_irq)(int irq, void *data); int (*probe_device)(struct tegra_mc *mc, struct device *dev); }; @@ -225,7 +200,6 @@ struct tegra_mc { struct tegra_bpmp *bpmp; struct device *dev; struct tegra_smmu *smmu; - struct gart_device *gart; void __iomem *regs; void __iomem *bcast_ch_regs; void __iomem **ch_regs; diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h index 2fe8c6b0d4..68e053fe73 100644 --- a/include/sound/cs35l41.h +++ b/include/sound/cs35l41.h @@ -735,6 +735,7 @@ #define CS35L41_REVID_B2 0xB2 #define CS35L41_HALO_CORE_RESET 0x00000200 +#define CS35L41_SOFTWARE_RESET 0x5A000000 #define CS35L41_FS1_WINDOW_MASK 0x000007FF #define CS35L41_FS2_WINDOW_MASK 0x00FFF800 @@ -815,6 +816,8 @@ struct cs35l41_otp_map_element_t { }; enum cs35l41_cspl_mbox_status { + CSPL_MBOX_STS_ERROR = U32_MAX, + CSPL_MBOX_STS_ERROR2 = 0x00ffffff, // firmware not always sign-extending 24-bit value CSPL_MBOX_STS_RUNNING = 0, CSPL_MBOX_STS_PAUSED = 1, CSPL_MBOX_STS_RDY_FOR_REINIT = 2, @@ -903,6 +906,6 @@ int cs35l41_init_boost(struct device *dev, struct regmap *regmap, bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type); int cs35l41_mdsync_up(struct regmap *regmap); int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type, - int enable, bool firmware_running); + int enable, struct cs_dsp *dsp); #endif /* __CS35L41_H */ diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h index 3950322bf3..8c18e8b6d2 100644 --- a/include/sound/cs35l56.h +++ b/include/sound/cs35l56.h @@ -242,9 +242,8 @@ #define CS35L56_CONTROL_PORT_READY_US 2200 #define CS35L56_HALO_STATE_POLL_US 1000 #define CS35L56_HALO_STATE_TIMEOUT_US 50000 -#define CS35L56_HIBERNATE_WAKE_POLL_US 500 -#define CS35L56_HIBERNATE_WAKE_TIMEOUT_US 5000 #define CS35L56_RESET_PULSE_MIN_US 1100 +#define CS35L56_WAKE_HOLD_TIME_US 1000 #define CS35L56_SDW1_PLAYBACK_PORT 1 #define CS35L56_SDW1_CAPTURE_PORT 3 diff --git a/include/sound/graph_card.h b/include/sound/graph_card.h index 4c8b94c77b..8e2e15dfcb 100644 --- a/include/sound/graph_card.h +++ b/include/sound/graph_card.h @@ -9,27 +9,27 @@ #include <sound/simple_card_utils.h> -typedef int (*GRAPH2_CUSTOM)(struct asoc_simple_priv *priv, +typedef int (*GRAPH2_CUSTOM)(struct simple_util_priv *priv, struct 
device_node *lnk, struct link_info *li); struct graph2_custom_hooks { - int (*hook_pre)(struct asoc_simple_priv *priv); - int (*hook_post)(struct asoc_simple_priv *priv); + int (*hook_pre)(struct simple_util_priv *priv); + int (*hook_post)(struct simple_util_priv *priv); GRAPH2_CUSTOM custom_normal; GRAPH2_CUSTOM custom_dpcm; GRAPH2_CUSTOM custom_c2c; }; -int audio_graph_parse_of(struct asoc_simple_priv *priv, struct device *dev); -int audio_graph2_parse_of(struct asoc_simple_priv *priv, struct device *dev, +int audio_graph_parse_of(struct simple_util_priv *priv, struct device *dev); +int audio_graph2_parse_of(struct simple_util_priv *priv, struct device *dev, struct graph2_custom_hooks *hooks); -int audio_graph2_link_normal(struct asoc_simple_priv *priv, +int audio_graph2_link_normal(struct simple_util_priv *priv, struct device_node *lnk, struct link_info *li); -int audio_graph2_link_dpcm(struct asoc_simple_priv *priv, +int audio_graph2_link_dpcm(struct simple_util_priv *priv, struct device_node *lnk, struct link_info *li); -int audio_graph2_link_c2c(struct asoc_simple_priv *priv, +int audio_graph2_link_c2c(struct simple_util_priv *priv, struct device_node *lnk, struct link_info *li); #endif /* __GRAPH_CARD_H */ diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h index 9c7872c0ca..55958711d6 100644 --- a/include/sound/hda_register.h +++ b/include/sound/hda_register.h @@ -91,6 +91,8 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; #define AZX_REG_SD_BDLPL 0x18 #define AZX_REG_SD_BDLPU 0x1c +#define AZX_SD_FIFOSIZE_MASK GENMASK(15, 0) + /* GTS registers */ #define AZX_REG_LLCH 0x14 diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 32c59053b4..dd7c87bbc6 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -573,7 +573,7 @@ void snd_hdac_stream_release(struct hdac_stream *azx_dev); struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus, int dir, int stream_tag); -int snd_hdac_stream_setup(struct hdac_stream *azx_dev); +int snd_hdac_stream_setup(struct hdac_stream *azx_dev, bool code_loading); void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev); int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev); int snd_hdac_stream_set_params(struct hdac_stream *azx_dev, @@ -624,6 +624,9 @@ int snd_hdac_stream_set_lpib(struct hdac_stream *azx_dev, u32 value); #define snd_hdac_stream_readb_poll(dev, reg, val, cond, delay_us, timeout_us) \ read_poll_timeout_atomic(snd_hdac_reg_readb, val, cond, delay_us, timeout_us, \ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg) +#define snd_hdac_stream_readw_poll(dev, reg, val, cond, delay_us, timeout_us) \ + read_poll_timeout_atomic(snd_hdac_reg_readw, val, cond, delay_us, timeout_us, \ + false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg) #define snd_hdac_stream_readl_poll(dev, reg, val, cond, delay_us, timeout_us) \ read_poll_timeout_atomic(snd_hdac_reg_readl, val, cond, delay_us, timeout_us, \ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg) diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index 511211f4a2..a8bebac1e4 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -60,6 +60,8 @@ struct hdac_ext_stream { bool link_locked:1; bool link_prepared; + int (*host_setup)(struct hdac_stream *, bool); + struct snd_pcm_substream *link_substream; }; @@ -86,6 +88,7 @@ void snd_hdac_ext_stream_start(struct hdac_ext_stream *hext_stream); void snd_hdac_ext_stream_clear(struct hdac_ext_stream *hext_stream); void 
snd_hdac_ext_stream_reset(struct hdac_ext_stream *hext_stream); int snd_hdac_ext_stream_setup(struct hdac_ext_stream *hext_stream, int fmt); +int snd_hdac_ext_host_stream_setup(struct hdac_ext_stream *hext_stream, bool code_loading); struct hdac_ext_link { struct hdac_bus *bus; diff --git a/include/sound/max9768.h b/include/sound/max9768.h index 0f78b41d03..8509ba0079 100644 --- a/include/sound/max9768.h +++ b/include/sound/max9768.h @@ -9,14 +9,10 @@ /** * struct max9768_pdata - optional platform specific MAX9768 configuration - * @shdn_gpio: GPIO to SHDN pin. If not valid, pin must be hardwired HIGH - * @mute_gpio: GPIO to MUTE pin. If not valid, control for mute won't be added * @flags: configuration flags, e.g. set classic PWM mode (check datasheet * regarding "filterless modulation" which is default). */ struct max9768_pdata { - int shdn_gpio; - int mute_gpio; unsigned flags; #define MAX9768_FLAG_CLASSIC_PWM (1 << 0) }; diff --git a/include/sound/opl3.h b/include/sound/opl3.h index ebf3852da9..052395a2f8 100644 --- a/include/sound/opl3.h +++ b/include/sound/opl3.h @@ -229,7 +229,7 @@ struct fm_operator { unsigned char attack_decay; unsigned char sustain_release; unsigned char wave_select; -} __attribute__((packed)); +} __packed; /* Instrument data */ struct fm_instrument { diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h index d264e5463f..2e999916db 100644 --- a/include/sound/simple_card.h +++ b/include/sound/simple_card.h @@ -12,15 +12,15 @@ #include <sound/soc.h> #include <sound/simple_card_utils.h> -struct asoc_simple_card_info { +struct simple_util_info { const char *name; const char *card; const char *codec; const char *platform; unsigned int daifmt; - struct asoc_simple_dai cpu_dai; - struct asoc_simple_dai codec_dai; + struct simple_util_dai cpu_dai; + struct simple_util_dai codec_dai; }; #endif /* __SIMPLE_CARD_H */ diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index d1a95bc33c..e5da10b4c4 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -11,18 +11,18 @@ #include <linux/clk.h> #include <sound/soc.h> -#define asoc_simple_init_hp(card, sjack, prefix) \ - asoc_simple_init_jack(card, sjack, 1, prefix, NULL) -#define asoc_simple_init_mic(card, sjack, prefix) \ - asoc_simple_init_jack(card, sjack, 0, prefix, NULL) +#define simple_util_init_hp(card, sjack, prefix) \ + simple_util_init_jack(card, sjack, 1, prefix, NULL) +#define simple_util_init_mic(card, sjack, prefix) \ + simple_util_init_jack(card, sjack, 0, prefix, NULL) -struct asoc_simple_tdm_width_map { +struct simple_util_tdm_width_map { u8 sample_bits; u8 slot_count; u16 slot_width; }; -struct asoc_simple_dai { +struct simple_util_dai { const char *name; unsigned int sysclk; int clk_direction; @@ -32,17 +32,17 @@ struct asoc_simple_dai { unsigned int rx_slot_mask; struct clk *clk; bool clk_fixed; - struct asoc_simple_tdm_width_map *tdm_width_map; + struct simple_util_tdm_width_map *tdm_width_map; int n_tdm_widths; }; -struct asoc_simple_data { +struct simple_util_data { u32 convert_rate; u32 convert_channels; const char *convert_sample_format; }; -struct asoc_simple_jack { +struct simple_util_jack { struct snd_soc_jack jack; struct snd_soc_jack_pin pin; struct snd_soc_jack_gpio gpio; @@ -54,21 +54,21 @@ struct prop_nums { int platforms; }; -struct asoc_simple_priv { +struct simple_util_priv { struct snd_soc_card snd_card; struct simple_dai_props { - struct asoc_simple_dai *cpu_dai; - struct asoc_simple_dai *codec_dai; - struct 
asoc_simple_data adata; + struct simple_util_dai *cpu_dai; + struct simple_util_dai *codec_dai; + struct simple_util_data adata; struct snd_soc_codec_conf *codec_conf; struct prop_nums num; unsigned int mclk_fs; } *dai_props; - struct asoc_simple_jack hp_jack; - struct asoc_simple_jack mic_jack; + struct simple_util_jack hp_jack; + struct simple_util_jack mic_jack; struct snd_soc_jack *aux_jacks; struct snd_soc_dai_link *dai_link; - struct asoc_simple_dai *dais; + struct simple_util_dai *dais; struct snd_soc_dai_link_component *dlcs; struct snd_soc_codec_conf *codec_conf; struct gpio_desc *pa_gpio; @@ -130,75 +130,75 @@ struct link_info { struct prop_nums num[SNDRV_MAX_LINKS]; }; -int asoc_simple_parse_daifmt(struct device *dev, +int simple_util_parse_daifmt(struct device *dev, struct device_node *node, struct device_node *codec, char *prefix, unsigned int *retfmt); -int asoc_simple_parse_tdm_width_map(struct device *dev, struct device_node *np, - struct asoc_simple_dai *dai); +int simple_util_parse_tdm_width_map(struct device *dev, struct device_node *np, + struct simple_util_dai *dai); __printf(3, 4) -int asoc_simple_set_dailink_name(struct device *dev, +int simple_util_set_dailink_name(struct device *dev, struct snd_soc_dai_link *dai_link, const char *fmt, ...); -int asoc_simple_parse_card_name(struct snd_soc_card *card, +int simple_util_parse_card_name(struct snd_soc_card *card, char *prefix); -int asoc_simple_parse_clk(struct device *dev, +int simple_util_parse_clk(struct device *dev, struct device_node *node, - struct asoc_simple_dai *simple_dai, + struct simple_util_dai *simple_dai, struct snd_soc_dai_link_component *dlc); -int asoc_simple_startup(struct snd_pcm_substream *substream); -void asoc_simple_shutdown(struct snd_pcm_substream *substream); -int asoc_simple_hw_params(struct snd_pcm_substream *substream, +int simple_util_startup(struct snd_pcm_substream *substream); +void simple_util_shutdown(struct snd_pcm_substream *substream); +int simple_util_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params); -int asoc_simple_dai_init(struct snd_soc_pcm_runtime *rtd); -int asoc_simple_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, +int simple_util_dai_init(struct snd_soc_pcm_runtime *rtd); +int simple_util_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params); -#define asoc_simple_parse_tdm(np, dai) \ +#define simple_util_parse_tdm(np, dai) \ snd_soc_of_parse_tdm_slot(np, &(dai)->tx_slot_mask, \ &(dai)->rx_slot_mask, \ &(dai)->slots, \ &(dai)->slot_width); -void asoc_simple_canonicalize_platform(struct snd_soc_dai_link_component *platforms, +void simple_util_canonicalize_platform(struct snd_soc_dai_link_component *platforms, struct snd_soc_dai_link_component *cpus); -void asoc_simple_canonicalize_cpu(struct snd_soc_dai_link_component *cpus, +void simple_util_canonicalize_cpu(struct snd_soc_dai_link_component *cpus, int is_single_links); -void asoc_simple_clean_reference(struct snd_soc_card *card); +void simple_util_clean_reference(struct snd_soc_card *card); -void asoc_simple_parse_convert(struct device_node *np, char *prefix, - struct asoc_simple_data *data); -bool asoc_simple_is_convert_required(const struct asoc_simple_data *data); +void simple_util_parse_convert(struct device_node *np, char *prefix, + struct simple_util_data *data); +bool simple_util_is_convert_required(const struct simple_util_data *data); -int asoc_simple_parse_routing(struct snd_soc_card *card, +int simple_util_parse_routing(struct snd_soc_card 
*card, char *prefix); -int asoc_simple_parse_widgets(struct snd_soc_card *card, +int simple_util_parse_widgets(struct snd_soc_card *card, char *prefix); -int asoc_simple_parse_pin_switches(struct snd_soc_card *card, +int simple_util_parse_pin_switches(struct snd_soc_card *card, char *prefix); -int asoc_simple_init_jack(struct snd_soc_card *card, - struct asoc_simple_jack *sjack, +int simple_util_init_jack(struct snd_soc_card *card, + struct simple_util_jack *sjack, int is_hp, char *prefix, char *pin); -int asoc_simple_init_aux_jacks(struct asoc_simple_priv *priv, +int simple_util_init_aux_jacks(struct simple_util_priv *priv, char *prefix); -int asoc_simple_init_priv(struct asoc_simple_priv *priv, +int simple_util_init_priv(struct simple_util_priv *priv, struct link_info *li); -int asoc_simple_remove(struct platform_device *pdev); +void simple_util_remove(struct platform_device *pdev); -int asoc_graph_card_probe(struct snd_soc_card *card); -int asoc_graph_is_ports0(struct device_node *port); -int asoc_graph_parse_dai(struct device *dev, struct device_node *ep, +int graph_util_card_probe(struct snd_soc_card *card); +int graph_util_is_ports0(struct device_node *port); +int graph_util_parse_dai(struct device *dev, struct device_node *ep, struct snd_soc_dai_link_component *dlc, int *is_single_link); #ifdef DEBUG -static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv, +static inline void simple_util_debug_dai(struct simple_util_priv *priv, char *name, - struct asoc_simple_dai *dai) + struct simple_util_dai *dai) { struct device *dev = simple_priv_to_dev(priv); @@ -228,7 +228,7 @@ static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv, name, dai->clk_direction ? "OUT" : "IN"); } -static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv) +static inline void simple_util_debug_info(struct simple_util_priv *priv) { struct snd_soc_card *card = simple_priv_to_card(priv); struct device *dev = simple_priv_to_dev(priv); @@ -241,7 +241,7 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv) for (i = 0; i < card->num_links; i++) { struct simple_dai_props *props = simple_priv_to_props(priv, i); struct snd_soc_dai_link *link = simple_priv_to_link(priv, i); - struct asoc_simple_dai *dai; + struct simple_util_dai *dai; struct snd_soc_codec_conf *cnf; int j; @@ -249,10 +249,10 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv) dev_dbg(dev, "cpu num = %d\n", link->num_cpus); for_each_prop_dai_cpu(props, j, dai) - asoc_simple_debug_dai(priv, "cpu", dai); + simple_util_debug_dai(priv, "cpu", dai); dev_dbg(dev, "codec num = %d\n", link->num_codecs); for_each_prop_dai_codec(props, j, dai) - asoc_simple_debug_dai(priv, "codec", dai); + simple_util_debug_dai(priv, "codec", dai); if (link->name) dev_dbg(dev, "dai name = %s\n", link->name); @@ -270,7 +270,7 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv) } } #else -#define asoc_simple_debug_info(priv) +#define simple_util_debug_info(priv) #endif /* DEBUG */ #endif /* __SIMPLE_CARD_UTILS_H */ diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h index e49b97d9e3..845e7608ac 100644 --- a/include/sound/soc-acpi-intel-match.h +++ b/include/sound/soc-acpi-intel-match.h @@ -32,6 +32,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[]; extern struct snd_soc_acpi_mach 
snd_soc_acpi_intel_lnl_machines[]; +extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[]; @@ -42,6 +43,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[]; +extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[]; /* * generic table used for HDA codec-based platforms, possibly with diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h index e8ff2e089c..ecc02e9552 100644 --- a/include/sound/soc-card.h +++ b/include/sound/soc-card.h @@ -115,8 +115,8 @@ struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card, struct snd_soc_pcm_runtime *rtd; for_each_card_rtds(card, rtd) { - if (!strcmp(asoc_rtd_to_codec(rtd, 0)->name, dai_name)) - return asoc_rtd_to_codec(rtd, 0); + if (!strcmp(snd_soc_rtd_to_codec(rtd, 0)->name, dai_name)) + return snd_soc_rtd_to_codec(rtd, 0); } return NULL; diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 433543eb82..667ecd4daa 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -718,7 +718,7 @@ struct snd_soc_dapm_context { /* A list of widgets associated with an object, typically a snd_kcontrol */ struct snd_soc_dapm_widget_list { int num_widgets; - struct snd_soc_dapm_widget *widgets[]; + struct snd_soc_dapm_widget *widgets[] __counted_by(num_widgets); }; #define for_each_dapm_widgets(list, i, widget) \ diff --git a/include/sound/soc.h b/include/sound/soc.h index 49ec688eed..7792c393e2 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -10,6 +10,7 @@ #ifndef __LINUX_SND_SOC_H #define __LINUX_SND_SOC_H +#include <linux/args.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/types.h> @@ -775,36 +776,36 @@ struct snd_soc_dai_link { }; static inline struct snd_soc_dai_link_component* -asoc_link_to_cpu(struct snd_soc_dai_link *link, int n) { +snd_soc_link_to_cpu(struct snd_soc_dai_link *link, int n) { return &(link)->cpus[n]; } static inline struct snd_soc_dai_link_component* -asoc_link_to_codec(struct snd_soc_dai_link *link, int n) { +snd_soc_link_to_codec(struct snd_soc_dai_link *link, int n) { return &(link)->codecs[n]; } static inline struct snd_soc_dai_link_component* -asoc_link_to_platform(struct snd_soc_dai_link *link, int n) { +snd_soc_link_to_platform(struct snd_soc_dai_link *link, int n) { return &(link)->platforms[n]; } #define for_each_link_codecs(link, i, codec) \ for ((i) = 0; \ ((i) < link->num_codecs) && \ - ((codec) = asoc_link_to_codec(link, i)); \ + ((codec) = snd_soc_link_to_codec(link, i)); \ (i)++) #define for_each_link_platforms(link, i, platform) \ for ((i) = 0; \ ((i) < link->num_platforms) && \ - ((platform) = asoc_link_to_platform(link, i)); \ + ((platform) = snd_soc_link_to_platform(link, i)); \ (i)++) #define for_each_link_cpus(link, i, cpu) \ for ((i) = 0; \ ((i) < link->num_cpus) && \ - ((cpu) = asoc_link_to_cpu(link, i)); \ + ((cpu) = snd_soc_link_to_cpu(link, i)); \ (i)++) /* @@ -870,12 +871,8 @@ asoc_link_to_platform(struct snd_soc_dai_link *link, int n) { .platforms = platform, \ .num_platforms = ARRAY_SIZE(platform) -#define SND_SOC_DAILINK_REGx(_1, _2, _3, func, ...) func #define SND_SOC_DAILINK_REG(...) 
\ - SND_SOC_DAILINK_REGx(__VA_ARGS__, \ - SND_SOC_DAILINK_REG3, \ - SND_SOC_DAILINK_REG2, \ - SND_SOC_DAILINK_REG1)(__VA_ARGS__) + CONCATENATE(SND_SOC_DAILINK_REG, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) #define SND_SOC_DAILINK_DEF(name, def...) \ static struct snd_soc_dai_link_component name[] = { def } @@ -895,7 +892,7 @@ asoc_link_to_platform(struct snd_soc_dai_link *link, int n) { #define COMP_DUMMY() { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai", } extern struct snd_soc_dai_link_component null_dailink_component[0]; -extern struct snd_soc_dai_link_component asoc_dummy_dlc; +extern struct snd_soc_dai_link_component snd_soc_dummy_dlc; struct snd_soc_codec_conf { @@ -1113,8 +1110,8 @@ struct snd_soc_pcm_runtime { * dais = cpu_dai + codec_dai * see * soc_new_pcm_runtime() - * asoc_rtd_to_cpu() - * asoc_rtd_to_codec() + * snd_soc_rtd_to_cpu() + * snd_soc_rtd_to_codec() */ struct snd_soc_dai **dais; @@ -1142,10 +1139,11 @@ struct snd_soc_pcm_runtime { int num_components; struct snd_soc_component *components[]; /* CPU/Codec/Platform */ }; + /* see soc_new_pcm_runtime() */ -#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n] -#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus] -#define asoc_substream_to_rtd(substream) \ +#define snd_soc_rtd_to_cpu(rtd, n) (rtd)->dais[n] +#define snd_soc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus] +#define snd_soc_substream_to_rtd(substream) \ (struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream) #define for_each_rtd_components(rtd, i, component) \ @@ -1154,11 +1152,11 @@ struct snd_soc_pcm_runtime { (i)++) #define for_each_rtd_cpu_dais(rtd, i, dai) \ for ((i) = 0; \ - ((i) < rtd->dai_link->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \ + ((i) < rtd->dai_link->num_cpus) && ((dai) = snd_soc_rtd_to_cpu(rtd, i)); \ (i)++) #define for_each_rtd_codec_dais(rtd, i, dai) \ for ((i) = 0; \ - ((i) < rtd->dai_link->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \ + ((i) < rtd->dai_link->num_codecs) && ((dai) = snd_soc_rtd_to_codec(rtd, i)); \ (i)++) #define for_each_rtd_dais(rtd, i, dai) \ for ((i) = 0; \ diff --git a/include/sound/sof.h b/include/sound/sof.h index 51294f2ba3..268d0ca0f6 100644 --- a/include/sound/sof.h +++ b/include/sound/sof.h @@ -52,8 +52,8 @@ enum sof_dsp_power_states { /* Definitions for multiple IPCs */ enum sof_ipc_type { - SOF_IPC, - SOF_INTEL_IPC4, + SOF_IPC_TYPE_3, + SOF_IPC_TYPE_4, SOF_IPC_TYPE_COUNT }; diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h index 78568abe26..574a9d581f 100644 --- a/include/sound/sof/ipc4/header.h +++ b/include/sound/sof/ipc4/header.h @@ -106,12 +106,19 @@ enum sof_ipc4_global_msg { SOF_IPC4_GLB_SAVE_PIPELINE, SOF_IPC4_GLB_RESTORE_PIPELINE, - /* Loads library (using Code Load or HD/A Host Output DMA) */ + /* + * library loading + * + * Loads library (using Code Load or HD/A Host Output DMA) + */ SOF_IPC4_GLB_LOAD_LIBRARY, + /* + * Prepare the host DMA channel for library loading, must be followed by + * a SOF_IPC4_GLB_LOAD_LIBRARY message as the library loading step + */ + SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE, - /* 25: RESERVED - do not use */ - - SOF_IPC4_GLB_INTERNAL_MESSAGE = 26, + SOF_IPC4_GLB_INTERNAL_MESSAGE, /* Notification (FW to SW driver) */ SOF_IPC4_GLB_NOTIFICATION, @@ -508,6 +515,23 @@ struct sof_ipc4_notify_resource_data { uint32_t data[6]; } __packed __aligned(4); +#define SOF_IPC4_DEBUG_DESCRIPTOR_SIZE 12 /* 3 x u32 */ + +/* + * The debug memory window is divided into 16 slots, and the + * first slot is used 
as a recorder for the other 15 slots. + */ +#define SOF_IPC4_MAX_DEBUG_SLOTS 15 +#define SOF_IPC4_DEBUG_SLOT_SIZE 0x1000 + +/* debug log slot types */ +#define SOF_IPC4_DEBUG_SLOT_UNUSED 0x00000000 +#define SOF_IPC4_DEBUG_SLOT_CRITICAL_LOG 0x54524300 /* byte 0: core ID */ +#define SOF_IPC4_DEBUG_SLOT_DEBUG_LOG 0x474f4c00 /* byte 0: core ID */ +#define SOF_IPC4_DEBUG_SLOT_GDB_STUB 0x42444700 +#define SOF_IPC4_DEBUG_SLOT_TELEMETRY 0x4c455400 +#define SOF_IPC4_DEBUG_SLOT_BROKEN 0x44414544 + /** @}*/ #endif diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h index bd1b72bf47..ea9af2726a 100644 --- a/include/sound/tas2781-dsp.h +++ b/include/sound/tas2781-dsp.h @@ -77,6 +77,11 @@ struct tasdev_blk { unsigned int nr_cmds; unsigned int blk_size; unsigned int nr_subblocks; + /* fixed m68k compiling issue, storing the dev_idx as a member of block + * can reduce unnecessary time and system resource consumption of + * dev_idx mapping every time the block data is written to the dsp. + */ + unsigned char dev_idx; unsigned char *data; }; diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h index a6c808b223..475294c853 100644 --- a/include/sound/tas2781.h +++ b/include/sound/tas2781.h @@ -135,6 +135,7 @@ struct tasdevice_priv { void tas2781_reset(struct tasdevice_priv *tas_dev); int tascodec_init(struct tasdevice_priv *tas_priv, void *codec, + struct module *module, void (*cont)(const struct firmware *fw, void *context)); struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c); int tasdevice_init(struct tasdevice_priv *tas_priv); diff --git a/include/sound/wavefront.h b/include/sound/wavefront.h index 37ed437e21..ef6f46accf 100644 --- a/include/sound/wavefront.h +++ b/include/sound/wavefront.h @@ -8,34 +8,6 @@ * Copyright (c) by Paul Barton-Davis <pbd@op.net> */ -#if (!defined(__GNUC__) && !defined(__GNUG__)) - - You will not be able to compile this file correctly without gcc, because - it is necessary to pack the "wavefront_alias" structure to a size - of 22 bytes, corresponding to 16-bit alignment (as would have been - the case on the original platform, MS-DOS). If this is not done, - then WavePatch-format files cannot be read/written correctly. - The method used to do this here ("__attribute__((packed)") is - completely compiler dependent. - - All other wavefront_* types end up aligned to 32 bit values and - still have the same (correct) size. - -#else - - /* However, note that as of G++ 2.7.3.2, g++ was unable to - correctly parse *type* __attribute__ tags. It will do the - right thing if we use the "packed" attribute on each struct - member, which has the same semantics anyway. - */ - -#endif /* __GNUC__ */ - -/***************************** WARNING ******************************** - PLEASE DO NOT MODIFY THIS FILE IN ANY WAY THAT AFFECTS ITS ABILITY TO - BE USED WITH EITHER C *OR* C++. - **********************************************************************/ - #ifndef NUM_MIDIKEYS #define NUM_MIDIKEYS 128 #endif /* NUM_MIDIKEYS */ @@ -44,29 +16,6 @@ #define NUM_MIDICHANNELS 16 #endif /* NUM_MIDICHANNELS */ -/* These are very useful/important. the original wavefront interface - was developed on a 16 bit system, where sizeof(int) = 2 - bytes. Defining things like this makes the code much more portable, and - easier to understand without having to toggle back and forth - between a 16-bit view of the world and a 32-bit one. 
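The compile-time warning being dropped above made a point that still holds for the __packed form used further down in this hunk: without packing, the compiler pads members to their natural alignment and wavefront_alias no longer matches the 22-byte on-disk WavePatch layout. A minimal kernel-context illustration (the demo structs and the assert are ours, not part of the header):

#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct wf_demo_unpacked {
	u8  tag;
	u16 value;	/* padded up to offset 2: sizeof() == 4 */
};

struct wf_demo_packed {
	u8  tag;
	u16 value;	/* starts at offset 1: sizeof() == 3 */
} __packed;

static_assert(sizeof(struct wf_demo_packed) == 3);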
- */ - -#ifndef __KERNEL__ -/* keep them for compatibility */ -typedef short s16; -typedef unsigned short u16; -typedef int s32; -typedef unsigned int u32; -typedef char s8; -typedef unsigned char u8; -typedef s16 INT16; -typedef u16 UINT16; -typedef s32 INT32; -typedef u32 UINT32; -typedef s8 CHAR8; -typedef u8 UCHAR8; -#endif - /* Pseudo-commands not part of the WaveFront command set. These are used for various driver controls and direct hardware control. @@ -468,7 +417,7 @@ typedef struct wf_alias { */ u8 sixteen_bit_padding; -} __attribute__((packed)) wavefront_alias; +} __packed wavefront_alias; typedef struct wf_drum { u8 PatchNumber; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 159567359b..97099a5e3f 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -108,6 +108,15 @@ #define SE_MODE_PAGE_BUF 512 #define SE_SENSE_BUF 96 +enum target_submit_type { + /* Use the fabric driver's default submission type */ + TARGET_FABRIC_DEFAULT_SUBMIT, + /* Submit from the calling context */ + TARGET_DIRECT_SUBMIT, + /* Defer submission to the LIO workqueue */ + TARGET_QUEUE_SUBMIT, +}; + /* struct se_hba->hba_flags */ enum hba_flags_table { HBA_FLAGS_INTERNAL_USE = 0x01, @@ -717,6 +726,7 @@ struct se_dev_attrib { u32 unmap_granularity; u32 unmap_granularity_alignment; u32 max_write_same_len; + u8 submit_type; struct se_device *da_dev; struct config_group da_group; }; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index b188b1e90e..3378ff9ee2 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -113,11 +113,20 @@ struct target_core_fabric_ops { struct configfs_attribute **tfc_tpg_nacl_param_attrs; /* - * Set this member variable to true if the SCSI transport protocol + * Set this member variable if the SCSI transport protocol * (e.g. iSCSI) requires that the Data-Out buffer is transferred in * its entirety before a command is aborted. */ - bool write_pending_must_be_called; + unsigned int write_pending_must_be_called:1; + /* + * Set this if the driver supports submitting commands to the backend + * from target_submit/target_submit_cmd. + */ + unsigned int direct_submit_supp:1; + /* + * Set this to a target_submit_type value. 
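The submit-type plumbing added here is configured per fabric; a hedged sketch of how a driver would advertise it via the default_submit_type member declared just below (the ops struct is illustrative, not a real driver, and elides the mandatory callbacks):

static const struct target_core_fabric_ops demo_fabric_ops = {
	.fabric_name		= "demo",
	/* ...mandatory fabric callbacks elided... */
	.direct_submit_supp	= 1,
	.default_submit_type	= TARGET_DIRECT_SUBMIT,
};

The per-device se_dev_attrib.submit_type added in target_core_base.h then either defers to this fabric default (TARGET_FABRIC_DEFAULT_SUBMIT) or forces one of the other two enum target_submit_type modes.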
+ */ + u8 default_submit_type; }; int target_register_template(const struct target_core_fabric_ops *fo); @@ -166,20 +175,18 @@ int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb, struct scatterlist *sgl, u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count, struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp); -void target_submit(struct se_cmd *se_cmd); +int target_submit(struct se_cmd *se_cmd); sense_reason_t transport_lookup_cmd_lun(struct se_cmd *); sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb, gfp_t gfp); sense_reason_t target_cmd_parse_cdb(struct se_cmd *); void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, unsigned char *, u64, u32, int, int, int); -void target_queue_submission(struct se_cmd *se_cmd); int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, unsigned char *sense, u64 unpacked_lun, void *fabric_tmr_ptr, unsigned char tm_type, gfp_t, u64, int); -int transport_handle_cdb_direct(struct se_cmd *); sense_reason_t transport_generic_new_cmd(struct se_cmd *); void target_put_cmd_and_wait(struct se_cmd *cmd); @@ -197,8 +204,6 @@ void target_stop_session(struct se_session *se_sess); void target_wait_for_sess_cmds(struct se_session *); void target_show_cmd(const char *pfx, struct se_cmd *cmd); -int core_alua_check_nonop_delay(struct se_cmd *); - int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t); void core_tmr_release_req(struct se_tmr_req *); int transport_generic_handle_tmr(struct se_cmd *); diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index e9d412d19d..caec276515 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -1216,6 +1216,31 @@ TRACE_EVENT(afs_file_error, __print_symbolic(__entry->where, afs_file_errors)) ); +TRACE_EVENT(afs_bulkstat_error, + TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort), + + TP_ARGS(op, fid, index, abort), + + TP_STRUCT__entry( + __field_struct(struct afs_fid, fid) + __field(unsigned int, op) + __field(unsigned int, index) + __field(s32, abort) + ), + + TP_fast_assign( + __entry->op = op->debug_id; + __entry->fid = *fid; + __entry->index = index; + __entry->abort = abort; + ), + + TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d", + __entry->op, __entry->index, + __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique, + __entry->abort) + ); + TRACE_EVENT(afs_cm_no_server, TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx), diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index b2db2c2f1c..279a7a0c90 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1561,7 +1561,6 @@ DECLARE_EVENT_CLASS(btrfs__work, __field( const void *, wq ) __field( const void *, func ) __field( const void *, ordered_func ) - __field( const void *, ordered_free ) __field( const void *, normal_work ) ), @@ -1570,14 +1569,12 @@ DECLARE_EVENT_CLASS(btrfs__work, __entry->wq = work->wq; __entry->func = work->func; __entry->ordered_func = work->ordered_func; - __entry->ordered_free = work->ordered_free; __entry->normal_work = &work->normal_work; ), - TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p " - "ordered_free=%p", + TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p", __entry->work, __entry->normal_work, __entry->wq, - __entry->func, __entry->ordered_func, __entry->ordered_free) + __entry->func, __entry->ordered_func) ); /* @@ -2497,6 +2494,82 @@ DEFINE_EVENT(btrfs_raid56_bio, 
raid56_write, TP_ARGS(rbio, bio, trace_info) ); +TRACE_EVENT(btrfs_insert_one_raid_extent, + + TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length, + int num_stripes), + + TP_ARGS(fs_info, logical, length, num_stripes), + + TP_STRUCT__entry_btrfs( + __field( u64, logical ) + __field( u64, length ) + __field( int, num_stripes ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->logical = logical; + __entry->length = length; + __entry->num_stripes = num_stripes; + ), + + TP_printk_btrfs("logical=%llu length=%llu num_stripes=%d", + __entry->logical, __entry->length, + __entry->num_stripes) +); + +TRACE_EVENT(btrfs_raid_extent_delete, + + TP_PROTO(const struct btrfs_fs_info *fs_info, u64 start, u64 end, + u64 found_start, u64 found_end), + + TP_ARGS(fs_info, start, end, found_start, found_end), + + TP_STRUCT__entry_btrfs( + __field( u64, start ) + __field( u64, end ) + __field( u64, found_start ) + __field( u64, found_end ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->start = start; + __entry->end = end; + __entry->found_start = found_start; + __entry->found_end = found_end; + ), + + TP_printk_btrfs("start=%llu end=%llu found_start=%llu found_end=%llu", + __entry->start, __entry->end, __entry->found_start, + __entry->found_end) +); + +TRACE_EVENT(btrfs_get_raid_extent_offset, + + TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length, + u64 physical, u64 devid), + + TP_ARGS(fs_info, logical, length, physical, devid), + + TP_STRUCT__entry_btrfs( + __field( u64, logical ) + __field( u64, length ) + __field( u64, physical ) + __field( u64, devid ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->logical = logical; + __entry->length = length; + __entry->physical = physical; + __entry->devid = devid; + ), + + TP_printk_btrfs("logical=%llu length=%llu physical=%llu devid=%llu", + __entry->logical, __entry->length, __entry->physical, + __entry->devid) +); + #endif /* _TRACE_BTRFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/csd.h b/include/trace/events/csd.h index 67e9d01f80..58cc83b99c 100644 --- a/include/trace/events/csd.h +++ b/include/trace/events/csd.h @@ -12,7 +12,7 @@ TRACE_EVENT(csd_queue_cpu, TP_PROTO(const unsigned int cpu, unsigned long callsite, smp_call_func_t func, - struct __call_single_data *csd), + call_single_data_t *csd), TP_ARGS(cpu, callsite, func, csd), @@ -39,7 +39,7 @@ TRACE_EVENT(csd_queue_cpu, */ DECLARE_EVENT_CLASS(csd_function, - TP_PROTO(smp_call_func_t func, struct __call_single_data *csd), + TP_PROTO(smp_call_func_t func, call_single_data_t *csd), TP_ARGS(func, csd), @@ -57,12 +57,12 @@ DECLARE_EVENT_CLASS(csd_function, ); DEFINE_EVENT(csd_function, csd_function_entry, - TP_PROTO(smp_call_func_t func, struct __call_single_data *csd), + TP_PROTO(smp_call_func_t func, call_single_data_t *csd), TP_ARGS(func, csd) ); DEFINE_EVENT(csd_function, csd_function_exit, - TP_PROTO(smp_call_func_t func, struct __call_single_data *csd), + TP_PROTO(smp_call_func_t func, call_single_data_t *csd), TP_ARGS(func, csd) ); diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h index c79f1d4c39..23200aabcc 100644 --- a/include/trace/events/damon.h +++ b/include/trace/events/damon.h @@ -9,12 +9,51 @@ #include <linux/types.h> #include <linux/tracepoint.h> +TRACE_EVENT_CONDITION(damos_before_apply, + + TP_PROTO(unsigned int context_idx, unsigned int scheme_idx, + unsigned int target_idx, struct damon_region *r, + unsigned int nr_regions, bool do_trace), + + TP_ARGS(context_idx, target_idx, scheme_idx, 
r, nr_regions, do_trace), + + TP_CONDITION(do_trace), + + TP_STRUCT__entry( + __field(unsigned int, context_idx) + __field(unsigned int, scheme_idx) + __field(unsigned long, target_idx) + __field(unsigned long, start) + __field(unsigned long, end) + __field(unsigned int, nr_accesses) + __field(unsigned int, age) + __field(unsigned int, nr_regions) + ), + + TP_fast_assign( + __entry->context_idx = context_idx; + __entry->scheme_idx = scheme_idx; + __entry->target_idx = target_idx; + __entry->start = r->ar.start; + __entry->end = r->ar.end; + __entry->nr_accesses = r->nr_accesses_bp / 10000; + __entry->age = r->age; + __entry->nr_regions = nr_regions; + ), + + TP_printk("ctx_idx=%u scheme_idx=%u target_idx=%lu nr_regions=%u %lu-%lu: %u %u", + __entry->context_idx, __entry->scheme_idx, + __entry->target_idx, __entry->nr_regions, + __entry->start, __entry->end, + __entry->nr_accesses, __entry->age) +); + TRACE_EVENT(damon_aggregated, - TP_PROTO(struct damon_target *t, unsigned int target_id, - struct damon_region *r, unsigned int nr_regions), + TP_PROTO(unsigned int target_id, struct damon_region *r, + unsigned int nr_regions), - TP_ARGS(t, target_id, r, nr_regions), + TP_ARGS(target_id, r, nr_regions), TP_STRUCT__entry( __field(unsigned long, target_id) diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h index 951643e6a7..a78d21fa9f 100644 --- a/include/trace/events/habanalabs.h +++ b/include/trace/events/habanalabs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright 2016-2021 HabanaLabs, Ltd. + * Copyright 2022-2023 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -87,6 +87,49 @@ DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_free, TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller), TP_ARGS(dev, cpu_addr, dma_addr, size, caller)); +DECLARE_EVENT_CLASS(habanalabs_dma_map_template, + TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len, + enum dma_data_direction dir, const char *caller), + + TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller), + + TP_STRUCT__entry( + __string(dname, dev_name(dev)) + __field(u64, phys_addr) + __field(u64, dma_addr) + __field(u32, len) + __field(int, dir) + __field(const char *, caller) + ), + + TP_fast_assign( + __assign_str(dname, dev_name(dev)); + __entry->phys_addr = phys_addr; + __entry->dma_addr = dma_addr; + __entry->len = len; + __entry->dir = dir; + __entry->caller = caller; + ), + + TP_printk("%s: phys_addr: %#llx, dma_addr: %#llx, len: %#x, dir: %d, caller: %s", + __get_str(dname), + __entry->phys_addr, + __entry->dma_addr, + __entry->len, + __entry->dir, + __entry->caller) +); + +DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_map_page, + TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len, + enum dma_data_direction dir, const char *caller), + TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller)); + +DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_unmap_page, + TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len, + enum dma_data_direction dir, const char *caller), + TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller)); + DECLARE_EVENT_CLASS(habanalabs_comms_template, TP_PROTO(struct device *dev, char *op_str), diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h index d735302401..af0af3f1d9 100644 --- a/include/trace/events/intel_ifs.h +++ b/include/trace/events/intel_ifs.h @@ -10,25 +10,25 @@ TRACE_EVENT(ifs_status, - TP_PROTO(int cpu, union ifs_scan 
activate, union ifs_status status), + TP_PROTO(int cpu, int start, int stop, u64 status), - TP_ARGS(cpu, activate, status), + TP_ARGS(cpu, start, stop, status), TP_STRUCT__entry( __field( u64, status ) __field( int, cpu ) - __field( u8, start ) - __field( u8, stop ) + __field( u16, start ) + __field( u16, stop ) ), TP_fast_assign( __entry->cpu = cpu; - __entry->start = activate.start; - __entry->stop = activate.stop; - __entry->status = status.data; + __entry->start = start; + __entry->stop = stop; + __entry->status = status; ), - TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx", + TP_printk("cpu: %d, start: %.4x, stop: %.4x, status: %.16llx", __entry->cpu, __entry->start, __entry->stop, diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index 061b5128f3..0190ef725b 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -49,10 +49,11 @@ TRACE_EVENT(mm_migrate_pages, TP_PROTO(unsigned long succeeded, unsigned long failed, unsigned long thp_succeeded, unsigned long thp_failed, - unsigned long thp_split, enum migrate_mode mode, int reason), + unsigned long thp_split, unsigned long large_folio_split, + enum migrate_mode mode, int reason), TP_ARGS(succeeded, failed, thp_succeeded, thp_failed, - thp_split, mode, reason), + thp_split, large_folio_split, mode, reason), TP_STRUCT__entry( __field( unsigned long, succeeded) @@ -60,26 +61,29 @@ TRACE_EVENT(mm_migrate_pages, __field( unsigned long, thp_succeeded) __field( unsigned long, thp_failed) __field( unsigned long, thp_split) + __field( unsigned long, large_folio_split) __field( enum migrate_mode, mode) __field( int, reason) ), TP_fast_assign( - __entry->succeeded = succeeded; - __entry->failed = failed; - __entry->thp_succeeded = thp_succeeded; - __entry->thp_failed = thp_failed; - __entry->thp_split = thp_split; - __entry->mode = mode; - __entry->reason = reason; + __entry->succeeded = succeeded; + __entry->failed = failed; + __entry->thp_succeeded = thp_succeeded; + __entry->thp_failed = thp_failed; + __entry->thp_split = thp_split; + __entry->large_folio_split = large_folio_split; + __entry->mode = mode; + __entry->reason = reason; ), - TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu mode=%s reason=%s", + TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu nr_split=%lu mode=%s reason=%s", __entry->succeeded, __entry->failed, __entry->thp_succeeded, __entry->thp_failed, __entry->thp_split, + __entry->large_folio_split, __print_symbolic(__entry->mode, MIGRATE_MODE), __print_symbolic(__entry->reason, MIGRATE_REASON)) ); diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 1478b9dd05..d801409b33 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -144,7 +144,7 @@ IF_HAVE_PG_ARCH_X(arch_3) #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } #elif defined(CONFIG_PPC) #define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } -#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64) +#elif defined(CONFIG_PARISC) #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } #elif !defined(CONFIG_MMU) #define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" } diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h index 563e486173..09e72215b9 100644 --- a/include/trace/events/mptcp.h +++ b/include/trace/events/mptcp.h @@ -44,7 +44,7 @@ TRACE_EVENT(mptcp_subflow_get_send, ssk = mptcp_subflow_tcp_sock(subflow); if (ssk && sk_fullsock(ssk)) { 
__entry->snd_wnd = tcp_sk(ssk)->snd_wnd; - __entry->pace = ssk->sk_pacing_rate; + __entry->pace = READ_ONCE(ssk->sk_pacing_rate); } else { __entry->snd_wnd = 0; __entry->pace = 0; diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index f8069ef2ee..718df1d9b8 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1667,7 +1667,7 @@ TRACE_EVENT(svcrdma_encode_wseg, __entry->offset = offset; ), - TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", + TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", __entry->cq_id, __entry->completion_id, __entry->segno, __entry->length, (unsigned long long)__entry->offset, __entry->handle @@ -1703,7 +1703,7 @@ TRACE_EVENT(svcrdma_decode_rseg, __entry->offset = segment->rs_offset; ), - TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x", + TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x", __entry->cq_id, __entry->completion_id, __entry->segno, __entry->position, __entry->length, (unsigned long long)__entry->offset, __entry->handle @@ -1740,7 +1740,7 @@ TRACE_EVENT(svcrdma_decode_wseg, __entry->offset = segment->rs_offset; ), - TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", + TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x", __entry->cq_id, __entry->completion_id, __entry->segno, __entry->length, (unsigned long long)__entry->offset, __entry->handle @@ -1959,7 +1959,7 @@ TRACE_EVENT(svcrdma_send_pullup, __entry->msglen = msglen; ), - TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)", + TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)", __entry->cq_id, __entry->completion_id, __entry->hdrlen, __entry->msglen, __entry->hdrlen + __entry->msglen) @@ -2014,7 +2014,7 @@ TRACE_EVENT(svcrdma_post_send, wr->ex.invalidate_rkey : 0; ), - TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x", + TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x", __entry->cq_id, __entry->completion_id, __entry->num_sge, __entry->inv_rkey ) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index f7e537f64d..87b8de9b6c 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -128,6 +128,7 @@ EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \ EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \ EM(rxrpc_skb_get_conn_work, "GET conn-work") \ + EM(rxrpc_skb_get_last_nack, "GET last-nack") \ EM(rxrpc_skb_get_local_work, "GET locl-work") \ EM(rxrpc_skb_get_reject_work, "GET rej-work ") \ EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \ @@ -141,6 +142,7 @@ EM(rxrpc_skb_put_error_report, "PUT error-rep") \ EM(rxrpc_skb_put_input, "PUT input ") \ EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \ + EM(rxrpc_skb_put_last_nack, "PUT last-nack") \ EM(rxrpc_skb_put_purge, "PUT purge ") \ EM(rxrpc_skb_put_rotate, "PUT rotate ") \ EM(rxrpc_skb_put_unknown, "PUT unknown ") \ @@ -178,7 +180,9 @@ #define rxrpc_peer_traces \ EM(rxrpc_peer_free, "FREE ") \ EM(rxrpc_peer_get_accept, "GET accept ") \ + EM(rxrpc_peer_get_application, "GET app ") \ EM(rxrpc_peer_get_bundle, "GET bundle ") \ + EM(rxrpc_peer_get_call, "GET call ") \ EM(rxrpc_peer_get_client_conn, "GET cln-conn") \ EM(rxrpc_peer_get_input, "GET input ") \ EM(rxrpc_peer_get_input_error, "GET inpt-err") \ @@ -187,6 +191,7 @@ EM(rxrpc_peer_get_service_conn, "GET srv-conn") \ EM(rxrpc_peer_new_client, "NEW client ") \ EM(rxrpc_peer_new_prealloc, "NEW prealloc") \ + EM(rxrpc_peer_put_application, "PUT app ") \ EM(rxrpc_peer_put_bundle, "PUT bundle ") \ EM(rxrpc_peer_put_call, 
"PUT call ") \ EM(rxrpc_peer_put_conn, "PUT conn ") \ @@ -1549,7 +1554,7 @@ TRACE_EVENT(rxrpc_congest, memcpy(&__entry->sum, summary, sizeof(__entry->sum)); ), - TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s", + TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u,%u b=%u u=%u d=%u l=%x%s%s%s", __entry->call, __entry->ack_serial, __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names), @@ -1557,9 +1562,9 @@ TRACE_EVENT(rxrpc_congest, __print_symbolic(__entry->sum.mode, rxrpc_congest_modes), __entry->sum.cwnd, __entry->sum.ssthresh, - __entry->sum.nr_acks, __entry->sum.saw_nacks, + __entry->sum.nr_acks, __entry->sum.nr_retained_nacks, __entry->sum.nr_new_acks, - __entry->sum.nr_rot_new_acks, + __entry->sum.nr_new_nacks, __entry->top - __entry->hard_ack, __entry->sum.cumulative_acks, __entry->sum.dup_acks, diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index fbb99a61f7..6188ad0d9e 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -664,6 +664,58 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa, TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu) ); +#ifdef CONFIG_NUMA_BALANCING +#define NUMAB_SKIP_REASON \ + EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \ + EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \ + EM( NUMAB_SKIP_INACCESSIBLE, "inaccessible" ) \ + EM( NUMAB_SKIP_SCAN_DELAY, "scan_delay" ) \ + EM( NUMAB_SKIP_PID_INACTIVE, "pid_inactive" ) \ + EM( NUMAB_SKIP_IGNORE_PID, "ignore_pid_inactive" ) \ + EMe(NUMAB_SKIP_SEQ_COMPLETED, "seq_completed" ) + +/* Redefine for export. */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +NUMAB_SKIP_REASON + +/* Redefine for symbolic printing. */ +#undef EM +#undef EMe +#define EM(a, b) { a, b }, +#define EMe(a, b) { a, b } + +TRACE_EVENT(sched_skip_vma_numa, + + TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma, + enum numa_vmaskip_reason reason), + + TP_ARGS(mm, vma, reason), + + TP_STRUCT__entry( + __field(unsigned long, numa_scan_offset) + __field(unsigned long, vm_start) + __field(unsigned long, vm_end) + __field(enum numa_vmaskip_reason, reason) + ), + + TP_fast_assign( + __entry->numa_scan_offset = mm->numa_scan_offset; + __entry->vm_start = vma->vm_start; + __entry->vm_end = vma->vm_end; + __entry->reason = reason; + ), + + TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s", + __entry->numa_scan_offset, + __entry->vm_start, + __entry->vm_end, + __print_symbolic(__entry->reason, NUMAB_SKIP_REASON)) +); +#endif /* CONFIG_NUMA_BALANCING */ /* * Tracepoint for waking a polling cpu without an IPI. 
@@ -735,6 +787,11 @@ DECLARE_TRACE(sched_update_nr_running_tp, TP_PROTO(struct rq *rq, int change), TP_ARGS(rq, change)); +DECLARE_TRACE(sched_compute_energy_tp, + TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy, + unsigned long max_util, unsigned long busy_time), + TP_ARGS(p, dst_cpu, energy, max_util, busy_time)); + #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 6beb38c1dc..337c90787f 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1677,7 +1677,6 @@ DEFINE_SVCXDRBUF_EVENT(sendto); svc_rqst_flag(DROPME) \ svc_rqst_flag(SPLICE_OK) \ svc_rqst_flag(VICTIM) \ - svc_rqst_flag(BUSY) \ svc_rqst_flag_end(DATA) #undef svc_rqst_flag diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index 992517ac32..b930669bd1 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -267,15 +267,15 @@ DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume, TP_ARGS(dev_name, err, usecs, dev_state, link_state)); TRACE_EVENT(ufshcd_command, - TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, + TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t, unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len, u32 intr, u64 lba, u8 opcode, u8 group_id), - TP_ARGS(dev_name, str_t, tag, doorbell, hwq_id, transfer_len, - intr, lba, opcode, group_id), + TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba, + opcode, group_id), TP_STRUCT__entry( - __string(dev_name, dev_name) + __field(struct scsi_device *, sdev) __field(enum ufs_trace_str_t, str_t) __field(unsigned int, tag) __field(u32, doorbell) @@ -288,7 +288,7 @@ TRACE_EVENT(ufshcd_command, ), TP_fast_assign( - __assign_str(dev_name, dev_name); + __entry->sdev = sdev; __entry->str_t = str_t; __entry->tag = tag; __entry->doorbell = doorbell; @@ -302,8 +302,9 @@ TRACE_EVENT(ufshcd_command, TP_printk( "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x, hwq_id: %d", - show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name), - __entry->tag, __entry->doorbell, __entry->transfer_len, __entry->intr, + show_ufs_cmd_trace_str(__entry->str_t), + dev_name(&__entry->sdev->sdev_dev), __entry->tag, + __entry->doorbell, __entry->transfer_len, __entry->intr, __entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode), (u32)__entry->group_id, __entry->hwq_id ) diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index d2123dd960..1a488c30af 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -285,10 +285,9 @@ TRACE_EVENT(mm_vmscan_lru_isolate, unsigned long nr_scanned, unsigned long nr_skipped, unsigned long nr_taken, - isolate_mode_t isolate_mode, int lru), - TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru), + TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, lru), TP_STRUCT__entry( __field(int, highest_zoneidx) @@ -297,7 +296,6 @@ TRACE_EVENT(mm_vmscan_lru_isolate, __field(unsigned long, nr_scanned) __field(unsigned long, nr_skipped) __field(unsigned long, nr_taken) - __field(unsigned int, isolate_mode) __field(int, lru) ), @@ -308,7 +306,6 @@ TRACE_EVENT(mm_vmscan_lru_isolate, __entry->nr_scanned = nr_scanned; __entry->nr_skipped = nr_skipped; __entry->nr_taken = nr_taken; - __entry->isolate_mode = (__force unsigned int)isolate_mode; __entry->lru = lru; ), @@ -316,8 +313,7 @@ 
TRACE_EVENT(mm_vmscan_lru_isolate, * classzone is previous name of the highest_zoneidx. * Reason not to change it is the ABI requirement of the tracepoint. */ - TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s", - __entry->isolate_mode, + TP_printk("classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s", __entry->highest_zoneidx, __entry->order, __entry->nr_requested, diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h index d0b3f0ea9b..f1ebe36787 100644 --- a/include/trace/events/vsock_virtio_transport_common.h +++ b/include/trace/events/vsock_virtio_transport_common.h @@ -43,7 +43,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt, __u32 len, __u16 type, __u16 op, - __u32 flags + __u32 flags, + bool zcopy ), TP_ARGS( src_cid, src_port, @@ -51,7 +52,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt, len, type, op, - flags + flags, + zcopy ), TP_STRUCT__entry( __field(__u32, src_cid) @@ -62,6 +64,7 @@ TRACE_EVENT(virtio_transport_alloc_pkt, __field(__u16, type) __field(__u16, op) __field(__u32, flags) + __field(bool, zcopy) ), TP_fast_assign( __entry->src_cid = src_cid; @@ -72,14 +75,15 @@ TRACE_EVENT(virtio_transport_alloc_pkt, __entry->type = type; __entry->op = op; __entry->flags = flags; + __entry->zcopy = zcopy; ), - TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x", + TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x zcopy=%s", __entry->src_cid, __entry->src_port, __entry->dst_cid, __entry->dst_port, __entry->len, show_type(__entry->type), show_op(__entry->op), - __entry->flags) + __entry->flags, __entry->zcopy ? "true" : "false") ); TRACE_EVENT(virtio_transport_recv_pkt, diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index 0f52d0ac47..b7bc545ec3 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -68,11 +68,6 @@ union __sifields { /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ struct { void __user *_addr; /* faulting insn/memory ref. */ -#ifdef __ia64__ - int _imm; /* immediate value for "break" */ - unsigned int _flags; /* see ia64 si_flags */ - unsigned long _isr; /* isr */ -#endif #define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? 
\ sizeof(short) : __alignof__(void *)) diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index abe087c53b..756b013fb8 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -71,7 +71,7 @@ __SYSCALL(__NR_fremovexattr, sys_fremovexattr) #define __NR_getcwd 17 __SYSCALL(__NR_getcwd, sys_getcwd) #define __NR_lookup_dcookie 18 -__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie) +__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall) #define __NR_eventfd2 19 __SYSCALL(__NR_eventfd2, sys_eventfd2) #define __NR_epoll_create1 20 @@ -816,15 +816,21 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease) __SYSCALL(__NR_futex_waitv, sys_futex_waitv) #define __NR_set_mempolicy_home_node 450 __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) - #define __NR_cachestat 451 __SYSCALL(__NR_cachestat, sys_cachestat) - #define __NR_fchmodat2 452 __SYSCALL(__NR_fchmodat2, sys_fchmodat2) +#define __NR_map_shadow_stack 453 +__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack) +#define __NR_futex_wake 454 +__SYSCALL(__NR_futex_wake, sys_futex_wake) +#define __NR_futex_wait 455 +__SYSCALL(__NR_futex_wait, sys_futex_wait) +#define __NR_futex_requeue 456 +__SYSCALL(__NR_futex_requeue, sys_futex_requeue) #undef __NR_syscalls -#define __NR_syscalls 453 +#define __NR_syscalls 457 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index f477eda6a2..ad21c613fe 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -150,7 +150,7 @@ extern "C" { */ #define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12) /* Flag that BO is shared coherently between multiple devices or CPU threads. - * May depend on GPU instructions to flush caches explicitly + * May depend on GPU instructions to flush caches to system scope explicitly. * * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and * may override the MTYPE selected in AMDGPU_VA_OP_MAP. @@ -163,6 +163,14 @@ extern "C" { * may override the MTYPE selected in AMDGPU_VA_OP_MAP. */ #define AMDGPU_GEM_CREATE_UNCACHED (1 << 14) +/* Flag that BO should be coherent across devices when using device-level + * atomics. May depend on GPU instructions to flush caches to device scope + * explicitly, promoting them to system scope automatically. + * + * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and + * may override the MTYPE selected in AMDGPU_VA_OP_MAP. 
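Returning to the unistd.h hunk above: slots 454-456 give the futex2 operations their own entry points. There are no glibc wrappers yet, so user space reaches them via syscall(2); a sketch of waking a single waiter (the four-argument order of futex_wake is taken from the 6.7 futex2 series and should be treated as an assumption here):

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static long futex2_wake_one(uint32_t *uaddr)
{
	return syscall(__NR_futex_wake, uaddr,
		       FUTEX_BITSET_MATCH_ANY,	/* match mask */
		       1,			/* wake at most one waiter */
		       FUTEX2_SIZE_U32 | FUTEX2_PRIVATE);
}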
+ */ +#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15) struct drm_amdgpu_gem_create_in { /** the requested memory size */ @@ -241,9 +249,9 @@ union drm_amdgpu_bo_list { /* unknown cause */ #define AMDGPU_CTX_UNKNOWN_RESET 3 -/* indicate gpu reset occured after ctx created */ +/* indicate gpu reset occurred after ctx created */ #define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0) -/* indicate vram lost occured after ctx created */ +/* indicate vram lost occurred after ctx created */ #define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1) /* indicate some job from this context once cause gpu hang */ #define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2) @@ -586,7 +594,8 @@ struct drm_amdgpu_gem_va { */ #define AMDGPU_HW_IP_VCN_ENC 7 #define AMDGPU_HW_IP_VCN_JPEG 8 -#define AMDGPU_HW_IP_NUM 9 +#define AMDGPU_HW_IP_VPE 9 +#define AMDGPU_HW_IP_NUM 10 #define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1 @@ -797,6 +806,8 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow { #define AMDGPU_INFO_FW_MES 0x1a /* Subquery id: Query IMU firmware version */ #define AMDGPU_INFO_FW_IMU 0x1b + /* Subquery id: Query VPE firmware version */ + #define AMDGPU_INFO_FW_VPE 0x1c /* number of bytes moved for TTM migration */ #define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f @@ -895,6 +906,8 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow { #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1 /* Query the max number of IBs per gang per submission */ #define AMDGPU_INFO_MAX_IBS 0x22 +/* query last page fault info */ +#define AMDGPU_INFO_GPUVM_FAULT 0x23 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff @@ -1220,6 +1233,20 @@ struct drm_amdgpu_info_video_caps { struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT]; }; +#define AMDGPU_VMHUB_TYPE_MASK 0xff +#define AMDGPU_VMHUB_TYPE_SHIFT 0 +#define AMDGPU_VMHUB_TYPE_GFX 0 +#define AMDGPU_VMHUB_TYPE_MM0 1 +#define AMDGPU_VMHUB_TYPE_MM1 2 +#define AMDGPU_VMHUB_IDX_MASK 0xff00 +#define AMDGPU_VMHUB_IDX_SHIFT 8 + +struct drm_amdgpu_info_gpuvm_fault { + __u64 addr; + __u32 status; + __u32 vmhub; +}; + /* * Supported GPU families */ @@ -1238,6 +1265,7 @@ struct drm_amdgpu_info_video_caps { #define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */ #define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */ #define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */ +#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */ #if defined(__cplusplus) } diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 794c1d8576..de723566c5 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -1134,6 +1134,26 @@ extern "C" { #define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) #define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd) +/** + * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object. + * + * KMS dumb buffers provide a very primitive way to allocate a buffer object + * suitable for scanout and map it for software rendering. KMS dumb buffers are + * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb + * buffers are not suitable to be displayed on any other device than the KMS + * device where they were allocated from. Also see + * :ref:`kms_dumb_buffer_objects`. + * + * The IOCTL argument is a struct drm_mode_create_dumb. + * + * User-space is expected to create a KMS dumb buffer via this IOCTL, then add + * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via + * &DRM_IOCTL_MODE_MAP_DUMB. + * + * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported. 
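From user space, the create/add/map flow this comment describes looks roughly as follows (a sketch with error handling omitted; fd is an open KMS device node and the helper name is ours):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

static void *map_new_dumb_fb(int fd, uint32_t w, uint32_t h, uint64_t *size)
{
	struct drm_mode_create_dumb create = {
		.width = w, .height = h, .bpp = 32,	/* flags must stay 0 */
	};
	struct drm_mode_map_dumb map = { 0 };

	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);	/* fills handle/pitch/size */
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);	/* fills the mmap fake offset */

	*size = create.size;
	/* attach create.handle via DRM_IOCTL_MODE_ADDFB before scanout */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}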
+ * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate + * driver preferences for dumb buffers. + */ #define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb) #define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb) #define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb) diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 8db7fd3f74..3151f1fc7e 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -323,6 +323,8 @@ extern "C" { * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian */ #define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */ /* * 2 plane YCbCr MSB aligned diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index ea1b639bcb..128d09138c 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -1032,13 +1032,25 @@ struct drm_mode_crtc_page_flip_target { __u64 user_data; }; -/* create a dumb scanout buffer */ +/** + * struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout. + * @height: buffer height in pixels + * @width: buffer width in pixels + * @bpp: bits per pixel + * @flags: must be zero + * @handle: buffer object handle + * @pitch: number of bytes between two consecutive lines + * @size: size of the whole buffer in bytes + * + * User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds, + * the kernel fills @handle, @pitch and @size. + */ struct drm_mode_create_dumb { __u32 height; __u32 width; __u32 bpp; __u32 flags; - /* handle, pitch, size will be returned */ + __u32 handle; __u32 pitch; __u64 size; diff --git a/include/uapi/drm/habanalabs_accel.h b/include/uapi/drm/habanalabs_accel.h index e6436f3e8e..347c7b62e6 100644 --- a/include/uapi/drm/habanalabs_accel.h +++ b/include/uapi/drm/habanalabs_accel.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note * - * Copyright 2016-2022 HabanaLabs, Ltd. + * Copyright 2016-2023 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -8,8 +8,7 @@ #ifndef HABANALABS_H_ #define HABANALABS_H_ -#include <linux/types.h> -#include <linux/ioctl.h> +#include <drm/drm.h> /* * Defines that are asic-specific but constitutes as ABI between kernel driver @@ -607,9 +606,9 @@ enum gaudi2_engine_id { /* * ASIC specific PLL index * - * Used to retrieve in frequency info of different IPs via - * HL_INFO_PLL_FREQUENCY under HL_IOCTL_INFO IOCTL. The enums need to be - * used as an index in struct hl_pll_frequency_info + * Used to retrieve in frequency info of different IPs via HL_INFO_PLL_FREQUENCY under + * DRM_IOCTL_HL_INFO IOCTL. + * The enums need to be used as an index in struct hl_pll_frequency_info. */ enum hl_goya_pll_index { @@ -809,6 +808,7 @@ enum hl_server_type { * HL_INFO_FW_ERR_EVENT - Retrieve information on the reported FW error. * May return 0 even though no new data is available, in that case * timestamp will be 0. + * HL_INFO_USER_ENGINE_ERR_EVENT - Retrieve the last engine id that reported an error. 
*/ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 @@ -845,6 +845,7 @@ enum hl_server_type { #define HL_INFO_FW_GENERIC_REQ 35 #define HL_INFO_HW_ERR_EVENT 36 #define HL_INFO_FW_ERR_EVENT 37 +#define HL_INFO_USER_ENGINE_ERR_EVENT 38 #define HL_INFO_VERSION_MAX_LEN 128 #define HL_INFO_CARD_NAME_MAX_LEN 16 @@ -884,11 +885,11 @@ enum hl_server_type { * @dram_enabled: Whether the DRAM is enabled. * @security_enabled: Whether security is enabled on device. * @mme_master_slave_mode: Indicate whether the MME is working in master/slave - * configuration. Relevant for Greco and later. + * configuration. Relevant for Gaudi2 and later. * @cpucp_version: The CPUCP f/w version. * @card_name: The card name as passed by the f/w. * @tpc_enabled_mask_ext: Bit-mask that represents which TPCs are enabled. - * Relevant for Greco and later. + * Relevant for Gaudi2 and later. * @dram_page_size: The DRAM physical page size. * @edma_enabled_mask: Bit-mask that represents which EDMAs are enabled. * Relevant for Gaudi2 and later. @@ -990,6 +991,7 @@ struct hl_info_reset_count { struct hl_info_time_sync { __u64 device_time; __u64 host_time; + __u64 tsc_time; }; /** @@ -1227,6 +1229,20 @@ struct hl_info_fw_err_event { }; /** + * struct hl_info_engine_err_event - engine error info + * @timestamp: time-stamp of error occurrence + * @engine_id: engine id who reported the error. + * @error_count: Amount of errors reported. + * @pad: size padding for u64 granularity. + */ +struct hl_info_engine_err_event { + __s64 timestamp; + __u16 engine_id; + __u16 error_count; + __u32 pad; +}; + +/** * struct hl_info_dev_memalloc_page_sizes - valid page sizes in device mem alloc information. * @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size * (e.g. 0x2100000 means that 1MB and 32MB pages are supported). @@ -1409,7 +1425,7 @@ union hl_cb_args { * * HL_CS_CHUNK_FLAGS_USER_ALLOC_CB: * Indicates if the CB was allocated and mapped by userspace - * (relevant to greco and above). User allocated CB is a command buffer, + * (relevant to Gaudi2 and later). User allocated CB is a command buffer, * allocated by the user, via malloc (or similar). After allocating the * CB, the user invokes - “memory ioctl” to map the user memory into a * device virtual address. The user provides this address via the @@ -1434,7 +1450,7 @@ struct hl_cs_chunk { * a DRAM address of the internal CB. In Gaudi, this might also * represent a mapped host address of the CB. * - * Greco onwards: + * Gaudi2 onwards: * For H/W queue, this represents either a Handle of CB on the * Host, or an SRAM, a DRAM, or a mapped host address of the CB. * @@ -2147,6 +2163,13 @@ struct hl_debug_args { __u32 ctx_id; }; +#define HL_IOCTL_INFO 0x00 +#define HL_IOCTL_CB 0x01 +#define HL_IOCTL_CS 0x02 +#define HL_IOCTL_WAIT_CS 0x03 +#define HL_IOCTL_MEMORY 0x04 +#define HL_IOCTL_DEBUG 0x05 + /* * Various information operations such as: * - H/W IP information @@ -2161,8 +2184,7 @@ struct hl_debug_args { * definitions of structures in kernel and userspace, e.g. in case of old * userspace and new kernel driver */ -#define HL_IOCTL_INFO \ - _IOWR('H', 0x01, struct hl_info_args) +#define DRM_IOCTL_HL_INFO DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_INFO, struct hl_info_args) /* * Command Buffer @@ -2183,8 +2205,7 @@ struct hl_debug_args { * and won't be returned to user. 
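From user space the visible effect of this habanalabs_accel.h conversion is only a new request number: the argument structs are unchanged, but the ioctls now live in the DRM command space. A sketch (the wrapper name is ours):

#include <sys/ioctl.h>
#include <drm/habanalabs_accel.h>

static int hl_query_info(int fd, struct hl_info_args *args)
{
	/* was ioctl(fd, HL_IOCTL_INFO, args), i.e. _IOWR('H', 0x01, ...) */
	return ioctl(fd, DRM_IOCTL_HL_INFO, args);
}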
* */ -#define HL_IOCTL_CB \ - _IOWR('H', 0x02, union hl_cb_args) +#define DRM_IOCTL_HL_CB DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CB, union hl_cb_args) /* * Command Submission @@ -2206,7 +2227,7 @@ struct hl_debug_args { * internal. The driver will get completion notifications from the device only * on JOBS which are enqueued in the external queues. * - * Greco onwards: + * Gaudi2 onwards: * There is a single type of queue for all types of engines, either DMA engines * for transfers from/to the host or inside the device, or compute engines. * The driver will get completion notifications from the device for all queues. @@ -2236,8 +2257,7 @@ struct hl_debug_args { * and only if CS N and CS N-1 are exactly the same (same CBs for the same * queues). */ -#define HL_IOCTL_CS \ - _IOWR('H', 0x03, union hl_cs_args) +#define DRM_IOCTL_HL_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CS, union hl_cs_args) /* * Wait for Command Submission @@ -2269,9 +2289,7 @@ struct hl_debug_args { * HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the * device was reset (EIO) */ - -#define HL_IOCTL_WAIT_CS \ - _IOWR('H', 0x04, union hl_wait_cs_args) +#define DRM_IOCTL_HL_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_WAIT_CS, union hl_wait_cs_args) /* * Memory @@ -2288,8 +2306,7 @@ struct hl_debug_args { * There is an option for the user to specify the requested virtual address. * */ -#define HL_IOCTL_MEMORY \ - _IOWR('H', 0x05, union hl_mem_args) +#define DRM_IOCTL_HL_MEMORY DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_MEMORY, union hl_mem_args) /* * Debug @@ -2315,10 +2332,9 @@ struct hl_debug_args { * The driver can decide to "kick out" the user if he abuses this interface. * */ -#define HL_IOCTL_DEBUG \ - _IOWR('H', 0x06, struct hl_debug_args) +#define DRM_IOCTL_HL_DEBUG DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_DEBUG, struct hl_debug_args) -#define HL_COMMAND_START 0x01 -#define HL_COMMAND_END 0x07 +#define HL_COMMAND_START (DRM_COMMAND_BASE + HL_IOCTL_INFO) +#define HL_COMMAND_END (DRM_COMMAND_BASE + HL_IOCTL_DEBUG + 1) #endif /* HABANALABS_H_ */ diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 7000e5910a..218edb0a96 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -38,13 +38,13 @@ extern "C" { */ /** - * DOC: uevents generated by i915 on it's device node + * DOC: uevents generated by i915 on its device node * * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch - * event from the gpu l3 cache. Additional information supplied is ROW, + * event from the GPU L3 cache. Additional information supplied is ROW, * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep - * track of these events and if a specific cache-line seems to have a - * persistent error remap it with the l3 remapping tool supplied in + * track of these events, and if a specific cache-line seems to have a + * persistent error, remap it with the L3 remapping tool supplied in * intel-gpu-tools. The value supplied with the event is always 1. * * I915_ERROR_UEVENT - Generated upon error detection, currently only via diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h index a58a14c9f2..262db0c3be 100644 --- a/include/uapi/drm/ivpu_accel.h +++ b/include/uapi/drm/ivpu_accel.h @@ -69,8 +69,20 @@ extern "C" { #define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2 #define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3 -#define DRM_IVPU_CAP_METRIC_STREAMER 1 -#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2 +/** + * DRM_IVPU_CAP_METRIC_STREAMER + * + * Metric streamer support. 
Provides sampling of various hardware performance + * metrics like DMA bandwidth and cache miss/hits. Can be used for profiling. + */ +#define DRM_IVPU_CAP_METRIC_STREAMER 1 +/** + * DRM_IVPU_CAP_DMA_MEMORY_RANGE + * + * Driver has capability to allocate separate memory range + * accessible by hardware DMA. + */ +#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2 /** * struct drm_ivpu_param - Get/Set VPU parameters @@ -123,6 +135,8 @@ struct drm_ivpu_param { * %DRM_IVPU_PARAM_SKU: * VPU SKU ID (read-only) * + * %DRM_IVPU_PARAM_CAPABILITIES: + * Supported capabilities (read-only) */ __u32 param; diff --git a/include/uapi/drm/qaic_accel.h b/include/uapi/drm/qaic_accel.h index 2d348744a8..43ac5d8645 100644 --- a/include/uapi/drm/qaic_accel.h +++ b/include/uapi/drm/qaic_accel.h @@ -242,12 +242,12 @@ struct qaic_attach_slice_entry { * @dbc_id: In. Associate the sliced BO with this DBC. * @handle: In. GEM handle of the BO to slice. * @dir: In. Direction of data flow. 1 = DMA_TO_DEVICE, 2 = DMA_FROM_DEVICE - * @size: In. Total length of the BO. - * If BO is imported (DMABUF/PRIME) then this size - * should not exceed the size of DMABUF provided. - * If BO is allocated using DRM_IOCTL_QAIC_CREATE_BO - * then this size should be exactly same as the size - * provided during DRM_IOCTL_QAIC_CREATE_BO. + * @size: In. Total length of BO being used. This should not exceed base + * size of BO (struct drm_gem_object.base). + * For BOs being allocated using DRM_IOCTL_QAIC_CREATE_BO, size of + * BO requested is PAGE_SIZE aligned then allocated, hence the allocated + * BO size may be bigger. This size should not exceed the new + * PAGE_SIZE aligned BO size. * @dev_addr: In. Device address this slice pushes to or pulls from. * @db_addr: In. Address of the doorbell to ring. * @db_data: In. Data to write to the doorbell. @@ -372,6 +372,16 @@ struct qaic_perf_stats_entry { __u32 pad; }; +/** + * struct qaic_detach_slice - Detaches slicing configuration from BO. + * @handle: In. GEM handle of the BO to detach slicing configuration. + * @pad: Structure padding. Must be 0. 
+ */ +struct qaic_detach_slice { + __u32 handle; + __u32 pad; +}; + #define DRM_QAIC_MANAGE 0x00 #define DRM_QAIC_CREATE_BO 0x01 #define DRM_QAIC_MMAP_BO 0x02 @@ -380,6 +390,7 @@ struct qaic_perf_stats_entry { #define DRM_QAIC_PARTIAL_EXECUTE_BO 0x05 #define DRM_QAIC_WAIT_BO 0x06 #define DRM_QAIC_PERF_STATS_BO 0x07 +#define DRM_QAIC_DETACH_SLICE_BO 0x08 #define DRM_IOCTL_QAIC_MANAGE DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_MANAGE, struct qaic_manage_msg) #define DRM_IOCTL_QAIC_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_CREATE_BO, struct qaic_create_bo) @@ -389,6 +400,7 @@ struct qaic_perf_stats_entry { #define DRM_IOCTL_QAIC_PARTIAL_EXECUTE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_PARTIAL_EXECUTE_BO, struct qaic_execute) #define DRM_IOCTL_QAIC_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_WAIT_BO, struct qaic_wait) #define DRM_IOCTL_QAIC_PERF_STATS_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_PERF_STATS_BO, struct qaic_perf_stats) +#define DRM_IOCTL_QAIC_DETACH_SLICE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_DETACH_SLICE_BO, struct qaic_detach_slice) #if defined(__cplusplus) } diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index ada7acb91a..bda948a685 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -932,7 +932,14 @@ enum bpf_map_type { */ BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED, + /* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs + * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE + + * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE + * functionality and more. So mark * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE + * deprecated. + */ + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED, BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK, BPF_MAP_TYPE_SK_STORAGE, @@ -1040,6 +1047,13 @@ enum bpf_attach_type { BPF_TCX_INGRESS, BPF_TCX_EGRESS, BPF_TRACE_UPROBE_MULTI, + BPF_CGROUP_UNIX_CONNECT, + BPF_CGROUP_UNIX_SENDMSG, + BPF_CGROUP_UNIX_RECVMSG, + BPF_CGROUP_UNIX_GETPEERNAME, + BPF_CGROUP_UNIX_GETSOCKNAME, + BPF_NETKIT_PRIMARY, + BPF_NETKIT_PEER, __MAX_BPF_ATTACH_TYPE }; @@ -1059,6 +1073,7 @@ enum bpf_link_type { BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, + BPF_LINK_TYPE_NETKIT = 13, MAX_BPF_LINK_TYPE, }; @@ -1644,6 +1659,13 @@ union bpf_attr { __u32 flags; __u32 pid; } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } netkit; }; } link_create; @@ -2697,8 +2719,8 @@ union bpf_attr { * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. - * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** - * and **BPF_CGROUP_INET6_CONNECT**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, + * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. * * This helper actually implements a subset of **setsockopt()**. * It supports the following *level*\ s: @@ -2936,8 +2958,8 @@ union bpf_attr { * *bpf_socket* should be one of the following: * * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. - * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** - * and **BPF_CGROUP_INET6_CONNECT**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**, + * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**. 
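/*
 * [Editor's sketch, not part of the diff] The BPF_CGROUP_UNIX_* attach types
 * added above extend the cgroup sockaddr hooks to AF_UNIX sockets. A minimal
 * program for the connect hook could look as follows; the ELF section name
 * "cgroup/connect_unix" is a libbpf loader convention assumed here, it is not
 * defined by this header.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/connect_unix")
int allow_unix_connect(struct bpf_sock_addr *ctx)
{
	/* Verdict: 1 lets the connect() proceed, 0 rejects it with -EPERM. */
	return 1;
}

char _license[] SEC("license") = "GPL";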
* * This helper actually implements a subset of **getsockopt()**. * It supports the same set of *optname*\ s that is supported by @@ -3257,6 +3279,11 @@ union bpf_attr { * and *params*->smac will not be set as output. A common * use case is to call **bpf_redirect_neigh**\ () after * doing **bpf_fib_lookup**\ (). + * **BPF_FIB_LOOKUP_SRC** + * Derive and set source IP addr in *params*->ipv{4,6}_src + * for the nexthop. If the src addr cannot be derived, + * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this + * case, *params*->dmac and *params*->smac are not set either. * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. @@ -5092,6 +5119,8 @@ union bpf_attr { * **BPF_F_TIMER_ABS** * Start the timer in absolute expire value instead of the * default relative one. + * **BPF_F_TIMER_CPU_PIN** + * Timer will be pinned to the CPU of the caller. * * Return * 0 on success. @@ -6528,6 +6557,7 @@ struct bpf_link_info { __aligned_u64 addrs; __u32 count; /* in/out: kprobe_multi function count */ __u32 flags; + __u64 missed; } kprobe_multi; struct { __u32 type; /* enum bpf_perf_event_type */ @@ -6543,6 +6573,7 @@ struct bpf_link_info { __u32 name_len; __u32 offset; /* offset from func_name */ __u64 addr; + __u64 missed; } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */ struct { __aligned_u64 tp_name; /* in/out */ @@ -6558,6 +6589,10 @@ struct bpf_link_info { __u32 ifindex; __u32 attach_type; } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; }; } __attribute__((aligned(8))); @@ -6956,6 +6991,7 @@ enum { BPF_FIB_LOOKUP_OUTPUT = (1U << 1), BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), BPF_FIB_LOOKUP_TBID = (1U << 3), + BPF_FIB_LOOKUP_SRC = (1U << 4), }; enum { @@ -6968,6 +7004,7 @@ enum { BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ + BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */ }; struct bpf_fib_lookup { @@ -7002,6 +7039,9 @@ struct bpf_fib_lookup { __u32 rt_metric; }; + /* input: source address to consider for lookup + * output: source address result from lookup + */ union { __be32 ipv4_src; __u32 ipv6_src[4]; /* in6_addr; network order */ @@ -7303,9 +7343,11 @@ struct bpf_core_relo { * Flags to control bpf_timer_start() behaviour. * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is * relative to current time. + * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller. 
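/*
 * [Editor's sketch, not part of the diff] The two timer flags documented above
 * combine rather than exclude each other; assuming a map value 'val' whose
 * embedded struct bpf_timer has already been through bpf_timer_init() and
 * bpf_timer_set_callback():
 *
 *	__u64 deadline = bpf_ktime_get_ns() + 1000000;	// one ms from now
 *	bpf_timer_start(&val->timer, deadline,
 *			BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN);
 *
 * i.e. an absolute expiry time, serviced on the CPU that armed the timer.
 */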
*/ enum { BPF_F_TIMER_ABS = (1ULL << 0), + BPF_F_TIMER_CPU_PIN = (1ULL << 1), }; /* BPF numbers iterator state */ diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 6f776faaa7..f8bc34a6bc 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -333,6 +333,8 @@ struct btrfs_ioctl_fs_info_args { #define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11) #define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12) #define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13) +#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14) +#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16) struct btrfs_ioctl_feature_flags { __u64 compat_flags; @@ -756,6 +758,7 @@ struct btrfs_ioctl_get_dev_stats { #define BTRFS_QUOTA_CTL_ENABLE 1 #define BTRFS_QUOTA_CTL_DISABLE 2 #define BTRFS_QUOTA_CTL_RESCAN__NOTUSED 3 +#define BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA 4 struct btrfs_ioctl_quota_ctl_args { __u64 cmd; __u64 status; diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index fc3c32186d..d24e8e1215 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -73,6 +73,9 @@ /* Holds the block group items for extent tree v2. */ #define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL +/* Tracks RAID stripes in block groups. */ +#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL + /* device stats in the device tree */ #define BTRFS_DEV_STATS_OBJECTID 0ULL @@ -216,6 +219,22 @@ */ #define BTRFS_METADATA_ITEM_KEY 169 +/* + * Special inline ref key which stores the id of the subvolume which originally + * created the extent. This subvolume owns the extent permanently from the + * perspective of simple quotas. Needed to know which subvolume to free quota + * usage from when the extent is deleted. + * + * Stored as an inline ref, rather than as a separate item, to avoid wasting + * space on top of the existing extent item. However, unlike the other inline + * refs, there is only one owner ref per extent rather than one per extent + * owner. + * + * Because of this, it goes at the front of the list of inline refs, and thus + * must have a lower type value than any other inline ref type (to satisfy the + * disk format rule that inline refs have non-decreasing type). + */ +#define BTRFS_EXTENT_OWNER_REF_KEY 172 + #define BTRFS_TREE_BLOCK_REF_KEY 176 #define BTRFS_EXTENT_DATA_REF_KEY 178 @@ -261,6 +280,8 @@ #define BTRFS_DEV_ITEM_KEY 216 #define BTRFS_CHUNK_ITEM_KEY 228 +#define BTRFS_RAID_STRIPE_KEY 230 + /* * Records the overall state of the qgroups. * There's only one instance of this key present, @@ -719,6 +740,30 @@ struct btrfs_free_space_header { __le64 num_bitmaps; } __attribute__ ((__packed__)); +struct btrfs_raid_stride { + /* The id of the device this raid extent lives on. */ + __le64 devid; + /* The physical location on disk. */ + __le64 physical; +} __attribute__ ((__packed__)); + +/* The stripe_extent::encoding, 1:1 mapping of enum btrfs_raid_types. */ +#define BTRFS_STRIPE_RAID0 1 +#define BTRFS_STRIPE_RAID1 2 +#define BTRFS_STRIPE_DUP 3 +#define BTRFS_STRIPE_RAID10 4 +#define BTRFS_STRIPE_RAID5 5 +#define BTRFS_STRIPE_RAID6 6 +#define BTRFS_STRIPE_RAID1C3 7 +#define BTRFS_STRIPE_RAID1C4 8 + +struct btrfs_stripe_extent { + __u8 encoding; + __u8 reserved[7]; + /* An array of raid strides this stripe is composed of. */
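/*
 * [Editor's sketch, not part of the diff] As with other btrfs flexible-array
 * items, the number of strides is implied by the on-disk item size rather
 * than stored explicitly:
 *
 *	nr_strides = (item_size - sizeof(struct btrfs_stripe_extent)) /
 *		     sizeof(struct btrfs_raid_stride);
 *
 * where item_size comes from the containing leaf's item header.
 */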
+ struct btrfs_raid_stride strides[]; +} __attribute__ ((__packed__)); + #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) @@ -787,6 +832,10 @@ struct btrfs_shared_data_ref { __le32 count; } __attribute__ ((__packed__)); +struct btrfs_extent_owner_ref { + __le64 root_id; +} __attribute__ ((__packed__)); + struct btrfs_extent_inline_ref { __u8 type; __le64 offset; @@ -1204,9 +1253,17 @@ static inline __u16 btrfs_qgroup_level(__u64 qgroupid) */ #define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2) +/* + * Whether or not this filesystem is using simple quotas. Not exactly the + * incompat bit, because we support using simple quotas, disabling it, then + * going back to full qgroup quotas. + */ +#define BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE (1ULL << 3) + #define BTRFS_QGROUP_STATUS_FLAGS_MASK (BTRFS_QGROUP_STATUS_FLAG_ON | \ BTRFS_QGROUP_STATUS_FLAG_RESCAN | \ - BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) + BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | \ + BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) #define BTRFS_QGROUP_STATUS_VERSION 1 @@ -1228,6 +1285,15 @@ struct btrfs_qgroup_status_item { * of the scan. It contains a logical address */ __le64 rescan; + + /* + * The generation when quotas were last enabled. Used by simple quotas to + * avoid decrementing when freeing an extent that was written before + * enable. + * + * Set only if flags contain BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE. + */ + __le64 enable_gen; } __attribute__ ((__packed__)); struct btrfs_qgroup_info_item { diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h index fc248ef00e..008a691c25 100644 --- a/include/uapi/linux/counter.h +++ b/include/uapi/linux/counter.h @@ -38,7 +38,7 @@ enum counter_scope { * * For example, if the Count 2 ceiling extension of Counter device 4 is desired, * set type equal to COUNTER_COMPONENT_EXTENSION, scope equal to - COUNTER_COUNT_SCOPE, parent equal to 2, and id equal to the value provided by + COUNTER_SCOPE_COUNT, parent equal to 2, and id equal to the value provided by * the respective /sys/bus/counter/devices/counter4/count2/ceiling_component_id * sysfs attribute.
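/*
 * [Editor's sketch, not part of the diff] The counter.h example above maps
 * onto the watch structures from this same header like so; the event choice
 * is illustrative, and the id would first be read from the
 * ceiling_component_id sysfs attribute.
 */
#include <linux/counter.h>
#include <sys/ioctl.h>

static int watch_count2_ceiling(int counter_fd, __u8 ceiling_id)
{
	struct counter_watch watch = {
		.component = {
			.type	= COUNTER_COMPONENT_EXTENSION,
			.scope	= COUNTER_SCOPE_COUNT,
			.parent	= 2,		/* Count 2 */
			.id	= ceiling_id,
		},
		.event	 = COUNTER_EVENT_THRESHOLD,	/* illustrative */
		.channel = 0,
	};

	/* Follow with COUNTER_ENABLE_EVENTS_IOCTL to start delivery. */
	return ioctl(counter_fd, COUNTER_ADD_WATCH_IOCTL, &watch);
}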
*/ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 03875e078b..b3c8383d34 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -265,7 +265,7 @@ enum { * Documentation/networking/devlink/devlink-flash.rst * */ -enum { +enum devlink_flash_overwrite { DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT, DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT, @@ -680,6 +680,7 @@ enum devlink_port_function_attr { DEVLINK_PORT_FN_ATTR_STATE, /* u8 */ DEVLINK_PORT_FN_ATTR_OPSTATE, /* u8 */ DEVLINK_PORT_FN_ATTR_CAPS, /* bitfield32 */ + DEVLINK_PORT_FN_ATTR_DEVLINK, /* nested */ __DEVLINK_PORT_FUNCTION_ATTR_MAX, DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1 diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h new file mode 100644 index 0000000000..715a491d27 --- /dev/null +++ b/include/uapi/linux/dpll.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/dpll.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_DPLL_H +#define _UAPI_LINUX_DPLL_H + +#define DPLL_FAMILY_NAME "dpll" +#define DPLL_FAMILY_VERSION 1 + +/** + * enum dpll_mode - working modes a dpll can support, differentiates if and how + * dpll selects one of its inputs to syntonize with it, valid values for + * DPLL_A_MODE attribute + * @DPLL_MODE_MANUAL: input can be only selected by sending a request to dpll + * @DPLL_MODE_AUTOMATIC: highest prio input pin auto selected by dpll + */ +enum dpll_mode { + DPLL_MODE_MANUAL = 1, + DPLL_MODE_AUTOMATIC, + + /* private: */ + __DPLL_MODE_MAX, + DPLL_MODE_MAX = (__DPLL_MODE_MAX - 1) +}; + +/** + * enum dpll_lock_status - provides information of dpll device lock status, + * valid values for DPLL_A_LOCK_STATUS attribute + * @DPLL_LOCK_STATUS_UNLOCKED: dpll was not yet locked to any valid input (or + * forced by setting DPLL_A_MODE to DPLL_MODE_DETACHED) + * @DPLL_LOCK_STATUS_LOCKED: dpll is locked to a valid signal, but no holdover + * available + * @DPLL_LOCK_STATUS_LOCKED_HO_ACQ: dpll is locked and holdover acquired + * @DPLL_LOCK_STATUS_HOLDOVER: dpll is in holdover state - lost a valid lock or + * was forced by disconnecting all the pins (latter possible only when dpll + * lock-state was already DPLL_LOCK_STATUS_LOCKED_HO_ACQ, if dpll lock-state + * was not DPLL_LOCK_STATUS_LOCKED_HO_ACQ, the dpll's lock-state shall remain + * DPLL_LOCK_STATUS_UNLOCKED) + */ +enum dpll_lock_status { + DPLL_LOCK_STATUS_UNLOCKED = 1, + DPLL_LOCK_STATUS_LOCKED, + DPLL_LOCK_STATUS_LOCKED_HO_ACQ, + DPLL_LOCK_STATUS_HOLDOVER, + + /* private: */ + __DPLL_LOCK_STATUS_MAX, + DPLL_LOCK_STATUS_MAX = (__DPLL_LOCK_STATUS_MAX - 1) +}; + +#define DPLL_TEMP_DIVIDER 1000 + +/** + * enum dpll_type - type of dpll, valid values for DPLL_A_TYPE attribute + * @DPLL_TYPE_PPS: dpll produces Pulse-Per-Second signal + * @DPLL_TYPE_EEC: dpll drives the Ethernet Equipment Clock + */ +enum dpll_type { + DPLL_TYPE_PPS = 1, + DPLL_TYPE_EEC, + + /* private: */ + __DPLL_TYPE_MAX, + DPLL_TYPE_MAX = (__DPLL_TYPE_MAX - 1) +}; + +/** + * enum dpll_pin_type - defines possible types of a pin, valid values for + * DPLL_A_PIN_TYPE attribute + * @DPLL_PIN_TYPE_MUX: aggregates another layer of selectable pins + * @DPLL_PIN_TYPE_EXT: external input + * @DPLL_PIN_TYPE_SYNCE_ETH_PORT: ethernet port PHY's recovered clock + * @DPLL_PIN_TYPE_INT_OSCILLATOR: device internal oscillator + * @DPLL_PIN_TYPE_GNSS: GNSS recovered clock + */ +enum dpll_pin_type { + 
DPLL_PIN_TYPE_MUX = 1, + DPLL_PIN_TYPE_EXT, + DPLL_PIN_TYPE_SYNCE_ETH_PORT, + DPLL_PIN_TYPE_INT_OSCILLATOR, + DPLL_PIN_TYPE_GNSS, + + /* private: */ + __DPLL_PIN_TYPE_MAX, + DPLL_PIN_TYPE_MAX = (__DPLL_PIN_TYPE_MAX - 1) +}; + +/** + * enum dpll_pin_direction - defines possible direction of a pin, valid values + * for DPLL_A_PIN_DIRECTION attribute + * @DPLL_PIN_DIRECTION_INPUT: pin used as a input of a signal + * @DPLL_PIN_DIRECTION_OUTPUT: pin used to output the signal + */ +enum dpll_pin_direction { + DPLL_PIN_DIRECTION_INPUT = 1, + DPLL_PIN_DIRECTION_OUTPUT, + + /* private: */ + __DPLL_PIN_DIRECTION_MAX, + DPLL_PIN_DIRECTION_MAX = (__DPLL_PIN_DIRECTION_MAX - 1) +}; + +#define DPLL_PIN_FREQUENCY_1_HZ 1 +#define DPLL_PIN_FREQUENCY_10_KHZ 10000 +#define DPLL_PIN_FREQUENCY_77_5_KHZ 77500 +#define DPLL_PIN_FREQUENCY_10_MHZ 10000000 + +/** + * enum dpll_pin_state - defines possible states of a pin, valid values for + * DPLL_A_PIN_STATE attribute + * @DPLL_PIN_STATE_CONNECTED: pin connected, active input of phase locked loop + * @DPLL_PIN_STATE_DISCONNECTED: pin disconnected, not considered as a valid + * input + * @DPLL_PIN_STATE_SELECTABLE: pin enabled for automatic input selection + */ +enum dpll_pin_state { + DPLL_PIN_STATE_CONNECTED = 1, + DPLL_PIN_STATE_DISCONNECTED, + DPLL_PIN_STATE_SELECTABLE, + + /* private: */ + __DPLL_PIN_STATE_MAX, + DPLL_PIN_STATE_MAX = (__DPLL_PIN_STATE_MAX - 1) +}; + +/** + * enum dpll_pin_capabilities - defines possible capabilities of a pin, valid + * flags on DPLL_A_PIN_CAPABILITIES attribute + * @DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE: pin direction can be changed + * @DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE: pin priority can be changed + * @DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE: pin state can be changed + */ +enum dpll_pin_capabilities { + DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE = 1, + DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE = 2, + DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE = 4, +}; + +#define DPLL_PHASE_OFFSET_DIVIDER 1000 + +enum dpll_a { + DPLL_A_ID = 1, + DPLL_A_MODULE_NAME, + DPLL_A_PAD, + DPLL_A_CLOCK_ID, + DPLL_A_MODE, + DPLL_A_MODE_SUPPORTED, + DPLL_A_LOCK_STATUS, + DPLL_A_TEMP, + DPLL_A_TYPE, + + __DPLL_A_MAX, + DPLL_A_MAX = (__DPLL_A_MAX - 1) +}; + +enum dpll_a_pin { + DPLL_A_PIN_ID = 1, + DPLL_A_PIN_PARENT_ID, + DPLL_A_PIN_MODULE_NAME, + DPLL_A_PIN_PAD, + DPLL_A_PIN_CLOCK_ID, + DPLL_A_PIN_BOARD_LABEL, + DPLL_A_PIN_PANEL_LABEL, + DPLL_A_PIN_PACKAGE_LABEL, + DPLL_A_PIN_TYPE, + DPLL_A_PIN_DIRECTION, + DPLL_A_PIN_FREQUENCY, + DPLL_A_PIN_FREQUENCY_SUPPORTED, + DPLL_A_PIN_FREQUENCY_MIN, + DPLL_A_PIN_FREQUENCY_MAX, + DPLL_A_PIN_PRIO, + DPLL_A_PIN_STATE, + DPLL_A_PIN_CAPABILITIES, + DPLL_A_PIN_PARENT_DEVICE, + DPLL_A_PIN_PARENT_PIN, + DPLL_A_PIN_PHASE_ADJUST_MIN, + DPLL_A_PIN_PHASE_ADJUST_MAX, + DPLL_A_PIN_PHASE_ADJUST, + DPLL_A_PIN_PHASE_OFFSET, + + __DPLL_A_PIN_MAX, + DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1) +}; + +enum dpll_cmd { + DPLL_CMD_DEVICE_ID_GET = 1, + DPLL_CMD_DEVICE_GET, + DPLL_CMD_DEVICE_SET, + DPLL_CMD_DEVICE_CREATE_NTF, + DPLL_CMD_DEVICE_DELETE_NTF, + DPLL_CMD_DEVICE_CHANGE_NTF, + DPLL_CMD_PIN_ID_GET, + DPLL_CMD_PIN_GET, + DPLL_CMD_PIN_SET, + DPLL_CMD_PIN_CREATE_NTF, + DPLL_CMD_PIN_DELETE_NTF, + DPLL_CMD_PIN_CHANGE_NTF, + + __DPLL_CMD_MAX, + DPLL_CMD_MAX = (__DPLL_CMD_MAX - 1) +}; + +#define DPLL_MCGRP_MONITOR "monitor" + +#endif /* _UAPI_LINUX_DPLL_H */ diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 9b731976ce..9417309b72 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -140,7 +140,7 
@@ typedef __s64 Elf64_Sxword; #define ELF64_ST_BIND(x) ELF_ST_BIND(x) #define ELF64_ST_TYPE(x) ELF_ST_TYPE(x) -typedef struct dynamic { +typedef struct { Elf32_Sword d_tag; union { Elf32_Sword d_val; diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index b7b5687102..da43810b74 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -305,4 +305,63 @@ typedef int __bitwise __kernel_rwf_t; #define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\ RWF_APPEND) +/* Pagemap ioctl */ +#define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg) + +/* Bitmasks provided in pm_scan_args masks and reported in page_region.categories. */ +#define PAGE_IS_WPALLOWED (1 << 0) +#define PAGE_IS_WRITTEN (1 << 1) +#define PAGE_IS_FILE (1 << 2) +#define PAGE_IS_PRESENT (1 << 3) +#define PAGE_IS_SWAPPED (1 << 4) +#define PAGE_IS_PFNZERO (1 << 5) +#define PAGE_IS_HUGE (1 << 6) + +/* + * struct page_region - Page region with flags + * @start: Start of the region + * @end: End of the region (exclusive) + * @categories: PAGE_IS_* category bitmask for the region + */ +struct page_region { + __u64 start; + __u64 end; + __u64 categories; +}; + +/* Flags for PAGEMAP_SCAN ioctl */ +#define PM_SCAN_WP_MATCHING (1 << 0) /* Write protect the pages matched. */ +#define PM_SCAN_CHECK_WPASYNC (1 << 1) /* Abort the scan when a non-WP-enabled page is found. */ + +/* + * struct pm_scan_arg - Pagemap ioctl argument + * @size: Size of the structure + * @flags: Flags for the IOCTL + * @start: Starting address of the region + * @end: Ending address of the region + * @walk_end Address where the scan stopped (written by kernel). + * walk_end == end (address tags cleared) informs that the scan completed on entire range. + * @vec: Address of page_region struct array for output + * @vec_len: Length of the page_region struct array + * @max_pages: Optional limit for number of returned pages (0 = disabled) + * @category_inverted: PAGE_IS_* categories which values match if 0 instead of 1 + * @category_mask: Skip pages for which any category doesn't match + * @category_anyof_mask: Skip pages for which no category matches + * @return_mask: PAGE_IS_* categories that are to be reported in `page_region`s returned + */ +struct pm_scan_arg { + __u64 size; + __u64 flags; + __u64 start; + __u64 end; + __u64 walk_end; + __u64 vec; + __u64 vec_len; + __u64 max_pages; + __u64 category_inverted; + __u64 category_mask; + __u64 category_anyof_mask; + __u64 return_mask; +}; + #endif /* _UAPI_LINUX_FS_H */ diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index fd1fb0d538..7a8f4c2901 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -71,7 +71,8 @@ struct fscrypt_policy_v2 { __u8 contents_encryption_mode; __u8 filenames_encryption_mode; __u8 flags; - __u8 __reserved[4]; + __u8 log2_data_unit_size; + __u8 __reserved[3]; __u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; }; diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h index 71a5df8d26..d2ee625ea1 100644 --- a/include/uapi/linux/futex.h +++ b/include/uapi/linux/futex.h @@ -44,10 +44,35 @@ FUTEX_PRIVATE_FLAG) /* - * Flags to specify the bit length of the futex word for futex2 syscalls. - * Currently, only 32 is supported. + * Flags for futex2 syscalls. 
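/*
 * [Editor's sketch, not part of the diff] A typical PAGEMAP_SCAN call against
 * /proc/<pid>/pagemap, asking only for written pages in a range; the range
 * and vector length are arbitrary here.
 */
#include <linux/fs.h>
#include <sys/ioctl.h>

static long scan_written(int pagemap_fd, __u64 start, __u64 end,
			 struct page_region *vec, __u64 vec_len)
{
	struct pm_scan_arg arg = {
		.size		= sizeof(arg),
		.start		= start,
		.end		= end,
		.vec		= (__u64)(unsigned long)vec,
		.vec_len	= vec_len,
		.category_mask	= PAGE_IS_WRITTEN,	/* filter... */
		.return_mask	= PAGE_IS_WRITTEN,	/* ...and report */
	};

	/* A non-negative return is the number of filled page_region entries. */
	return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
}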
+ * + * NOTE: these are not pure flags, they can also be seen as: + * + * union { + * u32 flags; + * struct { + * u32 size : 2, + * numa : 1, + * : 4, + * private : 1; + * }; + * }; */ -#define FUTEX_32 2 +#define FUTEX2_SIZE_U8 0x00 +#define FUTEX2_SIZE_U16 0x01 +#define FUTEX2_SIZE_U32 0x02 +#define FUTEX2_SIZE_U64 0x03 +#define FUTEX2_NUMA 0x04 + /* 0x08 */ + /* 0x10 */ + /* 0x20 */ + /* 0x40 */ +#define FUTEX2_PRIVATE FUTEX_PRIVATE_FLAG + +#define FUTEX2_SIZE_MASK 0x03 + +/* do not use */ +#define FUTEX_32 FUTEX2_SIZE_U32 /* historical accident :-( */ /* * Max numbers of elements in a futex_waitv array diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h index 4c878d84db..3a93f17ca9 100644 --- a/include/uapi/linux/gsmmux.h +++ b/include/uapi/linux/gsmmux.h @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (c) 2022/23 Siemens Mobility GmbH */ #ifndef _LINUX_GSMMUX_H #define _LINUX_GSMMUX_H diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h index 74a8609fcb..0af23ec196 100644 --- a/include/uapi/linux/hash_info.h +++ b/include/uapi/linux/hash_info.h @@ -35,6 +35,9 @@ enum hash_algo { HASH_ALGO_SM3_256, HASH_ALGO_STREEBOG_256, HASH_ALGO_STREEBOG_512, + HASH_ALGO_SHA3_256, + HASH_ALGO_SHA3_384, + HASH_ALGO_SHA3_512, HASH_ALGO__LAST }; diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index 606b52e88c..3d1987e1bb 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -31,6 +31,7 @@ enum idxd_scmd_stat { IDXD_SCMD_WQ_IRQ_ERR = 0x80100000, IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000, IDXD_SCMD_DEV_EVL_ERR = 0x80120000, + IDXD_SCMD_WQ_NO_DRV_NAME = 0x80200000, }; #define IDXD_SCMD_SOFTERR_MASK 0x80000000 diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index f95326fce6..2e23f99dc0 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -723,6 +723,24 @@ enum { }; #define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1) +/* [MDBA_GET_ENTRY] = { + * struct br_mdb_entry + * [MDBA_GET_ENTRY_ATTRS] = { + * [MDBE_ATTR_SOURCE] + * struct in_addr / struct in6_addr + * [MDBE_ATTR_SRC_VNI] + * u32 + * } + * } + */ +enum { + MDBA_GET_ENTRY_UNSPEC, + MDBA_GET_ENTRY, + MDBA_GET_ENTRY_ATTRS, + __MDBA_GET_ENTRY_MAX, +}; +#define MDBA_GET_ENTRY_MAX (__MDBA_GET_ENTRY_MAX - 1) + /* [MDBA_SET_ENTRY_ATTRS] = { * [MDBE_ATTR_xxx] * ... 
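/*
 * [Editor's sketch, not part of the diff] The low two bits of the futex2
 * flags encode log2 of the operand size, which makes decoding trivial:
 */
#include <linux/futex.h>

static inline unsigned int futex2_operand_bytes(unsigned int flags)
{
	return 1U << (flags & FUTEX2_SIZE_MASK);	/* 1, 2, 4 or 8 */
}
/* e.g. futex2_operand_bytes(FUTEX2_SIZE_U32 | FUTEX2_PRIVATE) == 4 */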
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ce3117df9c..29ff80da27 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -376,7 +376,7 @@ enum { IFLA_GSO_IPV4_MAX_SIZE, IFLA_GRO_IPV4_MAX_SIZE, - + IFLA_DPLL_PIN, __IFLA_MAX }; @@ -510,6 +510,8 @@ enum { IFLA_BR_VLAN_STATS_PER_PORT, IFLA_BR_MULTI_BOOLOPT, IFLA_BR_MCAST_QUERIER_STATE, + IFLA_BR_FDB_N_LEARNED, + IFLA_BR_FDB_MAX_LEARNED, __IFLA_BR_MAX, }; @@ -756,6 +758,30 @@ struct tunnel_msg { __u32 ifindex; }; +/* netkit section */ +enum netkit_action { + NETKIT_NEXT = -1, + NETKIT_PASS = 0, + NETKIT_DROP = 2, + NETKIT_REDIRECT = 7, +}; + +enum netkit_mode { + NETKIT_L2, + NETKIT_L3, +}; + +enum { + IFLA_NETKIT_UNSPEC, + IFLA_NETKIT_PEER_INFO, + IFLA_NETKIT_PRIMARY, + IFLA_NETKIT_POLICY, + IFLA_NETKIT_PEER_POLICY, + IFLA_NETKIT_MODE, + __IFLA_NETKIT_MAX, +}; +#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) + /* VXLAN section */ /* include statistics in the dump */ @@ -1392,7 +1418,9 @@ enum { enum { IFLA_DSA_UNSPEC, - IFLA_DSA_MASTER, + IFLA_DSA_CONDUIT, + /* Deprecated, use IFLA_DSA_CONDUIT instead */ + IFLA_DSA_MASTER = IFLA_DSA_CONDUIT, __IFLA_DSA_MAX, }; diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index c79f2f046a..9c2ffdcd66 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -47,6 +47,10 @@ enum iio_chan_type { IIO_POSITIONRELATIVE, IIO_PHASE, IIO_MASSCONCENTRATION, + IIO_DELTA_ANGL, + IIO_DELTA_VELOCITY, + IIO_COLORTEMP, + IIO_CHROMATICITY, }; enum iio_modifier { diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8e61f8b7c2..f1c16f8177 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -43,6 +43,10 @@ struct io_uring_sqe { union { __u64 addr; /* pointer to buffer or iovecs */ __u64 splice_off_in; + struct { + __u32 level; + __u32 optname; + }; }; __u32 len; /* buffer size or number of iovecs */ union { @@ -65,6 +69,8 @@ struct io_uring_sqe { __u32 xattr_flags; __u32 msg_ring_flags; __u32 uring_cmd_flags; + __u32 waitid_flags; + __u32 futex_flags; }; __u64 user_data; /* data to be passed back at completion time */ /* pack this to avoid bogus arm OABI complaints */ @@ -79,6 +85,7 @@ struct io_uring_sqe { union { __s32 splice_fd_in; __u32 file_index; + __u32 optlen; struct { __u16 addr_len; __u16 __pad3[1]; @@ -89,6 +96,7 @@ struct io_uring_sqe { __u64 addr3; __u64 __pad2[1]; }; + __u64 optval; /* * If the ring is initialized with IORING_SETUP_SQE128, then * this field is used for 80 bytes of arbitrary command data @@ -240,19 +248,23 @@ enum io_uring_op { IORING_OP_URING_CMD, IORING_OP_SEND_ZC, IORING_OP_SENDMSG_ZC, + IORING_OP_READ_MULTISHOT, + IORING_OP_WAITID, + IORING_OP_FUTEX_WAIT, + IORING_OP_FUTEX_WAKE, + IORING_OP_FUTEX_WAITV, /* this goes last, obviously */ IORING_OP_LAST, }; /* - * sqe->uring_cmd_flags + * sqe->uring_cmd_flags top 8bits aren't available for userspace * IORING_URING_CMD_FIXED use registered buffer; pass this flag * along with setting sqe->buf_index. 
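/*
 * [Editor's sketch, not part of the diff] The new level/optname/optval/optlen
 * SQE members above pair with the SOCKET_URING_OP_{GET,SET}SOCKOPT opcodes
 * added further down in this hunk. Filling the SQE by hand looks roughly like
 * this (liburing also grew a prep helper for this; that helper's name is not
 * taken from this header). On completion, cqe->res carries the result.
 */
#include <linux/io_uring.h>
#include <sys/socket.h>

static void prep_getsockopt(struct io_uring_sqe *sqe, int sockfd, int *val)
{
	sqe->opcode	= IORING_OP_URING_CMD;
	sqe->fd		= sockfd;
	sqe->cmd_op	= SOCKET_URING_OP_GETSOCKOPT;
	sqe->level	= SOL_SOCKET;
	sqe->optname	= SO_RCVBUF;
	sqe->optval	= (__u64)(unsigned long)val;
	sqe->optlen	= sizeof(*val);
}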
- * IORING_URING_CMD_POLLED driver use only */ #define IORING_URING_CMD_FIXED (1U << 0) -#define IORING_URING_CMD_POLLED (1U << 31) +#define IORING_URING_CMD_MASK IORING_URING_CMD_FIXED /* @@ -734,6 +746,8 @@ struct io_uring_recvmsg_out { enum { SOCKET_URING_OP_SIOCINQ = 0, SOCKET_URING_OP_SIOCOUTQ, + SOCKET_URING_OP_GETSOCKOPT, + SOCKET_URING_OP_SETSOCKOPT, }; #ifdef __cplusplus diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index b4ba0c0cba..0b2bc6252e 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -47,6 +47,8 @@ enum { IOMMUFD_CMD_VFIO_IOAS, IOMMUFD_CMD_HWPT_ALLOC, IOMMUFD_CMD_GET_HW_INFO, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, }; /** @@ -348,19 +350,85 @@ struct iommu_vfio_ioas { #define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) /** + * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation + * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as + * the parent HWPT in a nesting configuration. + * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is + * enforced on device attachment + */ +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, +}; + +/** + * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table + * entry attributes + * @IOMMU_VTD_S1_SRE: Supervisor request + * @IOMMU_VTD_S1_EAFE: Extended access enable + * @IOMMU_VTD_S1_WPE: Write protect enable + */ +enum iommu_hwpt_vtd_s1_flags { + IOMMU_VTD_S1_SRE = 1 << 0, + IOMMU_VTD_S1_EAFE = 1 << 1, + IOMMU_VTD_S1_WPE = 1 << 2, +}; + +/** + * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table + * info (IOMMU_HWPT_DATA_VTD_S1) + * @flags: Combination of enum iommu_hwpt_vtd_s1_flags + * @pgtbl_addr: The base address of the stage-1 page table. + * @addr_width: The address width of the stage-1 page table + * @__reserved: Must be 0 + */ +struct iommu_hwpt_vtd_s1 { + __aligned_u64 flags; + __aligned_u64 pgtbl_addr; + __u32 addr_width; + __u32 __reserved; +}; + +/** + * enum iommu_hwpt_data_type - IOMMU HWPT Data Type + * @IOMMU_HWPT_DATA_NONE: no data + * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table + */ +enum iommu_hwpt_data_type { + IOMMU_HWPT_DATA_NONE, + IOMMU_HWPT_DATA_VTD_S1, +}; + +/** * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC) * @size: sizeof(struct iommu_hwpt_alloc) - * @flags: Must be 0 + * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for - * @pt_id: The IOAS to connect this HWPT to + * @pt_id: The IOAS or HWPT to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT * @__reserved: Must be 0 + * @data_type: One of enum iommu_hwpt_data_type + * @data_len: Length of the type specific data + * @data_uptr: User pointer to the type specific data * * Explicitly allocate a hardware page table object. This is the same object * type that is returned by iommufd_device_attach() and represents the * underlying iommu driver's iommu_domain kernel object. * - * A HWPT will be created with the IOVA mappings from the given IOAS. + * A kernel-managed HWPT will be created with the mappings from the given + * IOAS via the @pt_id. The @data_type for this allocation must be set to + * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a + * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags. 
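/*
 * [Editor's sketch, not part of the diff] The allocation rules being spelled
 * out here amount to a two-step flow; ids are placeholders, and the fd is
 * the usual /dev/iommu file descriptor.
 */
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int alloc_nested(int iommufd, __u32 dev_id, __u32 ioas_id, __u64 s1_base)
{
	struct iommu_hwpt_alloc parent = {
		.size		= sizeof(parent),
		.flags		= IOMMU_HWPT_ALLOC_NEST_PARENT,
		.dev_id		= dev_id,
		.pt_id		= ioas_id,	/* kernel-managed, from an IOAS */
		.data_type	= IOMMU_HWPT_DATA_NONE,
	};
	struct iommu_hwpt_vtd_s1 vtd = {
		.pgtbl_addr	= s1_base,
		.addr_width	= 48,		/* illustrative */
	};
	struct iommu_hwpt_alloc nested = {
		.size		= sizeof(nested),
		.dev_id		= dev_id,
		.data_type	= IOMMU_HWPT_DATA_VTD_S1,
		.data_len	= sizeof(vtd),
		.data_uptr	= (__u64)(unsigned long)&vtd,
	};

	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &parent))
		return -1;
	nested.pt_id = parent.out_hwpt_id;	/* user-managed, on the parent */
	return ioctl(iommufd, IOMMU_HWPT_ALLOC, &nested);
}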
+ * + * A user-managed nested HWPT will be created from a given parent HWPT via + * @pt_id, in which the parent HWPT must be allocated previously via the + * same ioctl from a given IOAS (@pt_id). In this case, the @data_type + * must be set to a pre-defined type corresponding to an I/O page table + * type supported by the underlying IOMMU hardware. + * + * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and + * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr + * must be given. */ struct iommu_hwpt_alloc { __u32 size; @@ -369,13 +437,26 @@ struct iommu_hwpt_alloc { __u32 pt_id; __u32 out_hwpt_id; __u32 __reserved; + __u32 data_type; + __u32 data_len; + __aligned_u64 data_uptr; }; #define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC) /** + * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info + * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings + * on a nested_parent domain. + * https://www.intel.com/content/www/us/en/content-details/772415/content-details.html + */ +enum iommu_hw_info_vtd_flags { + IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0, +}; + +/** * struct iommu_hw_info_vtd - Intel VT-d hardware information * - * @flags: Must be 0 + * @flags: Combination of enum iommu_hw_info_vtd_flags * @__reserved: Must be 0 * * @cap_reg: Value of Intel VT-d capability register defined in VT-d spec @@ -405,6 +486,20 @@ enum iommu_hw_info_type { }; /** + * enum iommufd_hw_capabilities + * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking + * If available, it means the following APIs + * are supported: + * + * IOMMU_HWPT_GET_DIRTY_BITMAP + * IOMMU_HWPT_SET_DIRTY_TRACKING + * + */ +enum iommufd_hw_capabilities { + IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0, +}; + +/** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) * @flags: Must be 0 @@ -415,6 +510,8 @@ enum iommu_hw_info_type { * the iommu type specific hardware information data * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. + * @out_capabilities: Output the generic iommu capability info type as defined + * in the enum iommu_hw_capabilities. * @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -439,6 +536,81 @@ struct iommu_hw_info { __aligned_u64 data_uptr; __u32 out_data_type; __u32 __reserved; + __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) + +/* + * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty + * tracking + * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking + */ +enum iommufd_hwpt_set_dirty_tracking_flags { + IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1, +}; + +/** + * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING) + * @size: sizeof(struct iommu_hwpt_set_dirty_tracking) + * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @__reserved: Must be 0 + * + * Toggle dirty tracking on an HW pagetable. + */ +struct iommu_hwpt_set_dirty_tracking { + __u32 size; + __u32 flags; + __u32 hwpt_id; + __u32 __reserved; +}; +#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) + +/** + * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits + * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing + * any dirty bits metadata. 
This flag + * can be passed in the expectation + * where the next operation is an unmap + * of the same IOVA range. + * + */ +enum iommufd_hwpt_get_dirty_bitmap_flags { + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1, +}; + +/** + * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP) + * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap) + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags + * @__reserved: Must be 0 + * @iova: base IOVA of the bitmap first bit + * @length: IOVA range size + * @page_size: page size granularity of each bit in the bitmap + * @data: bitmap where to set the dirty bits. The bitmap bits each + * represent a page_size which you deviate from an arbitrary iova. + * + * Checking a given IOVA is dirty: + * + * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64)) + * + * Walk the IOMMU pagetables for a given IOVA range to return a bitmap + * with the dirty IOVAs. In doing so it will also by default clear any + * dirty bit metadata set in the IOPTE. + */ +struct iommu_hwpt_get_dirty_bitmap { + __u32 size; + __u32 hwpt_id; + __u32 flags; + __u32 __reserved; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 data; +}; +#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP) + #endif diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index eeb2fdcbdc..f0ed68974c 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -405,6 +405,7 @@ struct kfd_ioctl_acquire_vm_args { #define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27) #define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26) #define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25) +#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT (1 << 24) /* Allocate memory for later SVM (shared virtual memory) mapping. * @@ -659,6 +660,8 @@ enum kfd_mmio_remap { #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020 /* Keep GPU memory mapping always valid as if XNACK is disable */ #define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040 +/* Fine grained coherency between all devices using device-scope atomics */ +#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080 /** * kfd_ioctl_svm_op - SVM ioctl operations diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 13065dd961..211b86de35 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -264,6 +264,7 @@ struct kvm_xen_exit { #define KVM_EXIT_RISCV_SBI 35 #define KVM_EXIT_RISCV_CSR 36 #define KVM_EXIT_NOTIFY 37 +#define KVM_EXIT_LOONGARCH_IOCSR 38 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. 
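/*
 * [Editor's sketch, not part of the diff] The dirty-bit formula documented
 * above for IOMMU_HWPT_GET_DIRTY_BITMAP, as a helper; 'bitmap' is the buffer
 * passed via @data, and the IOVA is made range-relative first (the quoted
 * formula assumes it already is).
 */
#include <linux/types.h>

static inline int iova_is_dirty(const __u64 *bitmap, __u64 range_base,
				__u64 iova, __u64 page_size)
{
	__u64 bit = (iova - range_base) / page_size;

	return (bitmap[bit / 64] >> (bit % 64)) & 1;
}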
*/ @@ -336,6 +337,13 @@ struct kvm_run { __u32 len; __u8 is_write; } mmio; + /* KVM_EXIT_LOONGARCH_IOCSR */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; /* KVM_EXIT_HYPERCALL */ struct { __u64 nr; @@ -1192,6 +1200,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_COUNTER_OFFSET 227 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230 #ifdef KVM_CAP_IRQ_ROUTING @@ -1362,6 +1371,7 @@ struct kvm_dirty_tlb { #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL #define KVM_REG_RISCV 0x8000000000000000ULL +#define KVM_REG_LOONGARCH 0x9000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL @@ -1562,6 +1572,7 @@ struct kvm_s390_ucas_mapping { #define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags) /* Available with KVM_CAP_COUNTER_OFFSET */ #define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset) +#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index 81d09ef9aa..25c8d76775 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -31,6 +31,12 @@ struct landlock_ruleset_attr { * this access right. */ __u64 handled_access_fs; + /** + * @handled_access_net: Bitmask of actions (cf. `Network flags`_) + * that is handled by this ruleset and should then be forbidden if no + * rule explicitly allow them. + */ + __u64 handled_access_net; }; /* @@ -54,6 +60,11 @@ enum landlock_rule_type { * landlock_path_beneath_attr . */ LANDLOCK_RULE_PATH_BENEATH = 1, + /** + * @LANDLOCK_RULE_NET_PORT: Type of a &struct + * landlock_net_port_attr . + */ + LANDLOCK_RULE_NET_PORT, }; /** @@ -80,6 +91,31 @@ struct landlock_path_beneath_attr { } __attribute__((packed)); /** + * struct landlock_net_port_attr - Network port definition + * + * Argument of sys_landlock_add_rule(). + */ +struct landlock_net_port_attr { + /** + * @allowed_access: Bitmask of allowed access network for a port + * (cf. `Network flags`_). + */ + __u64 allowed_access; + /** + * @port: Network port in host endianness. + * + * It should be noted that port 0 passed to :manpage:`bind(2)` will + * bind to an available port from a specific port range. This can be + * configured thanks to the ``/proc/sys/net/ipv4/ip_local_port_range`` + * sysctl (also used for IPv6). A Landlock rule with port 0 and the + * ``LANDLOCK_ACCESS_NET_BIND_TCP`` right means that requesting to bind + * on port 0 is allowed and it will automatically translate to binding + * on the related port range. + */ + __u64 port; +}; + +/** * DOC: fs_access * * A set of actions on kernel objects may be defined by an attribute (e.g. @@ -189,4 +225,23 @@ struct landlock_path_beneath_attr { #define LANDLOCK_ACCESS_FS_TRUNCATE (1ULL << 14) /* clang-format on */ +/** + * DOC: net_access + * + * Network flags + * ~~~~~~~~~~~~~~~~ + * + * These flags enable to restrict a sandboxed process to a set of network + * actions. This is supported since the Landlock ABI version 4. + * + * TCP sockets with allowed actions: + * + * - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind a TCP socket to a local port. + * - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect an active TCP socket to + * a remote port. 
+ */ +/* clang-format off */ +#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0) +#define LANDLOCK_ACCESS_NET_CONNECT_TCP (1ULL << 1) +/* clang-format on */ #endif /* _UAPI_LINUX_LANDLOCK_H */ diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h index a03c543cb0..f05f747e44 100644 --- a/include/uapi/linux/media-bus-format.h +++ b/include/uapi/linux/media-bus-format.h @@ -34,7 +34,7 @@ #define MEDIA_BUS_FMT_FIXED 0x0001 -/* RGB - next is 0x1025 */ +/* RGB - next is 0x1026 */ #define MEDIA_BUS_FMT_RGB444_1X12 0x1016 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002 @@ -46,6 +46,7 @@ #define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007 #define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008 #define MEDIA_BUS_FMT_RGB666_1X18 0x1009 +#define MEDIA_BUS_FMT_RGB666_2X9_BE 0x1025 #define MEDIA_BUS_FMT_BGR666_1X18 0x1023 #define MEDIA_BUS_FMT_RBG888_1X24 0x100e #define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015 diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h index 6e57743628..171c5cce36 100644 --- a/include/uapi/linux/mei.h +++ b/include/uapi/linux/mei.h @@ -14,8 +14,8 @@ * FW Client (given by UUID). This opens a communication channel * between a host client and a FW client. From this point every read and write * will communicate with the associated FW client. - * Only in close() (file_operation release()) the communication between - * the clients is disconnected + * Only in close() (file_operation release()) is the communication between + * the clients disconnected. * * The IOCTL argument is a struct with a union that contains * the input parameter and the output parameter for this IOCTL. @@ -51,7 +51,7 @@ struct mei_connect_client_data { * DOC: set and unset event notification for a connected client * * The IOCTL argument is 1 for enabling event notification and 0 for - * disabling the service + * disabling the service. * Return: -EOPNOTSUPP if the device doesn't support the feature */ #define IOCTL_MEI_NOTIFY_SET _IOW('H', 0x02, __u32) @@ -59,8 +59,8 @@ struct mei_connect_client_data { /** * DOC: retrieve notification * - * The IOCTL output argument is 1 if an event was is pending and 0 otherwise - * the ioctl has to be called in order to acknowledge pending event + * The IOCTL output argument is 1 if an event was pending and 0 otherwise. + * The ioctl has to be called in order to acknowledge a pending event. * * Return: -EOPNOTSUPP if the device doesn't support the feature */ @@ -98,14 +98,14 @@ struct mei_connect_client_data_vtag { * FW Client (given by UUID), and virtual tag (vtag). * The IOCTL opens a communication channel between a host client and * a FW client on a tagged channel. From this point on, every read - * and write will communicate with the associated FW client with + * and write will communicate with the associated FW client * on the tagged channel. * Upon close() the communication is terminated. * * The IOCTL argument is a struct with a union that contains * the input parameter and the output parameter for this IOCTL. * - * The input parameter is UUID of the FW Client, a vtag [0,255] + * The input parameter is UUID of the FW Client, a vtag [0,255]. * The output parameter is the properties of the FW client * (FW protocol version and max message size).
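/*
 * [Editor's sketch, not part of the diff] Tying the Landlock pieces above
 * together: handle both TCP rights in the ruleset, then allow connects to a
 * single port before restricting self.
 */
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int restrict_to_https_connects(void)
{
	struct landlock_ruleset_attr rs_attr = {
		.handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
				      LANDLOCK_ACCESS_NET_CONNECT_TCP,
	};
	struct landlock_net_port_attr net_attr = {
		.allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
		.port		= 443,	/* illustrative: HTTPS only */
	};
	int rs_fd = syscall(SYS_landlock_create_ruleset, &rs_attr,
			    sizeof(rs_attr), 0);

	if (rs_fd < 0)
		return -1;
	if (syscall(SYS_landlock_add_rule, rs_fd, LANDLOCK_RULE_NET_PORT,
		    &net_attr, 0))
		return -1;
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return syscall(SYS_landlock_restrict_self, rs_fd, 0);
}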
* diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h index 046d0ccba4..a8963f7ef4 100644 --- a/include/uapi/linux/mempolicy.h +++ b/include/uapi/linux/mempolicy.h @@ -48,7 +48,7 @@ enum { #define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform to policy */ #define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to policy */ -#define MPOL_MF_LAZY (1<<3) /* Modifies '_MOVE: lazy migrate on fault */ +#define MPOL_MF_LAZY (1<<3) /* UNSUPPORTED FLAG: Lazy migrate on fault */ #define MPOL_MF_INTERNAL (1<<4) /* Internal flags start here */ #define MPOL_MF_VALID (MPOL_MF_STRICT | \ diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index ee9c49f949..a6451561f3 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -23,91 +23,20 @@ #define MPTCP_SUBFLOW_FLAG_CONNECTED _BITUL(7) #define MPTCP_SUBFLOW_FLAG_MAPVALID _BITUL(8) -enum { - MPTCP_SUBFLOW_ATTR_UNSPEC, - MPTCP_SUBFLOW_ATTR_TOKEN_REM, - MPTCP_SUBFLOW_ATTR_TOKEN_LOC, - MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ, - MPTCP_SUBFLOW_ATTR_MAP_SEQ, - MPTCP_SUBFLOW_ATTR_MAP_SFSEQ, - MPTCP_SUBFLOW_ATTR_SSN_OFFSET, - MPTCP_SUBFLOW_ATTR_MAP_DATALEN, - MPTCP_SUBFLOW_ATTR_FLAGS, - MPTCP_SUBFLOW_ATTR_ID_REM, - MPTCP_SUBFLOW_ATTR_ID_LOC, - MPTCP_SUBFLOW_ATTR_PAD, - __MPTCP_SUBFLOW_ATTR_MAX -}; - -#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1) - -/* netlink interface */ -#define MPTCP_PM_NAME "mptcp_pm" #define MPTCP_PM_CMD_GRP_NAME "mptcp_pm_cmds" #define MPTCP_PM_EV_GRP_NAME "mptcp_pm_events" -#define MPTCP_PM_VER 0x1 - -/* - * ATTR types defined for MPTCP - */ -enum { - MPTCP_PM_ATTR_UNSPEC, - - MPTCP_PM_ATTR_ADDR, /* nested address */ - MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */ - MPTCP_PM_ATTR_SUBFLOWS, /* u32 */ - MPTCP_PM_ATTR_TOKEN, /* u32 */ - MPTCP_PM_ATTR_LOC_ID, /* u8 */ - MPTCP_PM_ATTR_ADDR_REMOTE, /* nested address */ - - __MPTCP_PM_ATTR_MAX -}; - -#define MPTCP_PM_ATTR_MAX (__MPTCP_PM_ATTR_MAX - 1) - -enum { - MPTCP_PM_ADDR_ATTR_UNSPEC, - - MPTCP_PM_ADDR_ATTR_FAMILY, /* u16 */ - MPTCP_PM_ADDR_ATTR_ID, /* u8 */ - MPTCP_PM_ADDR_ATTR_ADDR4, /* struct in_addr */ - MPTCP_PM_ADDR_ATTR_ADDR6, /* struct in6_addr */ - MPTCP_PM_ADDR_ATTR_PORT, /* u16 */ - MPTCP_PM_ADDR_ATTR_FLAGS, /* u32 */ - MPTCP_PM_ADDR_ATTR_IF_IDX, /* s32 */ - - __MPTCP_PM_ADDR_ATTR_MAX -}; - -#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1) - -#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0) -#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) -#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) -#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3) -#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4) - -enum { - MPTCP_PM_CMD_UNSPEC, - MPTCP_PM_CMD_ADD_ADDR, - MPTCP_PM_CMD_DEL_ADDR, - MPTCP_PM_CMD_GET_ADDR, - MPTCP_PM_CMD_FLUSH_ADDRS, - MPTCP_PM_CMD_SET_LIMITS, - MPTCP_PM_CMD_GET_LIMITS, - MPTCP_PM_CMD_SET_FLAGS, - MPTCP_PM_CMD_ANNOUNCE, - MPTCP_PM_CMD_REMOVE, - MPTCP_PM_CMD_SUBFLOW_CREATE, - MPTCP_PM_CMD_SUBFLOW_DESTROY, - - __MPTCP_PM_CMD_AFTER_LAST -}; +#include <linux/mptcp_pm.h> #define MPTCP_INFO_FLAG_FALLBACK _BITUL(0) #define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1) +#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0) +#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) +#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) +#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3) +#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4) + struct mptcp_info { __u8 mptcpi_subflows; __u8 mptcpi_add_addr_signal; @@ -130,93 +59,6 @@ struct mptcp_info { __u64 mptcpi_bytes_acked; }; -/* - * MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6, - * 
sport, dport - * A new MPTCP connection has been created. It is the good time to allocate - * memory and send ADD_ADDR if needed. Depending on the traffic-patterns - * it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent. - * - * MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6, - * sport, dport - * A MPTCP connection is established (can start new subflows). - * - * MPTCP_EVENT_CLOSED: token - * A MPTCP connection has stopped. - * - * MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] - * A new address has been announced by the peer. - * - * MPTCP_EVENT_REMOVED: token, rem_id - * An address has been lost by the peer. - * - * MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id, - * saddr4 | saddr6, daddr4 | daddr6, sport, - * dport, backup, if_idx [, error] - * A new subflow has been established. 'error' should not be set. - * - * MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6, - * daddr4 | daddr6, sport, dport, backup, if_idx - * [, error] - * A subflow has been closed. An error (copy of sk_err) could be set if an - * error has been detected for this subflow. - * - * MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6, - * daddr4 | daddr6, sport, dport, backup, if_idx - * [, error] - * The priority of a subflow has changed. 'error' should not be set. - * - * MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6 - * A new PM listener is created. - * - * MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6 - * A PM listener is closed. - */ -enum mptcp_event_type { - MPTCP_EVENT_UNSPEC = 0, - MPTCP_EVENT_CREATED = 1, - MPTCP_EVENT_ESTABLISHED = 2, - MPTCP_EVENT_CLOSED = 3, - - MPTCP_EVENT_ANNOUNCED = 6, - MPTCP_EVENT_REMOVED = 7, - - MPTCP_EVENT_SUB_ESTABLISHED = 10, - MPTCP_EVENT_SUB_CLOSED = 11, - - MPTCP_EVENT_SUB_PRIORITY = 13, - - MPTCP_EVENT_LISTENER_CREATED = 15, - MPTCP_EVENT_LISTENER_CLOSED = 16, -}; - -enum mptcp_event_attr { - MPTCP_ATTR_UNSPEC = 0, - - MPTCP_ATTR_TOKEN, /* u32 */ - MPTCP_ATTR_FAMILY, /* u16 */ - MPTCP_ATTR_LOC_ID, /* u8 */ - MPTCP_ATTR_REM_ID, /* u8 */ - MPTCP_ATTR_SADDR4, /* be32 */ - MPTCP_ATTR_SADDR6, /* struct in6_addr */ - MPTCP_ATTR_DADDR4, /* be32 */ - MPTCP_ATTR_DADDR6, /* struct in6_addr */ - MPTCP_ATTR_SPORT, /* be16 */ - MPTCP_ATTR_DPORT, /* be16 */ - MPTCP_ATTR_BACKUP, /* u8 */ - MPTCP_ATTR_ERROR, /* u8 */ - MPTCP_ATTR_FLAGS, /* u16 */ - MPTCP_ATTR_TIMEOUT, /* u32 */ - MPTCP_ATTR_IF_IDX, /* s32 */ - MPTCP_ATTR_RESET_REASON,/* u32 */ - MPTCP_ATTR_RESET_FLAGS, /* u32 */ - MPTCP_ATTR_SERVER_SIDE, /* u8 */ - - __MPTCP_ATTR_AFTER_LAST -}; - -#define MPTCP_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1) - /* MPTCP Reset reason codes, rfc8684 */ #define MPTCP_RST_EUNSPEC 0 #define MPTCP_RST_EMPTCP 1 diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h new file mode 100644 index 0000000000..b5d11aece4 --- /dev/null +++ b/include/uapi/linux/mptcp_pm.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/mptcp.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_MPTCP_PM_H +#define _UAPI_LINUX_MPTCP_PM_H + +#define MPTCP_PM_NAME "mptcp_pm" +#define MPTCP_PM_VER 1 + +/** + * enum mptcp_event_type + * @MPTCP_EVENT_UNSPEC: unused event + * @MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport A new MPTCP connection has been created. 
It is the good time + * to allocate memory and send ADD_ADDR if needed. Depending on the + * traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED + * is sent. + * @MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6, + * sport, dport A MPTCP connection is established (can start new subflows). + * @MPTCP_EVENT_CLOSED: token A MPTCP connection has stopped. + * @MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] A + * new address has been announced by the peer. + * @MPTCP_EVENT_REMOVED: token, rem_id An address has been lost by the peer. + * @MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id, saddr4 | + * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error] A new + * subflow has been established. 'error' should not be set. + * @MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6, + * daddr4 | daddr6, sport, dport, backup, if_idx [, error] A subflow has been + * closed. An error (copy of sk_err) could be set if an error has been + * detected for this subflow. + * @MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6, + * daddr4 | daddr6, sport, dport, backup, if_idx [, error] The priority of a + * subflow has changed. 'error' should not be set. + * @MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6 A new PM + * listener is created. + * @MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6 A PM listener + * is closed. + */ +enum mptcp_event_type { + MPTCP_EVENT_UNSPEC, + MPTCP_EVENT_CREATED, + MPTCP_EVENT_ESTABLISHED, + MPTCP_EVENT_CLOSED, + MPTCP_EVENT_ANNOUNCED = 6, + MPTCP_EVENT_REMOVED, + MPTCP_EVENT_SUB_ESTABLISHED = 10, + MPTCP_EVENT_SUB_CLOSED, + MPTCP_EVENT_SUB_PRIORITY = 13, + MPTCP_EVENT_LISTENER_CREATED = 15, + MPTCP_EVENT_LISTENER_CLOSED, +}; + +enum { + MPTCP_PM_ADDR_ATTR_UNSPEC, + MPTCP_PM_ADDR_ATTR_FAMILY, + MPTCP_PM_ADDR_ATTR_ID, + MPTCP_PM_ADDR_ATTR_ADDR4, + MPTCP_PM_ADDR_ATTR_ADDR6, + MPTCP_PM_ADDR_ATTR_PORT, + MPTCP_PM_ADDR_ATTR_FLAGS, + MPTCP_PM_ADDR_ATTR_IF_IDX, + + __MPTCP_PM_ADDR_ATTR_MAX +}; +#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1) + +enum { + MPTCP_SUBFLOW_ATTR_UNSPEC, + MPTCP_SUBFLOW_ATTR_TOKEN_REM, + MPTCP_SUBFLOW_ATTR_TOKEN_LOC, + MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ, + MPTCP_SUBFLOW_ATTR_MAP_SEQ, + MPTCP_SUBFLOW_ATTR_MAP_SFSEQ, + MPTCP_SUBFLOW_ATTR_SSN_OFFSET, + MPTCP_SUBFLOW_ATTR_MAP_DATALEN, + MPTCP_SUBFLOW_ATTR_FLAGS, + MPTCP_SUBFLOW_ATTR_ID_REM, + MPTCP_SUBFLOW_ATTR_ID_LOC, + MPTCP_SUBFLOW_ATTR_PAD, + + __MPTCP_SUBFLOW_ATTR_MAX +}; +#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1) + +enum { + MPTCP_PM_ENDPOINT_ADDR = 1, + + __MPTCP_PM_ENDPOINT_MAX +}; +#define MPTCP_PM_ENDPOINT_MAX (__MPTCP_PM_ENDPOINT_MAX - 1) + +enum { + MPTCP_PM_ATTR_UNSPEC, + MPTCP_PM_ATTR_ADDR, + MPTCP_PM_ATTR_RCV_ADD_ADDRS, + MPTCP_PM_ATTR_SUBFLOWS, + MPTCP_PM_ATTR_TOKEN, + MPTCP_PM_ATTR_LOC_ID, + MPTCP_PM_ATTR_ADDR_REMOTE, + + __MPTCP_ATTR_AFTER_LAST +}; +#define MPTCP_PM_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1) + +enum mptcp_event_attr { + MPTCP_ATTR_UNSPEC, + MPTCP_ATTR_TOKEN, + MPTCP_ATTR_FAMILY, + MPTCP_ATTR_LOC_ID, + MPTCP_ATTR_REM_ID, + MPTCP_ATTR_SADDR4, + MPTCP_ATTR_SADDR6, + MPTCP_ATTR_DADDR4, + MPTCP_ATTR_DADDR6, + MPTCP_ATTR_SPORT, + MPTCP_ATTR_DPORT, + MPTCP_ATTR_BACKUP, + MPTCP_ATTR_ERROR, + MPTCP_ATTR_FLAGS, + MPTCP_ATTR_TIMEOUT, + MPTCP_ATTR_IF_IDX, + MPTCP_ATTR_RESET_REASON, + MPTCP_ATTR_RESET_FLAGS, + MPTCP_ATTR_SERVER_SIDE, + + __MPTCP_ATTR_MAX +}; +#define MPTCP_ATTR_MAX (__MPTCP_ATTR_MAX - 1) + +enum { 
+ MPTCP_PM_CMD_UNSPEC, + MPTCP_PM_CMD_ADD_ADDR, + MPTCP_PM_CMD_DEL_ADDR, + MPTCP_PM_CMD_GET_ADDR, + MPTCP_PM_CMD_FLUSH_ADDRS, + MPTCP_PM_CMD_SET_LIMITS, + MPTCP_PM_CMD_GET_LIMITS, + MPTCP_PM_CMD_SET_FLAGS, + MPTCP_PM_CMD_ANNOUNCE, + MPTCP_PM_CMD_REMOVE, + MPTCP_PM_CMD_SUBFLOW_CREATE, + MPTCP_PM_CMD_SUBFLOW_DESTROY, + + __MPTCP_PM_CMD_AFTER_LAST +}; +#define MPTCP_PM_CMD_MAX (__MPTCP_PM_CMD_AFTER_LAST - 1) + +#endif /* _UAPI_LINUX_MPTCP_PM_H */ diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h index c1634b95c2..2943a151d4 100644 --- a/include/uapi/linux/netdev.h +++ b/include/uapi/linux/netdev.h @@ -38,11 +38,27 @@ enum netdev_xdp_act { NETDEV_XDP_ACT_MASK = 127, }; +/** + * enum netdev_xdp_rx_metadata + * @NETDEV_XDP_RX_METADATA_TIMESTAMP: Device is capable of exposing receive HW + * timestamp via bpf_xdp_metadata_rx_timestamp(). + * @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet + * hash via bpf_xdp_metadata_rx_hash(). + */ +enum netdev_xdp_rx_metadata { + NETDEV_XDP_RX_METADATA_TIMESTAMP = 1, + NETDEV_XDP_RX_METADATA_HASH = 2, + + /* private: */ + NETDEV_XDP_RX_METADATA_MASK = 3, +}; + enum { NETDEV_A_DEV_IFINDEX = 1, NETDEV_A_DEV_PAD, NETDEV_A_DEV_XDP_FEATURES, NETDEV_A_DEV_XDP_ZC_MAX_SEGS, + NETDEV_A_DEV_XDP_RX_METADATA_FEATURES, __NETDEV_A_DEV_MAX, NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index ca30232b7b..117c6a9b84 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -285,9 +285,11 @@ enum nft_rule_attributes { /** * enum nft_rule_compat_flags - nf_tables rule compat flags * + * @NFT_RULE_COMPAT_F_UNUSED: unused * @NFT_RULE_COMPAT_F_INV: invert the check result */ enum nft_rule_compat_flags { + NFT_RULE_COMPAT_F_UNUSED = (1 << 0), NFT_RULE_COMPAT_F_INV = (1 << 1), NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV, }; diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index e2ae82e3f9..f87aaf28a6 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -298,6 +298,8 @@ struct nla_bitfield32 { * entry has attributes again, the policy for those inner ones * and the corresponding maxtype may be specified. 
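/*
 * [Editor's sketch, not part of the diff] The netdev_xdp_rx_metadata bits
 * earlier in this hunk advertise kfuncs that XDP programs call directly;
 * the __ksym extern below follows the usual convention for declaring kfuncs.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;

SEC("xdp")
int rx_hwts(struct xdp_md *ctx)
{
	__u64 ts;

	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))	/* 0 means success */
		bpf_printk("rx hw timestamp: %llu", ts);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";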
* @NL_ATTR_TYPE_BITFIELD32: &struct nla_bitfield32 attribute + * @NL_ATTR_TYPE_SINT: 32-bit or 64-bit signed attribute, aligned to 4B + * @NL_ATTR_TYPE_UINT: 32-bit or 64-bit unsigned attribute, aligned to 4B */ enum netlink_attribute_type { NL_ATTR_TYPE_INVALID, @@ -322,6 +324,9 @@ enum netlink_attribute_type { NL_ATTR_TYPE_NESTED_ARRAY, NL_ATTR_TYPE_BITFIELD32, + + NL_ATTR_TYPE_SINT, + NL_ATTR_TYPE_UINT, }; /** diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h new file mode 100644 index 0000000000..3cd044edee --- /dev/null +++ b/include/uapi/linux/nfsd_netlink.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/nfsd.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_NFSD_NETLINK_H +#define _UAPI_LINUX_NFSD_NETLINK_H + +#define NFSD_FAMILY_NAME "nfsd" +#define NFSD_FAMILY_VERSION 1 + +enum { + NFSD_A_RPC_STATUS_XID = 1, + NFSD_A_RPC_STATUS_FLAGS, + NFSD_A_RPC_STATUS_PROG, + NFSD_A_RPC_STATUS_VERSION, + NFSD_A_RPC_STATUS_PROC, + NFSD_A_RPC_STATUS_SERVICE_TIME, + NFSD_A_RPC_STATUS_PAD, + NFSD_A_RPC_STATUS_SADDR4, + NFSD_A_RPC_STATUS_DADDR4, + NFSD_A_RPC_STATUS_SADDR6, + NFSD_A_RPC_STATUS_DADDR6, + NFSD_A_RPC_STATUS_SPORT, + NFSD_A_RPC_STATUS_DPORT, + NFSD_A_RPC_STATUS_COMPOUND_OPS, + + __NFSD_A_RPC_STATUS_MAX, + NFSD_A_RPC_STATUS_MAX = (__NFSD_A_RPC_STATUS_MAX - 1) +}; + +enum { + NFSD_CMD_RPC_STATUS_GET = 1, + + __NFSD_CMD_MAX, + NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1) +}; + +#endif /* _UAPI_LINUX_NFSD_NETLINK_H */ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 88eb85c630..dced2c49da 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -167,7 +167,7 @@ * following events occur. * a) Expiration of hardware timer whose expiration time is set to maximum * coalescing delay of matching coalesce rule. - * b) Coalescing buffer in hardware reaches it's limit. + * b) Coalescing buffer in hardware reaches its limit. * c) Packet doesn't match any of the configured coalesce rules. * * User needs to configure following parameters for creating a coalesce @@ -326,7 +326,7 @@ /** * DOC: Multi-Link Operation * - * In Multi-Link Operation, a connection between to MLDs utilizes multiple + * In Multi-Link Operation, a connection between two MLDs utilizes multiple * links. To use this in nl80211, various commands and responses now need * to or will include the new %NL80211_ATTR_MLO_LINKS attribute. * Additionally, various commands that need to operate on a specific link @@ -335,6 +335,15 @@ */ /** + * DOC: OWE DH IE handling offload + * + * By setting @NL80211_EXT_FEATURE_OWE_OFFLOAD flag, drivers can indicate + * kernel/application space to avoid DH IE handling. When this flag is + * advertised, the driver/device will take care of DH IE inclusion and + * processing of peer DH IE to generate PMK. + */ + +/** * enum nl80211_commands - supported nl80211 commands * * @NL80211_CMD_UNSPEC: unspecified command to catch errors @@ -2690,11 +2699,13 @@ enum nl80211_commands { * * @NL80211_ATTR_FILS_DISCOVERY: Optional parameter to configure FILS * discovery. It is a nested attribute, see - * &enum nl80211_fils_discovery_attributes. + * &enum nl80211_fils_discovery_attributes. Userspace should pass an empty + * nested attribute to disable this feature and delete the templates. * * @NL80211_ATTR_UNSOL_BCAST_PROBE_RESP: Optional parameter to configure * unsolicited broadcast probe response. 
It is a nested attribute, see - * &enum nl80211_unsol_bcast_probe_resp_attributes. + * &enum nl80211_unsol_bcast_probe_resp_attributes. Userspace should pass an empty + * nested attribute to disable this feature and delete the templates. * * @NL80211_ATTR_S1G_CAPABILITY: S1G Capability information element (from * association request when used with NL80211_CMD_NEW_STATION) @@ -4213,6 +4224,8 @@ enum nl80211_wmm_rule { * as the primary or any of the secondary channels isn't possible * @NL80211_FREQUENCY_ATTR_NO_EHT: EHT operation is not allowed on this channel * in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_PSD: Power spectral density (in dBm) that + * is allowed on this channel in current regulatory domain. * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -4251,6 +4264,7 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_16MHZ, NL80211_FREQUENCY_ATTR_NO_320MHZ, NL80211_FREQUENCY_ATTR_NO_EHT, + NL80211_FREQUENCY_ATTR_PSD, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@ -4351,6 +4365,8 @@ enum nl80211_reg_type { * a given frequency range. The value is in mBm (100 * dBm). * @NL80211_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds. * If not present or 0 default CAC time will be used. + * @NL80211_ATTR_POWER_RULE_PSD: power spectral density (in dBm). + * This could be negative. * @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number * currently defined * @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use @@ -4368,6 +4384,8 @@ enum nl80211_reg_rule_attr { NL80211_ATTR_DFS_CAC_TIME, + NL80211_ATTR_POWER_RULE_PSD, + /* keep last */ __NL80211_REG_RULE_ATTR_AFTER_LAST, NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1 @@ -4451,6 +4469,7 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_NO_HE: HE operation not allowed * @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed * @NL80211_RRF_NO_EHT: EHT operation not allowed + * @NL80211_RRF_PSD: Ruleset has power spectral density value */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -4471,6 +4490,7 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_HE = 1<<17, NL80211_RRF_NO_320MHZ = 1<<18, NL80211_RRF_NO_EHT = 1<<19, + NL80211_RRF_PSD = 1<<20, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR @@ -5038,7 +5058,7 @@ enum nl80211_bss_scan_width { * elements from a Beacon frame (bin); not present if no Beacon frame has * yet been received * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel - * (u32, enum nl80211_bss_scan_width) + * (u32, enum nl80211_bss_scan_width) - No longer used! * @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64) * (not present if no beacon frame has been received yet) * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and @@ -6400,6 +6420,12 @@ enum nl80211_feature_flags { * in authentication and deauthentication frames sent to unassociated peer * using @NL80211_CMD_FRAME. * + * @NL80211_EXT_FEATURE_OWE_OFFLOAD: Driver/Device wants to do OWE DH IE + * handling in station mode. + * + * @NL80211_EXT_FEATURE_OWE_OFFLOAD_AP: Driver/Device wants to do OWE DH IE + * handling in AP mode. + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. 
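For illustration only (modeled on what wpa_supplicant does; not taken from this diff): extended features such as the new OWE offload bits are reported as a byte array in NL80211_ATTR_EXT_FEATURES, so a consumer would test a bit along these lines:

#include <stdbool.h>
#include <stddef.h>
#include <linux/nl80211.h>

/* Test one enum nl80211_ext_feature_index bit in the attribute payload. */
static bool ext_feature_isset(const unsigned char *ext_features, size_t len,
			      enum nl80211_ext_feature_index idx)
{
	size_t byte = idx / 8;

	return byte < len && (ext_features[byte] & (1 << (idx % 8)));
}

/* e.g. ext_feature_isset(data, len, NL80211_EXT_FEATURE_OWE_OFFLOAD) */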
*/ @@ -6471,6 +6497,8 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_PUNCT, NL80211_EXT_FEATURE_SECURE_NAN, NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA, + NL80211_EXT_FEATURE_OWE_OFFLOAD, + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -7606,7 +7634,7 @@ enum nl80211_iftype_akm_attributes { * @NL80211_FILS_DISCOVERY_ATTR_INT_MIN: Minimum packet interval (u32, TU). * Allowed range: 0..10000 (TU = Time Unit) * @NL80211_FILS_DISCOVERY_ATTR_INT_MAX: Maximum packet interval (u32, TU). - * Allowed range: 0..10000 (TU = Time Unit) + * Allowed range: 0..10000 (TU = Time Unit). If set to 0, the feature is disabled. * @NL80211_FILS_DISCOVERY_ATTR_TMPL: Template data for FILS discovery action * frame including the headers. * @@ -7639,7 +7667,8 @@ enum nl80211_fils_discovery_attributes { * * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT: Maximum packet interval (u32, TU). * Allowed range: 0..20 (TU = Time Unit). IEEE P802.11ax/D6.0 - * 26.17.2.3.2 (AP behavior for fast passive scanning). + * 26.17.2.3.2 (AP behavior for fast passive scanning). If set to 0, the feature is + * disabled. * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL: Unsolicited broadcast probe response * frame template (binary). * diff --git a/include/uapi/linux/npcm-video.h b/include/uapi/linux/npcm-video.h new file mode 100644 index 0000000000..1d39f6f38c --- /dev/null +++ b/include/uapi/linux/npcm-video.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Controls header for NPCM video driver + * + * Copyright (C) 2022 Nuvoton Technologies + */ + +#ifndef _UAPI_LINUX_NPCM_VIDEO_H +#define _UAPI_LINUX_NPCM_VIDEO_H + +#include <linux/v4l2-controls.h> + +/* + * Check Documentation/userspace-api/media/drivers/npcm-video.rst for control + * details. + */ + +/* + * This control is meant to set the mode of NPCM Video Capture/Differentiation + * (VCD) engine. + * + * The VCD engine supports two modes: + * COMPLETE - Capture the next complete frame into memory. + * DIFF - Compare the incoming frame with the frame stored in memory, and + * updates the differentiated frame in memory. + */ +#define V4L2_CID_NPCM_CAPTURE_MODE (V4L2_CID_USER_NPCM_BASE + 0) + +enum v4l2_npcm_capture_mode { + V4L2_NPCM_CAPTURE_MODE_COMPLETE = 0, /* COMPLETE mode */ + V4L2_NPCM_CAPTURE_MODE_DIFF = 1, /* DIFF mode */ +}; + +/* + * This control is meant to get the count of compressed HEXTILE rectangles which + * is relevant to the number of differentiated frames if VCD is in DIFF mode. + * And the count will always be 1 if VCD is in COMPLETE mode. 
+ */ +#define V4L2_CID_NPCM_RECT_COUNT (V4L2_CID_USER_NPCM_BASE + 1) + +#endif /* _UAPI_LINUX_NPCM_VIDEO_H */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index e5f558d964..a39193213f 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -80,6 +80,7 @@ #define PCI_HEADER_TYPE_NORMAL 0 #define PCI_HEADER_TYPE_BRIDGE 1 #define PCI_HEADER_TYPE_CARDBUS 2 +#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */ #define PCI_BIST 0x0f /* 8 bits */ #define PCI_BIST_CODE_MASK 0x0f /* Return result */ @@ -637,6 +638,7 @@ #define PCI_EXP_RTCAP 0x1e /* Root Capabilities */ #define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ #define PCI_EXP_RTSTA 0x20 /* Root Status */ +#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */ #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ #define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ /* @@ -930,12 +932,13 @@ /* Process Address Space ID */ #define PCI_PASID_CAP 0x04 /* PASID feature register */ -#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */ -#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */ +#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */ +#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */ +#define PCI_PASID_CAP_WIDTH 0x1f00 #define PCI_PASID_CTRL 0x06 /* PASID control register */ -#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */ -#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */ -#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */ +#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */ +#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */ +#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */ #define PCI_EXT_CAP_PASID_SIZEOF 8 /* Single Root I/O Virtualization */ @@ -975,6 +978,8 @@ #define PCI_LTR_VALUE_MASK 0x000003ff #define PCI_LTR_SCALE_MASK 0x00001c00 #define PCI_LTR_SCALE_SHIFT 10 +#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */ +#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */ #define PCI_EXT_CAP_LTR_SIZEOF 8 /* Access Control Service */ @@ -1042,9 +1047,16 @@ #define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */ #define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */ #define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */ #define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */ +#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */ #define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */ @@ -1088,6 +1100,8 @@ #define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */ #define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ #define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ +#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */ +#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* 
T_POWER_ON Value */ /* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */ #define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */ diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 3f85ae5780..f762a10bfb 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -941,15 +941,22 @@ enum { TCA_FQ_HORIZON_DROP, /* drop packets beyond horizon, or cap their EDT */ + TCA_FQ_PRIOMAP, /* prio2band */ + + TCA_FQ_WEIGHTS, /* Weights for each band */ + __TCA_FQ_MAX }; #define TCA_FQ_MAX (__TCA_FQ_MAX - 1) +#define FQ_BANDS 3 +#define FQ_MIN_WEIGHT 16384 + struct tc_fq_qd_stats { __u64 gc_flows; - __u64 highprio_packets; - __u64 tcp_retrans; + __u64 highprio_packets; /* obsolete */ + __u64 tcp_retrans; /* obsolete */ __u64 throttled; __u64 flows_plimit; __u64 pkts_too_long; @@ -962,6 +969,10 @@ struct tc_fq_qd_stats { __u64 ce_mark; /* packets above ce_threshold */ __u64 horizon_drops; __u64 horizon_caps; + __u64 fastpath_packets; + __u64 band_drops[FQ_BANDS]; + __u32 band_pkt_count[FQ_BANDS]; + __u32 pad; }; /* Heavy-Hitter Filter */ diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 1c9da48531..b44ba7dcde 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -68,6 +68,7 @@ typedef enum { SEV_RET_INVALID_PARAM, SEV_RET_RESOURCE_LIMIT, SEV_RET_SECURE_DATA_INVALID, + SEV_RET_INVALID_KEY = 0x27, SEV_RET_MAX, } sev_ret_code; diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index 05cc35fc94..da700999ca 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -224,6 +224,8 @@ struct ptp_pin_desc { _IOWR(PTP_CLK_MAGIC, 17, struct ptp_sys_offset_precise) #define PTP_SYS_OFFSET_EXTENDED2 \ _IOWR(PTP_CLK_MAGIC, 18, struct ptp_sys_offset_extended) +#define PTP_MASK_CLEAR_ALL _IO(PTP_CLK_MAGIC, 19) +#define PTP_MASK_EN_SINGLE _IOW(PTP_CLK_MAGIC, 20, unsigned int) struct ptp_extts_event { struct ptp_clock_time t; /* Time event occurred. */
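A minimal userspace sketch of the two PTP ioctls added above; the device path and channel index are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	unsigned int chan = 0;			/* channel to keep; assumed */
	int fd = open("/dev/ptp0", O_RDWR);	/* clock device; assumed */

	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}
	/* Drop all channels from the timestamp event mask, then re-enable one. */
	if (ioctl(fd, PTP_MASK_CLEAR_ALL) < 0)
		perror("PTP_MASK_CLEAR_ALL");
	else if (ioctl(fd, PTP_MASK_EN_SINGLE, &chan) < 0)
		perror("PTP_MASK_EN_SINGLE");
	close(fd);
	return 0;
}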
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 51c13cf9c5..3b687d20c9 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -502,13 +502,17 @@ enum { #define RTAX_MAX (__RTAX_MAX - 1) -#define RTAX_FEATURE_ECN (1 << 0) -#define RTAX_FEATURE_SACK (1 << 1) -#define RTAX_FEATURE_TIMESTAMP (1 << 2) -#define RTAX_FEATURE_ALLFRAG (1 << 3) - -#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \ - RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG) +#define RTAX_FEATURE_ECN (1 << 0) +#define RTAX_FEATURE_SACK (1 << 1) /* unused */ +#define RTAX_FEATURE_TIMESTAMP (1 << 2) /* unused */ +#define RTAX_FEATURE_ALLFRAG (1 << 3) /* unused */ +#define RTAX_FEATURE_TCP_USEC_TS (1 << 4) + +#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | \ + RTAX_FEATURE_SACK | \ + RTAX_FEATURE_TIMESTAMP | \ + RTAX_FEATURE_ALLFRAG | \ + RTAX_FEATURE_TCP_USEC_TS) struct rta_session { __u8 proto; diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h index f2c4589d4d..9066238568 100644 --- a/include/uapi/linux/sched/types.h +++ b/include/uapi/linux/sched/types.h @@ -4,10 +4,6 @@ #include <linux/types.h> -struct sched_param { - int sched_priority; -}; - #define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ #define SCHED_ATTR_SIZE_VER1 56 /* add: util_{min,max} */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index add349889d..9c007a1063 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -1,22 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* - * linux/drivers/char/serial_core.h - * * Copyright (C) 2000 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _UAPILINUX_SERIAL_CORE_H #define _UAPILINUX_SERIAL_CORE_H @@ -27,6 +11,8 @@ * The type definitions. These are from Ted Ts'o's serial.h * For historical reasons the values from 0 to 13 are defined * in the include/uapi/linux/serial.h, do not define them here. + * Values 0 to 19 are used by setserial from busybox and must never + * be modified. */ #define PORT_NS16550A 14 #define PORT_XSCALE 15 @@ -245,4 +231,7 @@ /* Sunplus UART */ #define PORT_SUNPLUS 123 +/* Generic type identifier for ports whose type is not important to userspace. */
+#define PORT_GENERIC (-1) + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h index 08b3527e1b..9c987b04e2 100644 --- a/include/uapi/linux/serial_reg.h +++ b/include/uapi/linux/serial_reg.h @@ -49,6 +49,7 @@ #define UART_IIR_FIFO_ENABLED_8250 0x00 /* 8250: no FIFO */ #define UART_IIR_FIFO_ENABLED_16550 0x80 /* 16550: (broken/unusable) FIFO */ #define UART_IIR_FIFO_ENABLED_16550A 0xc0 /* 16550A: FIFO enabled */ +#define UART_IIR_FIFO_ENABLED_16750 0xe0 /* 16750: 64 bytes FIFO enabled */ #define UART_FCR 2 /* Out: FIFO Control Register */ #define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */ diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h index 2aa39112cf..154a87a1ec 100644 --- a/include/uapi/linux/sev-guest.h +++ b/include/uapi/linux/sev-guest.h @@ -14,9 +14,11 @@ #include <linux/types.h> +#define SNP_REPORT_USER_DATA_SIZE 64 + struct snp_report_req { /* user data that should be included in the report */ - __u8 user_data[64]; + __u8 user_data[SNP_REPORT_USER_DATA_SIZE]; /* The vmpl level to be included in the report */ __u32 vmpl; diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 26f33a4c25..a0819c6a59 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -24,7 +24,7 @@ enum IPSTATS_MIB_INOCTETS, /* InOctets */ IPSTATS_MIB_INDELIVERS, /* InDelivers */ IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */ - IPSTATS_MIB_OUTPKTS, /* OutRequests */ + IPSTATS_MIB_OUTREQUESTS, /* OutRequests */ IPSTATS_MIB_OUTOCTETS, /* OutOctets */ /* other fields */ IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */ @@ -57,6 +57,7 @@ enum IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */ IPSTATS_MIB_CEPKTS, /* InCEPkts */ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */ + IPSTATS_MIB_OUTPKTS, /* OutTransmits */ __IPSTATS_MIB_MAX }; @@ -296,6 +297,11 @@ enum LINUX_MIB_TCPMIGRATEREQSUCCESS, /* TCPMigrateReqSuccess */ LINUX_MIB_TCPMIGRATEREQFAILURE, /* TCPMigrateReqFailure */ LINUX_MIB_TCPPLBREHASH, /* TCPPLBRehash */ + LINUX_MIB_TCPAOREQUIRED, /* TCPAORequired */ + LINUX_MIB_TCPAOBAD, /* TCPAOBad */ + LINUX_MIB_TCPAOKEYNOTFOUND, /* TCPAOKeyNotFound */ + LINUX_MIB_TCPAOGOOD, /* TCPAOGood */ + LINUX_MIB_TCPAODROPPEDICMPS, /* TCPAODroppedIcmps */ __LINUX_MIB_MAX }; diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 879eeb0a08..c07e9f90c0 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -129,6 +129,11 @@ enum { #define TCP_TX_DELAY 37 /* delay outgoing packets by XX usec */ +#define TCP_AO_ADD_KEY 38 /* Add/Set MKT */ +#define TCP_AO_DEL_KEY 39 /* Delete MKT */ +#define TCP_AO_INFO 40 /* Set/list TCP-AO per-socket options */ +#define TCP_AO_GET_KEYS 41 /* List MKT(s) */ +#define TCP_AO_REPAIR 42 /* Get/Set SNEs and ISNs */ #define TCP_REPAIR_ON 1 #define TCP_REPAIR_OFF 0 @@ -170,6 +175,7 @@ enum tcp_fastopen_client_fail { #define TCPI_OPT_ECN 8 /* ECN was negotiated at TCP session init */ #define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */ #define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */ +#define TCPI_OPT_USEC_TS 64 /* usec timestamps */ /* * Sender's congestion state indicating normal or abnormal situations @@ -289,6 +295,18 @@ struct tcp_info { */ __u32 tcpi_rehash; /* PLB or timeout triggered rehash attempts */ + + __u16 tcpi_total_rto; /* Total number of RTO timeouts, including * SYN/SYN-ACK and recurring timeouts.
+ */ + __u16 tcpi_total_rto_recoveries; /* Total number of RTO + * recoveries, including any + * unfinished recovery. + */ + __u32 tcpi_total_rto_time; /* Total time spent in RTO recoveries + * in milliseconds, including any + * unfinished recovery. + */ }; /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ @@ -348,6 +366,106 @@ struct tcp_diag_md5sig { __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; }; +#define TCP_AO_MAXKEYLEN 80 + +#define TCP_AO_KEYF_IFINDEX (1 << 0) /* L3 ifindex for VRF */ +#define TCP_AO_KEYF_EXCLUDE_OPT (1 << 1) /* "Indicates whether TCP + * options other than TCP-AO + * are included in the MAC + * calculation" + */ + +struct tcp_ao_add { /* setsockopt(TCP_AO_ADD_KEY) */ + struct __kernel_sockaddr_storage addr; /* peer's address for the key */ + char alg_name[64]; /* crypto hash algorithm to use */ + __s32 ifindex; /* L3 dev index for VRF */ + __u32 set_current :1, /* set key as Current_key at once */ + set_rnext :1, /* request it from peer with RNext_key */ + reserved :30; /* must be 0 */ + __u16 reserved2; /* padding, must be 0 */ + __u8 prefix; /* peer's address prefix */ + __u8 sndid; /* SendID for outgoing segments */ + __u8 rcvid; /* RecvID to match for incoming seg */ + __u8 maclen; /* length of authentication code (hash) */ + __u8 keyflags; /* see TCP_AO_KEYF_ */ + __u8 keylen; /* length of ::key */ + __u8 key[TCP_AO_MAXKEYLEN]; +} __attribute__((aligned(8))); + +struct tcp_ao_del { /* setsockopt(TCP_AO_DEL_KEY) */ + struct __kernel_sockaddr_storage addr; /* peer's address for the key */ + __s32 ifindex; /* L3 dev index for VRF */ + __u32 set_current :1, /* corresponding ::current_key */ + set_rnext :1, /* corresponding ::rnext */ + del_async :1, /* only valid for listen sockets */ + reserved :29; /* must be 0 */ + __u16 reserved2; /* padding, must be 0 */ + __u8 prefix; /* peer's address prefix */ + __u8 sndid; /* SendID for outgoing segments */ + __u8 rcvid; /* RecvID to match for incoming seg */ + __u8 current_key; /* KeyID to set as Current_key */ + __u8 rnext; /* KeyID to set as Rnext_key */ + __u8 keyflags; /* see TCP_AO_KEYF_ */ +} __attribute__((aligned(8))); + +struct tcp_ao_info_opt { /* setsockopt(TCP_AO_INFO), getsockopt(TCP_AO_INFO) */ + /* Here 'in' is for setsockopt(), 'out' is for getsockopt() */ + __u32 set_current :1, /* in/out: corresponding ::current_key */ + set_rnext :1, /* in/out: corresponding ::rnext */ + ao_required :1, /* in/out: don't accept non-AO connects */ + set_counters :1, /* in: set/clear ::pkt_* counters */ + accept_icmps :1, /* in/out: accept incoming ICMPs */ + reserved :27; /* must be 0 */ + __u16 reserved2; /* padding, must be 0 */ + __u8 current_key; /* in/out: KeyID of Current_key */ + __u8 rnext; /* in/out: keyid of RNext_key */ + __u64 pkt_good; /* in/out: verified segments */ + __u64 pkt_bad; /* in/out: failed verification */ + __u64 pkt_key_not_found; /* in/out: could not find a key to verify */ + __u64 pkt_ao_required; /* in/out: segments missing TCP-AO sign */ + __u64 pkt_dropped_icmp; /* in/out: ICMPs that were ignored */ +} __attribute__((aligned(8))); + +struct tcp_ao_getsockopt { /* getsockopt(TCP_AO_GET_KEYS) */ + struct __kernel_sockaddr_storage addr; /* in/out: dump keys for peer + * with this address/prefix + */ + char alg_name[64]; /* out: crypto hash algorithm */ + __u8 key[TCP_AO_MAXKEYLEN]; + __u32 nkeys; /* in: size of the userspace buffer + * @optval, measured in @optlen - the + * sizeof(struct tcp_ao_getsockopt) + * out: number of keys that matched + */ + __u16 is_current :1, /* in: match and 
dump Current_key, + * out: the dumped key is Current_key + */ + + is_rnext :1, /* in: match and dump RNext_key, + * out: the dumped key is RNext_key + */ + get_all :1, /* in: dump all keys */ + reserved :13; /* padding, must be 0 */ + __u8 sndid; /* in/out: dump keys with SendID */ + __u8 rcvid; /* in/out: dump keys with RecvID */ + __u8 prefix; /* in/out: dump keys with address/prefix */ + __u8 maclen; /* out: key's length of authentication + * code (hash) + */ + __u8 keyflags; /* in/out: see TCP_AO_KEYF_ */ + __u8 keylen; /* out: length of ::key */ + __s32 ifindex; /* in/out: L3 dev index for VRF */ + __u64 pkt_good; /* out: verified segments */ + __u64 pkt_bad; /* out: segments that failed verification */ +} __attribute__((aligned(8))); + +struct tcp_ao_repair { /* {s,g}etsockopt(TCP_AO_REPAIR) */ + __be32 snt_isn; + __be32 rcv_isn; + __u32 snd_sne; + __u32 rcv_sne; +} __attribute__((aligned(8))); + /* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) */ #define TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT 0x1 diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h index c7d2199134..f0224a8dc8 100644 --- a/include/uapi/linux/usb/raw_gadget.h +++ b/include/uapi/linux/usb/raw_gadget.h @@ -44,6 +44,16 @@ enum usb_raw_event_type { /* This event is queued when a new control request arrived to ep0. */ USB_RAW_EVENT_CONTROL = 2, + /* + * These events are queued when the gadget driver is suspended, + * resumed, reset, or disconnected. Note that some UDCs (e.g. dwc2) + * report a disconnect event instead of a reset. + */ + USB_RAW_EVENT_SUSPEND = 3, + USB_RAW_EVENT_RESUME = 4, + USB_RAW_EVENT_RESET = 5, + USB_RAW_EVENT_DISCONNECT = 6, + /* The list might grow in the future. */ }; @@ -54,8 +64,8 @@ enum usb_raw_event_type { * actual length of the fetched event data. * @data: A buffer to store the fetched event data. * - * Currently the fetched data buffer is empty for USB_RAW_EVENT_CONNECT, - * and contains struct usb_ctrlrequest for USB_RAW_EVENT_CONTROL. + * The fetched event data buffer contains struct usb_ctrlrequest for + * USB_RAW_EVENT_CONTROL and is empty for other events. */ struct usb_raw_event { __u32 type; diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h index 2984aae4a2..f74f3aedd4 100644 --- a/include/uapi/linux/user_events.h +++ b/include/uapi/linux/user_events.h @@ -17,6 +17,15 @@ /* Create dynamic location entry within a 32-bit value */ #define DYN_LOC(offset, size) ((size) << 16 | (offset)) +/* List of supported registration flags */ +enum user_reg_flag { + /* Event will not delete upon last reference closing */ + USER_EVENT_REG_PERSIST = 1U << 0, + + /* This value or above is currently non-ABI */ + USER_EVENT_REG_MAX = 1U << 1, +}; + /* * Describes an event registration and stores the results of the registration. 
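Stepping back to the TCP-AO additions in linux/tcp.h above, a hedged sketch of installing a key on a socket with TCP_AO_ADD_KEY (peer address, IDs and key material are made up; a 6.7-era <linux/tcp.h> is assumed):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/tcp.h>

static int add_ao_key(int sk)
{
	struct tcp_ao_add key;
	struct sockaddr_in *sin = (struct sockaddr_in *)&key.addr;

	memset(&key, 0, sizeof(key));
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr); /* example peer */
	strcpy(key.alg_name, "hmac(sha1)");	/* RFC 5925 mandatory algorithm */
	key.prefix = 32;			/* full-address match */
	key.sndid = 100;			/* example SendID */
	key.rcvid = 100;			/* example RecvID */
	key.keylen = 4;
	memcpy(key.key, "test", 4);		/* example key material */

	return setsockopt(sk, IPPROTO_TCP, TCP_AO_ADD_KEY, &key, sizeof(key));
}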
* This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum @@ -33,7 +42,7 @@ struct user_reg { /* Input: Enable size in bytes at address */ __u8 enable_size; - /* Input: Flags for future use, set to 0 */ + /* Input: Flags to use, if any */ __u16 flags; /* Input: Address to update when enabled */ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 62151706c5..0dbc810150 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -40,7 +40,8 @@ UFFD_FEATURE_EXACT_ADDRESS | \ UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \ UFFD_FEATURE_WP_UNPOPULATED | \ - UFFD_FEATURE_POISON) + UFFD_FEATURE_POISON | \ + UFFD_FEATURE_WP_ASYNC) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -216,6 +217,11 @@ struct uffdio_api { * (i.e. empty ptes). This will be the default behavior for shmem * & hugetlbfs, so this flag only affects anonymous memory behavior * when userfault write-protection mode is registered. + * + * UFFD_FEATURE_WP_ASYNC indicates that userfaultfd write-protection + * asynchronous mode is supported in which the write fault is + * automatically resolved and write-protection is un-set. + * It implies UFFD_FEATURE_WP_UNPOPULATED. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -232,6 +238,7 @@ struct uffdio_api { #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12) #define UFFD_FEATURE_WP_UNPOPULATED (1<<13) #define UFFD_FEATURE_POISON (1<<14) +#define UFFD_FEATURE_WP_ASYNC (1<<15) __u64 features; __u64 ioctls; diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index c3604a0a3e..68db66d4aa 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -203,6 +203,12 @@ enum v4l2_colorfx { */ #define V4L2_CID_USER_ASPEED_BASE (V4L2_CID_USER_BASE + 0x11a0) +/* + * The base for Nuvoton NPCM driver controls. + * We reserve 16 controls for this driver. 
+ */ +#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0) + /* MPEG-class control IDs */ /* The MPEG controls are applicable to all codec controls * and the 'MPEG' part of the define is historical */ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index afc1369216..7f5fb01022 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -277,8 +277,8 @@ struct vfio_region_info { #define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */ __u32 index; /* Region index */ __u32 cap_offset; /* Offset within info struct of first cap */ - __u64 size; /* Region size (bytes) */ - __u64 offset; /* Region offset from start of device fd */ + __aligned_u64 size; /* Region size (bytes) */ + __aligned_u64 offset; /* Region offset from start of device fd */ }; #define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8) @@ -294,8 +294,8 @@ struct vfio_region_info { #define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1 struct vfio_region_sparse_mmap_area { - __u64 offset; /* Offset of mmap'able area within region */ - __u64 size; /* Size of mmap'able area */ + __aligned_u64 offset; /* Offset of mmap'able area within region */ + __aligned_u64 size; /* Size of mmap'able area */ }; struct vfio_region_info_cap_sparse_mmap { @@ -450,9 +450,9 @@ struct vfio_device_migration_info { VFIO_DEVICE_STATE_V1_RESUMING) __u32 reserved; - __u64 pending_bytes; - __u64 data_offset; - __u64 data_size; + __aligned_u64 pending_bytes; + __aligned_u64 data_offset; + __aligned_u64 data_size; }; /* @@ -476,7 +476,7 @@ struct vfio_device_migration_info { struct vfio_region_info_cap_nvlink2_ssatgt { struct vfio_info_cap_header header; - __u64 tgt; + __aligned_u64 tgt; }; /* @@ -816,7 +816,7 @@ struct vfio_device_gfx_plane_info { __u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */ /* out */ __u32 drm_format; /* drm format of plane */ - __u64 drm_format_mod; /* tiled mode */ + __aligned_u64 drm_format_mod; /* tiled mode */ __u32 width; /* width of plane */ __u32 height; /* height of plane */ __u32 stride; /* stride of plane */ @@ -829,6 +829,7 @@ struct vfio_device_gfx_plane_info { __u32 region_index; /* region index */ __u32 dmabuf_id; /* dma-buf id */ }; + __u32 reserved; }; #define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14) @@ -863,9 +864,10 @@ struct vfio_device_ioeventfd { #define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */ #define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */ #define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf) - __u64 offset; /* device fd offset of write */ - __u64 data; /* data to be written */ + __aligned_u64 offset; /* device fd offset of write */ + __aligned_u64 data; /* data to be written */ __s32 fd; /* -1 for de-assignment */ + __u32 reserved; }; #define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16) @@ -1434,6 +1436,27 @@ struct vfio_device_feature_mig_data_size { #define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9 +/** + * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device + * based on the operation specified in op flag. + * + * The functionality is incorporated for devices that need bus master control, + * but the in-band device interface lacks the support. Consequently, it is not + * applicable to PCI devices, as bus master control for PCI devices is managed + * in-band through the configuration space. At present, this feature is supported + * only for CDX devices. + When the device's BUS MASTER setting is configured as CLEAR, it will result in + blocking all incoming DMA requests from the device. On the other hand, configuring + the device's BUS MASTER setting as SET (enable) will grant the device the + capability to perform DMA to the host memory. + */ +struct vfio_device_feature_bus_master { + __u32 op; +#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */ +#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */ +}; +#define VFIO_DEVICE_FEATURE_BUS_MASTER 10 +
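An illustrative sketch (not from the patch) of driving the new bus-master feature on an already-open VFIO device fd; the payload follows the usual vfio_device_feature header convention:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int vfio_clear_bus_master(int device_fd)
{
	char buf[sizeof(struct vfio_device_feature) +
		 sizeof(struct vfio_device_feature_bus_master)];
	struct vfio_device_feature *hdr = (struct vfio_device_feature *)buf;
	struct vfio_device_feature_bus_master *bm =
		(struct vfio_device_feature_bus_master *)hdr->data;

	memset(buf, 0, sizeof(buf));
	hdr->argsz = sizeof(buf);
	hdr->flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_BUS_MASTER;
	bm->op = VFIO_DEVICE_FEATURE_CLEAR_MASTER; /* block the device's DMA */

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, hdr);
}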
/* -------- API for Type1 VFIO IOMMU -------- */ /** @@ -1449,7 +1472,7 @@ struct vfio_iommu_type1_info { __u32 flags; #define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */ #define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */ - __u64 iova_pgsizes; /* Bitmap of supported page sizes */ + __aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */ __u32 cap_offset; /* Offset within info struct of first cap */ __u32 pad; }; diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index f5c48b61ab..649560c685 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -219,4 +219,12 @@ */ #define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E) +/* Get the group for the descriptor table including driver & device areas + * of a virtqueue: read index, write group in num. + * The virtqueue index is stored in the index field of vhost_vring_state. + * The group ID of the descriptor table for this specific virtqueue + * is returned via num field of vhost_vring_state. + */ +#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \ + struct vhost_vring_state) #endif diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h index 2d827d22cd..d7656908f7 100644 --- a/include/uapi/linux/vhost_types.h +++ b/include/uapi/linux/vhost_types.h @@ -185,5 +185,12 @@ struct vhost_vdpa_iova_range { * DRIVER_OK */ #define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6 +/* Device may expose the virtqueue's descriptor area, driver area and + * device area to a different group for ASID binding than where its + * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID. + */ +#define VHOST_BACKEND_F_DESC_ASID 0x7 +/* IOTLB mappings are not flushed across device reset */ +#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8 #endif diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 78260e5d99..c3d4e490ce 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -804,6 +804,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */ #define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */ #define V4L2_PIX_FMT_AJPG v4l2_fourcc('A', 'J', 'P', 'G') /* Aspeed JPEG */ +#define V4L2_PIX_FMT_HEXTILE v4l2_fourcc('H', 'X', 'T', 'L') /* Hextile compressed */ /* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */ #define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */ diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 2c712c6541..8881aea60f 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -105,6 +105,11 @@ */ #define VIRTIO_F_NOTIFICATION_DATA 38 +/* This feature indicates that the driver uses the data provided by the device + * as a virtqueue identifier in available buffer notifications. + */ +#define VIRTIO_F_NOTIF_CONFIG_DATA 39 +
/* * This feature indicates that the driver can reset a queue individually. */ diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index f703afc7ad..44f4dd2add 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -166,6 +166,17 @@ struct virtio_pci_common_cfg { __le32 queue_used_hi; /* read-write */ }; +/* + * Warning: do not use sizeof on this: use offsetofend for + * specific fields you need. + */ +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + + __le16 queue_notify_data; /* read-write */ + __le16 queue_reset; /* read-write */ +}; + /* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ struct virtio_pci_cfg_cap { struct virtio_pci_cap cap; diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 23543c33fe..6a77328be1 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -4,6 +4,7 @@ #include <linux/in6.h> #include <linux/types.h> +#include <linux/stddef.h> /* All of the structures in this file may not change size as they are * passed into the kernel from userspace via netlink sockets. @@ -33,7 +34,7 @@ struct xfrm_sec_ctx { __u8 ctx_alg; __u16 ctx_len; __u32 ctx_sid; - char ctx_str[]; + char ctx_str[] __counted_by(ctx_len); }; /* Security Context Domains of Interpretation */ diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h index dcb179de43..e157160317 100644 --- a/include/uapi/mtd/ubi-user.h +++ b/include/uapi/mtd/ubi-user.h @@ -248,6 +248,7 @@ enum { * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs * @padding: reserved for future, not used, has to be zeroed * @disable_fm: whether to disable fastmap + * @need_resv_pool: whether to reserve free PEBs for filling pool/wl_pool * * This data structure is used to specify MTD device UBI has to attach and the * parameters it has to use.
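For orientation, a sketch of how an attach request might use the new knob once the struct ubi_attach_req fields (the hunk continues just below) are filled in; device numbers are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

int main(void)
{
	struct ubi_attach_req req;
	int fd = open("/dev/ubi_ctrl", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/ubi_ctrl");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	req.ubi_num = UBI_DEV_NUM_AUTO;	/* let UBI pick the device number */
	req.mtd_num = 0;		/* MTD device to attach; placeholder */
	req.need_resv_pool = 1;		/* reserve free PEBs for pool/wl_pool */
	if (ioctl(fd, UBI_IOCATT, &req) < 0)
		perror("UBI_IOCATT");
	close(fd);
	return 0;
}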
The number which should be assigned to the new UBI @@ -293,7 +294,8 @@ struct ubi_attach_req { __s32 vid_hdr_offset; __s16 max_beb_per1024; __s8 disable_fm; - __s8 padding[9]; + __s8 need_resv_pool; + __s8 padding[8]; }; /* diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index 2e68a8b0c9..ce0f37f834 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -52,15 +52,25 @@ struct hns_roce_ib_create_cq_resp { __aligned_u64 cap_flags; }; +enum hns_roce_srq_cap_flags { + HNS_ROCE_SRQ_CAP_RECORD_DB = 1 << 0, +}; + +enum hns_roce_srq_cap_flags_resp { + HNS_ROCE_RSP_SRQ_CAP_RECORD_DB = 1 << 0, +}; + struct hns_roce_ib_create_srq { __aligned_u64 buf_addr; __aligned_u64 db_addr; __aligned_u64 que_addr; + __u32 req_cap_flags; /* Use enum hns_roce_srq_cap_flags */ + __u32 reserved; }; struct hns_roce_ib_create_srq_resp { __u32 srqn; - __u32 reserved; + __u32 cap_flags; /* Use enum hns_roce_srq_cap_flags */ }; struct hns_roce_ib_create_qp { diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index d7c5aaa327..fe15bc7e9f 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -220,7 +220,8 @@ enum ib_uverbs_advise_mr_flag { struct ib_uverbs_query_port_resp_ex { struct ib_uverbs_query_port_resp legacy_resp; __u16 port_cap_flags2; - __u8 reserved[6]; + __u8 reserved[2]; + __u32 active_speed_ex; }; struct ib_uverbs_qp_cap { diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index e50c357367..723bbb0f70 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -299,6 +299,8 @@ enum rdma_nldev_command { RDMA_NLDEV_CMD_STAT_GET_STATUS, + RDMA_NLDEV_CMD_RES_SRQ_GET_RAW, + RDMA_NLDEV_NUM_OPS }; @@ -554,6 +556,8 @@ enum rdma_nldev_attr { RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, /* u32 */ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, /* u8 */ + RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, /* u8 */ + /* * Always the end */ diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h index af735f55b2..6df4972495 100644 --- a/include/uapi/rdma/siw-abi.h +++ b/include/uapi/rdma/siw-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) or BSD-3-Clause */ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */ /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ /* Copyright (c) 2008-2019, IBM Corporation */ diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h index b5bc8604ef..c85fdd8895 100644 --- a/include/uapi/sound/asequencer.h +++ b/include/uapi/sound/asequencer.h @@ -207,7 +207,7 @@ struct snd_seq_ev_raw32 { struct snd_seq_ev_ext { unsigned int len; /* length of data */ void *ptr; /* pointer to data (note: maybe 64-bit) */ -} __attribute__((packed)); +} __packed; struct snd_seq_result { int event; /* processed event type */ @@ -251,7 +251,7 @@ struct snd_seq_ev_quote { struct snd_seq_addr origin; /* original sender */ unsigned short value; /* optional data */ struct snd_seq_event *event; /* quoted event */ -} __attribute__((packed)); +} __packed; union snd_seq_event_data { /* event data... 
*/ struct snd_seq_ev_note note; diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h index e145bca510..8b8c5d1420 100644 --- a/include/uapi/xen/privcmd.h +++ b/include/uapi/xen/privcmd.h @@ -110,6 +110,22 @@ struct privcmd_irqfd { __u8 pad[2]; }; +/* For privcmd_ioeventfd::flags */ +#define PRIVCMD_IOEVENTFD_FLAG_DEASSIGN (1 << 0) + +struct privcmd_ioeventfd { + __u64 ioreq; + __u64 ports; + __u64 addr; + __u32 addr_len; + __u32 event_fd; + __u32 vcpus; + __u32 vq; + __u32 flags; + domid_t dom; + __u8 pad[2]; +}; + /* * @cmd: IOCTL_PRIVCMD_HYPERCALL * @arg: &privcmd_hypercall_t @@ -139,5 +155,7 @@ struct privcmd_irqfd { _IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource)) #define IOCTL_PRIVCMD_IRQFD \ _IOW('P', 8, struct privcmd_irqfd) +#define IOCTL_PRIVCMD_IOEVENTFD \ + _IOW('P', 9, struct privcmd_ioeventfd) #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index 0cced88f45..e77ab17868 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -98,9 +98,10 @@ enum upiu_response_transaction { UPIU_TRANSACTION_REJECT_UPIU = 0x3F, }; -/* UPIU Read/Write flags */ +/* UPIU Read/Write flags. See also table "UPIU Flags" in the UFS standard. */ enum { UPIU_CMD_FLAGS_NONE = 0x00, + UPIU_CMD_FLAGS_CP = 0x04, UPIU_CMD_FLAGS_WRITE = 0x20, UPIU_CMD_FLAGS_READ = 0x40, }; diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 7d07b256e9..7f0b2c5599 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -28,6 +28,7 @@ #define UFSHCD "ufshcd" +struct scsi_device; struct ufs_hba; enum dev_cmd_type { @@ -371,6 +372,7 @@ struct ufs_hba_variant_ops { int (*get_outstanding_cqs)(struct ufs_hba *hba, unsigned long *ocqs); int (*config_esi)(struct ufs_hba *hba); + void (*config_scsi_dev)(struct scsi_device *sdev); }; /* clock gating state */ @@ -427,6 +429,7 @@ struct ufs_clk_gating { * @workq: workqueue to schedule devfreq suspend/resume work * @suspend_work: worker to suspend devfreq * @resume_work: worker to resume devfreq + * @target_freq: frequency requested by devfreq framework * @min_gear: lowest HS gear to scale down to * @is_enabled: tracks if scaling is currently enabled or not, controlled by * clkscale_enable sysfs node @@ -446,6 +449,7 @@ struct ufs_clk_scaling { struct workqueue_struct *workq; struct work_struct suspend_work; struct work_struct resume_work; + unsigned long target_freq; u32 min_gear; bool is_enabled; bool is_allowed; @@ -597,11 +601,6 @@ enum ufshcd_quirks { UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13, /* - * Align DMA SG entries on a 4 KiB boundary. - */ - UFSHCD_QUIRK_4KB_DMA_ALIGNMENT = 1 << 14, - - /* * This quirk needs to be enabled if the host controller does not * support UIC command */ @@ -865,6 +864,7 @@ enum ufshcd_mcq_opr { * @auto_bkops_enabled: to track whether bkops is enabled in device * @vreg_info: UFS device voltage regulator information * @clk_list_head: UFS host controller clocks list node head + * @use_pm_opp: Indicates whether OPP based scaling is used or not * @req_abort_count: number of times ufshcd_abort() has been called * @lanes_per_direction: number of lanes per data direction between the UFS * controller and the UFS device. 
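Returning to the xen/privcmd.h hunk above, a hedged sketch of registering an ioeventfd (all field values are illustrative, the sanitized <xen/privcmd.h> uapi header is assumed to be installed, and a real caller would also wire up the ioreq and ports fields from its ioreq-server setup):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/privcmd.h>

int main(void)
{
	struct privcmd_ioeventfd cfg;
	int fd = open("/dev/xen/privcmd", O_RDWR);
	int efd = eventfd(0, EFD_CLOEXEC);

	if (fd < 0 || efd < 0) {
		perror("setup");
		return 1;
	}
	memset(&cfg, 0, sizeof(cfg));
	cfg.event_fd = efd;
	cfg.addr = 0xfe000000;	/* guest notify address; placeholder */
	cfg.addr_len = 4;	/* width of the notify write */
	cfg.vq = 0;		/* virtqueue index; placeholder */
	cfg.dom = 1;		/* target domain id; placeholder */
	if (ioctl(fd, IOCTL_PRIVCMD_IOEVENTFD, &cfg) < 0)
		perror("IOCTL_PRIVCMD_IOEVENTFD");
	return 0;
}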
@@ -1015,6 +1015,7 @@ struct ufs_hba { bool auto_bkops_enabled; struct ufs_vreg_info vreg_info; struct list_head clk_list_head; + bool use_pm_opp; /* Number of requests aborts */ int req_abort_count; @@ -1253,6 +1254,9 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba); void ufshcd_mcq_enable_esi(struct ufs_hba *hba); void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg); +int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table, + struct dev_pm_opp *opp, void *data, + bool scaling_down); /** * ufshcd_set_variant - set variant specific data to the hba * @hba: per adapter instance diff --git a/include/xen/events.h b/include/xen/events.h index 23932b0673..3b07409f80 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -88,7 +88,6 @@ void xen_irq_resume(void); /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq); -void xen_set_irq_pending(int irq); bool xen_test_irq_pending(int irq); /* Poll waiting for an irq to become pending. In the usual case, the @@ -101,8 +100,8 @@ void xen_poll_irq_timeout(int irq, u64 timeout); /* Determine the IRQ which is bound to an event channel */ unsigned int irq_from_evtchn(evtchn_port_t evtchn); -int irq_from_virq(unsigned int cpu, unsigned int virq); -evtchn_port_t evtchn_from_irq(unsigned irq); +int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq, + evtchn_port_t *evtchn); int xen_set_callback_via(uint64_t via); int xen_evtchn_do_upcall(void); @@ -122,9 +121,6 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, /* De-allocates the above mentioned physical interrupt. */ int xen_destroy_irq(int irq); -/* Return irq from pirq */ -int xen_irq_from_pirq(unsigned pirq); - /* Return the pirq allocated to the irq. */ int xen_pirq_from_irq(unsigned irq); diff --git a/include/xen/interface/hvm/ioreq.h b/include/xen/interface/hvm/ioreq.h new file mode 100644 index 0000000000..b02cfeae7e --- /dev/null +++ b/include/xen/interface/hvm/ioreq.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: MIT */ +/* + * ioreq.h: I/O request definitions for device models + * Copyright (c) 2004, Intel Corporation. + */ + +#ifndef __XEN_PUBLIC_HVM_IOREQ_H__ +#define __XEN_PUBLIC_HVM_IOREQ_H__ + +#define IOREQ_READ 1 +#define IOREQ_WRITE 0 + +#define STATE_IOREQ_NONE 0 +#define STATE_IOREQ_READY 1 +#define STATE_IOREQ_INPROCESS 2 +#define STATE_IORESP_READY 3 + +#define IOREQ_TYPE_PIO 0 /* pio */ +#define IOREQ_TYPE_COPY 1 /* mmio ops */ +#define IOREQ_TYPE_PCI_CONFIG 2 +#define IOREQ_TYPE_TIMEOFFSET 7 +#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ + +/* + * VMExit dispatcher should cooperate with instruction decoder to + * prepare this structure and notify service OS and DM by sending + * virq. + * + * For I/O type IOREQ_TYPE_PCI_CONFIG, the physical address is formatted + * as follows: + * + * 63....48|47..40|39..35|34..32|31........0 + * SEGMENT |BUS |DEV |FN |OFFSET + */ +struct ioreq { + uint64_t addr; /* physical address */ + uint64_t data; /* data (or paddr of data) */ + uint32_t count; /* for rep prefixes */ + uint32_t size; /* size in bytes */ + uint32_t vp_eport; /* evtchn for notifications to/from device model */ + uint16_t _pad0; + uint8_t state:4; + uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr * of the real data to use. */ + uint8_t dir:1; /* 1=read, 0=write */ + uint8_t df:1; + uint8_t _pad1:1; + uint8_t type; /* I/O type */ +}; + +#endif /* __XEN_PUBLIC_HVM_IOREQ_H__ */
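To close, a small self-contained example of decoding the IOREQ_TYPE_PCI_CONFIG address layout documented in the new ioreq.h above (the sample address is made up):

#include <stdint.h>
#include <stdio.h>

static void decode_pci_cfg_addr(uint64_t addr)
{
	unsigned int seg = addr >> 48;		/* bits 63..48 */
	unsigned int bus = (addr >> 40) & 0xff;	/* bits 47..40 */
	unsigned int dev = (addr >> 35) & 0x1f;	/* bits 39..35 */
	unsigned int fn  = (addr >> 32) & 0x7;	/* bits 34..32 */
	unsigned int off = addr & 0xffffffff;	/* bits 31..0 */

	printf("%04x:%02x:%02x.%u offset 0x%x\n", seg, bus, dev, fn, off);
}

int main(void)
{
	decode_pci_cfg_addr(((uint64_t)0x03 << 40) | ((uint64_t)0x1 << 32) | 0x10);
	return 0;
}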