| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:27 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:27 +0000 |
| commit | 34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch) | |
| tree | 62db60558cbf089714b48daeabca82bf2b20b20e /arch/s390/include/asm/idals.h | |
| parent | Adding debian version 6.8.12-1. (diff) | |
| download | linux-34996e42f82bfd60bc2c191e5cae3c6ab233ec6c.tar.xz, linux-34996e42f82bfd60bc2c191e5cae3c6ab233ec6c.zip | |
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/s390/include/asm/idals.h')
-rw-r--r-- | arch/s390/include/asm/idals.h | 176 |
1 file changed, 96 insertions, 80 deletions
```diff
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index 59fcc3c72e..ac68c657b2 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* 
+/*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
@@ -17,32 +17,37 @@
 #include <linux/err.h>
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <asm/cio.h>
 #include <linux/uaccess.h>
+#include <asm/dma-types.h>
+#include <asm/cio.h>
 
-#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
-#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
+#define IDA_SIZE_SHIFT 12
+#define IDA_BLOCK_SIZE (1UL << IDA_SIZE_SHIFT)
 
-#define IDA_2K_SIZE_LOG 11
-#define IDA_2K_BLOCK_SIZE (1L << IDA_2K_SIZE_LOG)
+#define IDA_2K_SIZE_SHIFT 11
+#define IDA_2K_BLOCK_SIZE (1UL << IDA_2K_SIZE_SHIFT)
 
 /*
  * Test if an address/length pair needs an idal list.
  */
-static inline int
-idal_is_needed(void *vaddr, unsigned int length)
+static inline bool idal_is_needed(void *vaddr, unsigned int length)
 {
-        return ((__pa(vaddr) + length - 1) >> 31) != 0;
-}
+        dma64_t paddr = virt_to_dma64(vaddr);
 
+        return (((__force unsigned long)(paddr) + length - 1) >> 31) != 0;
+}
 
 /*
  * Return the number of idal words needed for an address/length pair.
  */
 static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
 {
-        return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
-                (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
+        unsigned int cidaw;
+
+        cidaw = (unsigned long)vaddr & (IDA_BLOCK_SIZE - 1);
+        cidaw += length + IDA_BLOCK_SIZE - 1;
+        cidaw >>= IDA_SIZE_SHIFT;
+        return cidaw;
 }
 
 /*
@@ -50,26 +55,27 @@ static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
  */
 static inline unsigned int idal_2k_nr_words(void *vaddr, unsigned int length)
 {
-        return ((__pa(vaddr) & (IDA_2K_BLOCK_SIZE - 1)) + length +
-                (IDA_2K_BLOCK_SIZE - 1)) >> IDA_2K_SIZE_LOG;
+        unsigned int cidaw;
+
+        cidaw = (unsigned long)vaddr & (IDA_2K_BLOCK_SIZE - 1);
+        cidaw += length + IDA_2K_BLOCK_SIZE - 1;
+        cidaw >>= IDA_2K_SIZE_SHIFT;
+        return cidaw;
 }
 
 /*
  * Create the list of idal words for an address/length pair.
  */
-static inline unsigned long *idal_create_words(unsigned long *idaws,
-                                               void *vaddr, unsigned int length)
+static inline dma64_t *idal_create_words(dma64_t *idaws, void *vaddr, unsigned int length)
 {
-        unsigned long paddr;
+        dma64_t paddr = virt_to_dma64(vaddr);
         unsigned int cidaw;
 
-        paddr = __pa(vaddr);
-        cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
-                 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
         *idaws++ = paddr;
-        paddr &= -IDA_BLOCK_SIZE;
+        cidaw = idal_nr_words(vaddr, length);
+        paddr = dma64_and(paddr, -IDA_BLOCK_SIZE);
         while (--cidaw > 0) {
-                paddr += IDA_BLOCK_SIZE;
+                paddr = dma64_add(paddr, IDA_BLOCK_SIZE);
                 *idaws++ = paddr;
         }
         return idaws;
@@ -79,36 +85,33 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
  * Sets the address of the data in CCW.
  * If necessary it allocates an IDAL and sets the appropriate flags.
  */
-static inline int
-set_normalized_cda(struct ccw1 * ccw, void *vaddr)
+static inline int set_normalized_cda(struct ccw1 *ccw, void *vaddr)
 {
         unsigned int nridaws;
-        unsigned long *idal;
+        dma64_t *idal;
 
         if (ccw->flags & CCW_FLAG_IDA)
                 return -EINVAL;
         nridaws = idal_nr_words(vaddr, ccw->count);
         if (nridaws > 0) {
-                idal = kmalloc(nridaws * sizeof(unsigned long),
-                               GFP_ATOMIC | GFP_DMA );
-                if (idal == NULL)
+                idal = kcalloc(nridaws, sizeof(*idal), GFP_ATOMIC | GFP_DMA);
+                if (!idal)
                         return -ENOMEM;
                 idal_create_words(idal, vaddr, ccw->count);
                 ccw->flags |= CCW_FLAG_IDA;
                 vaddr = idal;
         }
-        ccw->cda = (__u32)(unsigned long) vaddr;
+        ccw->cda = virt_to_dma32(vaddr);
         return 0;
 }
 
 /*
  * Releases any allocated IDAL related to the CCW.
  */
-static inline void
-clear_normalized_cda(struct ccw1 * ccw)
+static inline void clear_normalized_cda(struct ccw1 *ccw)
 {
         if (ccw->flags & CCW_FLAG_IDA) {
-                kfree((void *)(unsigned long) ccw->cda);
+                kfree(dma32_to_virt(ccw->cda));
                 ccw->flags &= ~CCW_FLAG_IDA;
         }
         ccw->cda = 0;
@@ -120,125 +123,138 @@ clear_normalized_cda(struct ccw1 * ccw)
 struct idal_buffer {
         size_t size;
         size_t page_order;
-        void *data[];
+        dma64_t data[];
 };
 
 /*
  * Allocate an idal buffer
  */
-static inline struct idal_buffer *
-idal_buffer_alloc(size_t size, int page_order)
+static inline struct idal_buffer *idal_buffer_alloc(size_t size, int page_order)
 {
-        struct idal_buffer *ib;
         int nr_chunks, nr_ptrs, i;
+        struct idal_buffer *ib;
+        void *vaddr;
 
-        nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
-        nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
+        nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_SHIFT;
+        nr_chunks = (PAGE_SIZE << page_order) >> IDA_SIZE_SHIFT;
         ib = kmalloc(struct_size(ib, data, nr_ptrs), GFP_DMA | GFP_KERNEL);
-        if (ib == NULL)
+        if (!ib)
                 return ERR_PTR(-ENOMEM);
         ib->size = size;
         ib->page_order = page_order;
         for (i = 0; i < nr_ptrs; i++) {
-                if ((i & (nr_chunks - 1)) != 0) {
-                        ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
-                        continue;
-                }
-                ib->data[i] = (void *)
-                        __get_free_pages(GFP_KERNEL, page_order);
-                if (ib->data[i] != NULL)
+                if (i & (nr_chunks - 1)) {
+                        ib->data[i] = dma64_add(ib->data[i - 1], IDA_BLOCK_SIZE);
                         continue;
-                // Not enough memory
-                while (i >= nr_chunks) {
-                        i -= nr_chunks;
-                        free_pages((unsigned long) ib->data[i],
-                                   ib->page_order);
                 }
-                kfree(ib);
-                return ERR_PTR(-ENOMEM);
+                vaddr = (void *)__get_free_pages(GFP_KERNEL, page_order);
+                if (!vaddr)
+                        goto error;
+                ib->data[i] = virt_to_dma64(vaddr);
         }
         return ib;
+error:
+        while (i >= nr_chunks) {
+                i -= nr_chunks;
+                vaddr = dma64_to_virt(ib->data[i]);
+                free_pages((unsigned long)vaddr, ib->page_order);
+        }
+        kfree(ib);
+        return ERR_PTR(-ENOMEM);
 }
 
 /*
  * Free an idal buffer.
  */
-static inline void
-idal_buffer_free(struct idal_buffer *ib)
+static inline void idal_buffer_free(struct idal_buffer *ib)
 {
         int nr_chunks, nr_ptrs, i;
+        void *vaddr;
 
-        nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
-        nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
-        for (i = 0; i < nr_ptrs; i += nr_chunks)
-                free_pages((unsigned long) ib->data[i], ib->page_order);
+        nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_SHIFT;
+        nr_chunks = (PAGE_SIZE << ib->page_order) >> IDA_SIZE_SHIFT;
+        for (i = 0; i < nr_ptrs; i += nr_chunks) {
+                vaddr = dma64_to_virt(ib->data[i]);
+                free_pages((unsigned long)vaddr, ib->page_order);
+        }
         kfree(ib);
 }
 
 /*
  * Test if a idal list is really needed.
  */
-static inline int
-__idal_buffer_is_needed(struct idal_buffer *ib)
+static inline bool __idal_buffer_is_needed(struct idal_buffer *ib)
 {
-        return ib->size > (4096ul << ib->page_order) ||
-                idal_is_needed(ib->data[0], ib->size);
+        if (ib->size > (PAGE_SIZE << ib->page_order))
+                return true;
+        return idal_is_needed(dma64_to_virt(ib->data[0]), ib->size);
 }
 
 /*
  * Set channel data address to idal buffer.
  */
-static inline void
-idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
+static inline void idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
 {
+        void *vaddr;
+
         if (__idal_buffer_is_needed(ib)) {
-                // setup idals;
-                ccw->cda = (u32)(addr_t) ib->data;
+                /* Setup idals */
+                ccw->cda = virt_to_dma32(ib->data);
                 ccw->flags |= CCW_FLAG_IDA;
-        } else
-                // we do not need idals - use direct addressing
-                ccw->cda = (u32)(addr_t) ib->data[0];
+        } else {
+                /*
+                 * No idals needed - use direct addressing. Convert from
+                 * dma64_t to virt and then to dma32_t only because of type
+                 * checking. The physical address is known to be below 2GB.
+                 */
+                vaddr = dma64_to_virt(ib->data[0]);
+                ccw->cda = virt_to_dma32(vaddr);
+        }
         ccw->count = ib->size;
 }
 
 /*
  * Copy count bytes from an idal buffer to user memory
  */
-static inline size_t
-idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
+static inline size_t idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
 {
         size_t left;
+        void *vaddr;
         int i;
 
         BUG_ON(count > ib->size);
         for (i = 0; count > IDA_BLOCK_SIZE; i++) {
-                left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
+                vaddr = dma64_to_virt(ib->data[i]);
+                left = copy_to_user(to, vaddr, IDA_BLOCK_SIZE);
                 if (left)
                         return left + count - IDA_BLOCK_SIZE;
-                to = (void __user *) to + IDA_BLOCK_SIZE;
+                to = (void __user *)to + IDA_BLOCK_SIZE;
                 count -= IDA_BLOCK_SIZE;
         }
-        return copy_to_user(to, ib->data[i], count);
+        vaddr = dma64_to_virt(ib->data[i]);
+        return copy_to_user(to, vaddr, count);
 }
 
 /*
  * Copy count bytes from user memory to an idal buffer
  */
-static inline size_t
-idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
+static inline size_t idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
 {
         size_t left;
+        void *vaddr;
         int i;
 
         BUG_ON(count > ib->size);
         for (i = 0; count > IDA_BLOCK_SIZE; i++) {
-                left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
+                vaddr = dma64_to_virt(ib->data[i]);
+                left = copy_from_user(vaddr, from, IDA_BLOCK_SIZE);
                 if (left)
                         return left + count - IDA_BLOCK_SIZE;
-                from = (void __user *) from + IDA_BLOCK_SIZE;
+                from = (void __user *)from + IDA_BLOCK_SIZE;
                 count -= IDA_BLOCK_SIZE;
         }
-        return copy_from_user(ib->data[i], from, count);
+        vaddr = dma64_to_virt(ib->data[i]);
+        return copy_from_user(vaddr, from, count);
 }
 
 #endif
```
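Usage note (not part of the commit): even though the IDAL words are now carried as typed `dma64_t` values, the calling convention of the helpers is unchanged; a driver still hands `set_normalized_cda()` a kernel virtual address and lets the helper decide whether an indirect data address list is required. The sketch below is illustrative only; `prepare_read_ccw()`, `release_read_ccw()` and the `0x02` command code are made-up example values, while `struct ccw1`, `set_normalized_cda()` and `clear_normalized_cda()` come from the header shown above.

```c
/*
 * Illustrative sketch: build a single read CCW for a data buffer and let
 * set_normalized_cda() decide whether an IDAL is needed (buffer above the
 * 2 GB line or crossing a 4 KB IDA block boundary).
 */
#include <asm/idals.h>

static int prepare_read_ccw(struct ccw1 *ccw, void *buf, unsigned int len)
{
	int rc;

	ccw->cmd_code = 0x02;	/* example command code only */
	ccw->flags = 0;		/* must not have CCW_FLAG_IDA set yet */
	ccw->count = len;	/* count is read by set_normalized_cda() */

	/* Allocates and fills an IDAL and sets CCW_FLAG_IDA if required. */
	rc = set_normalized_cda(ccw, buf);
	if (rc)
		return rc;	/* -EINVAL or -ENOMEM */
	return 0;
}

static void release_read_ccw(struct ccw1 *ccw)
{
	/* Frees the IDAL, if one was allocated, and clears ccw->cda. */
	clear_normalized_cda(ccw);
}
```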