From 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 27 Apr 2024 12:05:51 +0200 Subject: Adding upstream version 5.10.209. Signed-off-by: Daniel Baumann --- arch/hexagon/include/asm/Kbuild | 5 + arch/hexagon/include/asm/asm-offsets.h | 1 + arch/hexagon/include/asm/atomic.h | 185 +++++++++++++ arch/hexagon/include/asm/bitops.h | 284 +++++++++++++++++++ arch/hexagon/include/asm/cache.h | 23 ++ arch/hexagon/include/asm/cacheflush.h | 79 ++++++ arch/hexagon/include/asm/checksum.h | 26 ++ arch/hexagon/include/asm/cmpxchg.h | 75 ++++++ arch/hexagon/include/asm/delay.h | 16 ++ arch/hexagon/include/asm/dma.h | 16 ++ arch/hexagon/include/asm/elf.h | 217 +++++++++++++++ arch/hexagon/include/asm/exec.h | 15 ++ arch/hexagon/include/asm/fixmap.h | 18 ++ arch/hexagon/include/asm/fpu.h | 4 + arch/hexagon/include/asm/futex.h | 107 ++++++++ arch/hexagon/include/asm/hexagon_vm.h | 276 +++++++++++++++++++ arch/hexagon/include/asm/intrinsics.h | 13 + arch/hexagon/include/asm/io.h | 314 +++++++++++++++++++++ arch/hexagon/include/asm/irq.h | 23 ++ arch/hexagon/include/asm/irqflags.h | 49 ++++ arch/hexagon/include/asm/kgdb.h | 31 +++ arch/hexagon/include/asm/linkage.h | 12 + arch/hexagon/include/asm/mem-layout.h | 107 ++++++++ arch/hexagon/include/asm/mmu.h | 24 ++ arch/hexagon/include/asm/mmu_context.h | 89 ++++++ arch/hexagon/include/asm/page.h | 149 ++++++++++ arch/hexagon/include/asm/perf_event.h | 9 + arch/hexagon/include/asm/pgalloc.h | 96 +++++++ arch/hexagon/include/asm/pgtable.h | 419 +++++++++++++++++++++++++++++ arch/hexagon/include/asm/processor.h | 133 +++++++++ arch/hexagon/include/asm/smp.h | 30 +++ arch/hexagon/include/asm/spinlock.h | 158 +++++++++++ arch/hexagon/include/asm/spinlock_types.h | 27 ++ arch/hexagon/include/asm/string.h | 19 ++ arch/hexagon/include/asm/suspend.h | 14 + arch/hexagon/include/asm/switch_to.h | 21 ++ arch/hexagon/include/asm/syscall.h | 53 ++++ arch/hexagon/include/asm/thread_info.h | 117 ++++++++ arch/hexagon/include/asm/time.h | 16 ++ arch/hexagon/include/asm/timex.h | 23 ++ arch/hexagon/include/asm/tlb.h | 14 + arch/hexagon/include/asm/tlbflush.h | 45 ++++ arch/hexagon/include/asm/traps.h | 16 ++ arch/hexagon/include/asm/uaccess.h | 94 +++++++ arch/hexagon/include/asm/vdso.h | 17 ++ arch/hexagon/include/asm/vermagic.h | 13 + arch/hexagon/include/asm/vm_fault.h | 13 + arch/hexagon/include/asm/vm_mmu.h | 97 +++++++ arch/hexagon/include/asm/vmalloc.h | 4 + arch/hexagon/include/uapi/asm/Kbuild | 2 + arch/hexagon/include/uapi/asm/byteorder.h | 29 ++ arch/hexagon/include/uapi/asm/param.h | 27 ++ arch/hexagon/include/uapi/asm/ptrace.h | 45 ++++ arch/hexagon/include/uapi/asm/registers.h | 229 ++++++++++++++++ arch/hexagon/include/uapi/asm/setup.h | 35 +++ arch/hexagon/include/uapi/asm/sigcontext.h | 34 +++ arch/hexagon/include/uapi/asm/signal.h | 29 ++ arch/hexagon/include/uapi/asm/swab.h | 25 ++ arch/hexagon/include/uapi/asm/unistd.h | 40 +++ arch/hexagon/include/uapi/asm/user.h | 70 +++++ 60 files changed, 4171 insertions(+) create mode 100644 arch/hexagon/include/asm/Kbuild create mode 100644 arch/hexagon/include/asm/asm-offsets.h create mode 100644 arch/hexagon/include/asm/atomic.h create mode 100644 arch/hexagon/include/asm/bitops.h create mode 100644 arch/hexagon/include/asm/cache.h create mode 100644 arch/hexagon/include/asm/cacheflush.h create mode 100644 arch/hexagon/include/asm/checksum.h create mode 100644 arch/hexagon/include/asm/cmpxchg.h create mode 100644 arch/hexagon/include/asm/delay.h create mode 100644 
arch/hexagon/include/asm/dma.h create mode 100644 arch/hexagon/include/asm/elf.h create mode 100644 arch/hexagon/include/asm/exec.h create mode 100644 arch/hexagon/include/asm/fixmap.h create mode 100644 arch/hexagon/include/asm/fpu.h create mode 100644 arch/hexagon/include/asm/futex.h create mode 100644 arch/hexagon/include/asm/hexagon_vm.h create mode 100644 arch/hexagon/include/asm/intrinsics.h create mode 100644 arch/hexagon/include/asm/io.h create mode 100644 arch/hexagon/include/asm/irq.h create mode 100644 arch/hexagon/include/asm/irqflags.h create mode 100644 arch/hexagon/include/asm/kgdb.h create mode 100644 arch/hexagon/include/asm/linkage.h create mode 100644 arch/hexagon/include/asm/mem-layout.h create mode 100644 arch/hexagon/include/asm/mmu.h create mode 100644 arch/hexagon/include/asm/mmu_context.h create mode 100644 arch/hexagon/include/asm/page.h create mode 100644 arch/hexagon/include/asm/perf_event.h create mode 100644 arch/hexagon/include/asm/pgalloc.h create mode 100644 arch/hexagon/include/asm/pgtable.h create mode 100644 arch/hexagon/include/asm/processor.h create mode 100644 arch/hexagon/include/asm/smp.h create mode 100644 arch/hexagon/include/asm/spinlock.h create mode 100644 arch/hexagon/include/asm/spinlock_types.h create mode 100644 arch/hexagon/include/asm/string.h create mode 100644 arch/hexagon/include/asm/suspend.h create mode 100644 arch/hexagon/include/asm/switch_to.h create mode 100644 arch/hexagon/include/asm/syscall.h create mode 100644 arch/hexagon/include/asm/thread_info.h create mode 100644 arch/hexagon/include/asm/time.h create mode 100644 arch/hexagon/include/asm/timex.h create mode 100644 arch/hexagon/include/asm/tlb.h create mode 100644 arch/hexagon/include/asm/tlbflush.h create mode 100644 arch/hexagon/include/asm/traps.h create mode 100644 arch/hexagon/include/asm/uaccess.h create mode 100644 arch/hexagon/include/asm/vdso.h create mode 100644 arch/hexagon/include/asm/vermagic.h create mode 100644 arch/hexagon/include/asm/vm_fault.h create mode 100644 arch/hexagon/include/asm/vm_mmu.h create mode 100644 arch/hexagon/include/asm/vmalloc.h create mode 100644 arch/hexagon/include/uapi/asm/Kbuild create mode 100644 arch/hexagon/include/uapi/asm/byteorder.h create mode 100644 arch/hexagon/include/uapi/asm/param.h create mode 100644 arch/hexagon/include/uapi/asm/ptrace.h create mode 100644 arch/hexagon/include/uapi/asm/registers.h create mode 100644 arch/hexagon/include/uapi/asm/setup.h create mode 100644 arch/hexagon/include/uapi/asm/sigcontext.h create mode 100644 arch/hexagon/include/uapi/asm/signal.h create mode 100644 arch/hexagon/include/uapi/asm/swab.h create mode 100644 arch/hexagon/include/uapi/asm/unistd.h create mode 100644 arch/hexagon/include/uapi/asm/user.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild new file mode 100644 index 000000000..3ece3c93f --- /dev/null +++ b/arch/hexagon/include/asm/Kbuild @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += extable.h +generic-y += iomap.h +generic-y += kvm_para.h +generic-y += mcs_spinlock.h diff --git a/arch/hexagon/include/asm/asm-offsets.h b/arch/hexagon/include/asm/asm-offsets.h new file mode 100644 index 000000000..d370ee36a --- /dev/null +++ b/arch/hexagon/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h new file mode 100644 index 000000000..4ab895d71 --- /dev/null +++ b/arch/hexagon/include/asm/atomic.h @@ -0,0 +1,185 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Atomic operations for the Hexagon architecture + * + * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_ATOMIC_H +#define _ASM_ATOMIC_H + +#include +#include +#include + +/* Normal writes in our arch don't clear lock reservations */ + +static inline void atomic_set(atomic_t *v, int new) +{ + asm volatile( + "1: r6 = memw_locked(%0);\n" + " memw_locked(%0,p0) = %1;\n" + " if (!P0) jump 1b;\n" + : + : "r" (&v->counter), "r" (new) + : "memory", "p0", "r6" + ); +} + +#define atomic_set_release(v, i) atomic_set((v), (i)) + +/** + * atomic_read - reads a word, atomically + * @v: pointer to atomic value + * + * Assumes all word reads on our architecture are atomic. + */ +#define atomic_read(v) READ_ONCE((v)->counter) + +/** + * atomic_xchg - atomic + * @v: pointer to memory to change + * @new: new value (technically passed in a register -- see xchg) + */ +#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) + + +/** + * atomic_cmpxchg - atomic compare-and-exchange values + * @v: pointer to value to change + * @old: desired old value to match + * @new: new value to put in + * + * Parameters are then pointer, value-in-register, value-in-register, + * and the output is the old value. + * + * Apparently this is complicated for archs that don't support + * the memw_locked like we do (or it's broken or whatever). + * + * Kind of the lynchpin of the rest of the generically defined routines. + * Remember V2 had that bug with dotnew predicate set by memw_locked. + * + * "old" is "expected" old val, __oldval is actual old value + */ +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int __oldval; + + asm volatile( + "1: %0 = memw_locked(%1);\n" + " { P0 = cmp.eq(%0,%2);\n" + " if (!P0.new) jump:nt 2f; }\n" + " memw_locked(%1,P0) = %3;\n" + " if (!P0) jump 1b;\n" + "2:\n" + : "=&r" (__oldval) + : "r" (&v->counter), "r" (old), "r" (new) + : "memory", "p0" + ); + + return __oldval; +} + +#define ATOMIC_OP(op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + int output; \ + \ + __asm__ __volatile__ ( \ + "1: %0 = memw_locked(%1);\n" \ + " %0 = "#op "(%0,%2);\n" \ + " memw_locked(%1,P3)=%0;\n" \ + " if (!P3) jump 1b;\n" \ + : "=&r" (output) \ + : "r" (&v->counter), "r" (i) \ + : "memory", "p3" \ + ); \ +} \ + +#define ATOMIC_OP_RETURN(op) \ +static inline int atomic_##op##_return(int i, atomic_t *v) \ +{ \ + int output; \ + \ + __asm__ __volatile__ ( \ + "1: %0 = memw_locked(%1);\n" \ + " %0 = "#op "(%0,%2);\n" \ + " memw_locked(%1,P3)=%0;\n" \ + " if (!P3) jump 1b;\n" \ + : "=&r" (output) \ + : "r" (&v->counter), "r" (i) \ + : "memory", "p3" \ + ); \ + return output; \ +} + +#define ATOMIC_FETCH_OP(op) \ +static inline int atomic_fetch_##op(int i, atomic_t *v) \ +{ \ + int output, val; \ + \ + __asm__ __volatile__ ( \ + "1: %0 = memw_locked(%2);\n" \ + " %1 = "#op "(%0,%3);\n" \ + " memw_locked(%2,P3)=%1;\n" \ + " if (!P3) jump 1b;\n" \ + : "=&r" (output), "=&r" (val) \ + : "r" (&v->counter), "r" (i) \ + : "memory", "p3" \ + ); \ + return output; \ +} + +#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) + +ATOMIC_OPS(add) +ATOMIC_OPS(sub) + +#undef ATOMIC_OPS +#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) + +ATOMIC_OPS(and) +ATOMIC_OPS(or) +ATOMIC_OPS(xor) + +#undef ATOMIC_OPS +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +/** + * atomic_fetch_add_unless - add unless the number is a given value + * @v: pointer to value 
+ * @a: amount to add + * @u: unless value is equal to u + * + * Returns old value. + * + */ + +static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int __oldval; + register int tmp; + + asm volatile( + "1: %0 = memw_locked(%2);" + " {" + " p3 = cmp.eq(%0, %4);" + " if (p3.new) jump:nt 2f;" + " %1 = add(%0, %3);" + " }" + " memw_locked(%2, p3) = %1;" + " {" + " if (!p3) jump 1b;" + " }" + "2:" + : "=&r" (__oldval), "=&r" (tmp) + : "r" (v), "r" (a), "r" (u) + : "memory", "p3" + ); + return __oldval; +} +#define atomic_fetch_add_unless atomic_fetch_add_unless + +#endif diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h new file mode 100644 index 000000000..71429f756 --- /dev/null +++ b/arch/hexagon/include/asm/bitops.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Bit operations for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_BITOPS_H +#define _ASM_BITOPS_H + +#include +#include +#include +#include + +#ifdef __KERNEL__ + +/* + * The offset calculations for these are based on BITS_PER_LONG == 32 + * (i.e. I get to shift by #5-2 (32 bits per long, 4 bytes per access), + * mask by 0x0000001F) + * + * Typically, R10 is clobbered for address, R11 bit nr, and R12 is temp + */ + +/** + * test_and_clear_bit - clear a bit and return its old value + * @nr: bit number to clear + * @addr: pointer to memory + */ +static inline int test_and_clear_bit(int nr, volatile void *addr) +{ + int oldval; + + __asm__ __volatile__ ( + " {R10 = %1; R11 = asr(%2,#5); }\n" + " {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n" + "1: R12 = memw_locked(R10);\n" + " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n" + " memw_locked(R10,P1) = R12;\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" + : "=&r" (oldval) + : "r" (addr), "r" (nr) + : "r10", "r11", "r12", "p0", "p1", "memory" + ); + + return oldval; +} + +/** + * test_and_set_bit - set a bit and return its old value + * @nr: bit number to set + * @addr: pointer to memory + */ +static inline int test_and_set_bit(int nr, volatile void *addr) +{ + int oldval; + + __asm__ __volatile__ ( + " {R10 = %1; R11 = asr(%2,#5); }\n" + " {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n" + "1: R12 = memw_locked(R10);\n" + " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n" + " memw_locked(R10,P1) = R12;\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" + : "=&r" (oldval) + : "r" (addr), "r" (nr) + : "r10", "r11", "r12", "p0", "p1", "memory" + ); + + + return oldval; + +} + +/** + * test_and_change_bit - toggle a bit and return its old value + * @nr: bit number to set + * @addr: pointer to memory + */ +static inline int test_and_change_bit(int nr, volatile void *addr) +{ + int oldval; + + __asm__ __volatile__ ( + " {R10 = %1; R11 = asr(%2,#5); }\n" + " {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n" + "1: R12 = memw_locked(R10);\n" + " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n" + " memw_locked(R10,P1) = R12;\n" + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" + : "=&r" (oldval) + : "r" (addr), "r" (nr) + : "r10", "r11", "r12", "p0", "p1", "memory" + ); + + return oldval; + +} + +/* + * Atomic, but doesn't care about the return value. + * Rewrite later to save a cycle or two. 
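+ *
+ * A usage sketch for the test_and_ forms above (hypothetical caller and
+ * flag word, not part of this patch):
+ *
+ *	static unsigned long busy_flags;
+ *
+ *	if (!test_and_set_bit(0, &busy_flags)) {
+ *		do_work();			// hypothetical; we won bit 0
+ *		clear_bit(0, &busy_flags);	// release it atomically
+ *	}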
+ */ + +static inline void clear_bit(int nr, volatile void *addr) +{ + test_and_clear_bit(nr, addr); +} + +static inline void set_bit(int nr, volatile void *addr) +{ + test_and_set_bit(nr, addr); +} + +static inline void change_bit(int nr, volatile void *addr) +{ + test_and_change_bit(nr, addr); +} + + +/* + * These are allowed to be non-atomic. In fact the generic flavors are + * in non-atomic.h. Would it be better to use intrinsics for this? + * + * OK, writes in our architecture do not invalidate LL/SC, so this has to + * be atomic, particularly for things like slab_lock and slab_unlock. + * + */ +static inline void __clear_bit(int nr, volatile unsigned long *addr) +{ + test_and_clear_bit(nr, addr); +} + +static inline void __set_bit(int nr, volatile unsigned long *addr) +{ + test_and_set_bit(nr, addr); +} + +static inline void __change_bit(int nr, volatile unsigned long *addr) +{ + test_and_change_bit(nr, addr); +} + +/* Apparently, at least some of these are allowed to be non-atomic */ +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + return test_and_clear_bit(nr, addr); +} + +static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +{ + return test_and_set_bit(nr, addr); +} + +static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) +{ + return test_and_change_bit(nr, addr); +} + +static inline int __test_bit(int nr, const volatile unsigned long *addr) +{ + int retval; + + asm volatile( + "{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n" + : "=&r" (retval) + : "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG) + : "p0" + ); + + return retval; +} + +#define test_bit(nr, addr) __test_bit(nr, addr) + +/* + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. + */ +static inline long ffz(int x) +{ + int r; + + asm("%0 = ct1(%1);\n" + : "=&r" (r) + : "r" (x)); + return r; +} + +/* + * fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static inline int fls(unsigned int x) +{ + int r; + + asm("{ %0 = cl0(%1);}\n" + "%0 = sub(#32,%0);\n" + : "=&r" (r) + : "r" (x) + : "p0"); + + return r; +} + +/* + * ffs - find first bit set + * @x: the word to search + * + * This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ +static inline int ffs(int x) +{ + int r; + + asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n" + "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n" + : "=&r" (r) + : "r" (x) + : "p0"); + + return r; +} + +/* + * __ffs - find first bit in word. + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + * + * bits_per_long assumed to be 32 + * numbering starts at 0 I think (instead of 1 like ffs) + */ +static inline unsigned long __ffs(unsigned long word) +{ + int num; + + asm("%0 = ct0(%1);\n" + : "=&r" (num) + : "r" (word)); + + return num; +} + +/* + * __fls - find last (most-significant) set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first. 
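+ *
+ * Worked example of the numbering conventions across ffs/fls and the
+ * double-underscore forms (values follow the generic kernel definitions):
+ *
+ *	ffs(0x10)   == 5	// 1-based; ffs(0) == 0
+ *	__ffs(0x10) == 4	// 0-based; undefined for 0
+ *	fls(0x10)   == 5	// 1-based; fls(0) == 0
+ *	__fls(0x10) == 4	// 0-based; undefined for 0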
+ * bits_per_long assumed to be 32 + */ +static inline unsigned long __fls(unsigned long word) +{ + int num; + + asm("%0 = cl0(%1);\n" + "%0 = sub(#31,%0);\n" + : "=&r" (num) + : "r" (word)); + + return num; +} + +#include +#include + +#include +#include +#include + +#include +#include + +#endif /* __KERNEL__ */ +#endif diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h new file mode 100644 index 000000000..931719dfa --- /dev/null +++ b/arch/hexagon/include/asm/cache.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Cache definitions for the Hexagon architecture + * + * Copyright (c) 2010-2011,2014 The Linux Foundation. All rights reserved. + */ + +#ifndef __ASM_CACHE_H +#define __ASM_CACHE_H + +/* Bytes per L1 cache line */ +#define L1_CACHE_SHIFT (5) +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES + +#define __cacheline_aligned __aligned(L1_CACHE_BYTES) +#define ____cacheline_aligned __aligned(L1_CACHE_BYTES) + +/* See http://lwn.net/Articles/262554/ */ +#define __read_mostly + +#endif diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h new file mode 100644 index 000000000..6eff0730e --- /dev/null +++ b/arch/hexagon/include/asm/cacheflush.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Cache flush operations for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_CACHEFLUSH_H +#define _ASM_CACHEFLUSH_H + +#include + +/* Cache flushing: + * + * - flush_cache_all() flushes entire cache + * - flush_cache_mm(mm) flushes the specified mm context's cache lines + * - flush_cache_page(mm, vmaddr, pfn) flushes a single page + * - flush_cache_range(vma, start, end) flushes a range of pages + * - flush_icache_range(start, end) flush a range of instructions + * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache + * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache + * + * Need to double-check which one is really needed for ptrace stuff to work. + */ +#define LINESIZE 32 +#define LINEBITS 5 + +/* + * Flush Dcache range through current map. + */ +extern void flush_dcache_range(unsigned long start, unsigned long end); +#define flush_dcache_range flush_dcache_range + +/* + * Flush Icache range through current map. + */ +extern void flush_icache_range(unsigned long start, unsigned long end); +#define flush_icache_range flush_icache_range + +/* + * Memory-management related flushes are there to ensure in non-physically + * indexed cache schemes that stale lines belonging to a given ASID aren't + * in the cache to confuse things. The prototype Hexagon Virtual Machine + * only uses a single ASID for all user-mode maps, which should + * mean that they aren't necessary. A brute-force, flush-everything + * implementation, with the name xxxxx_hexagon() is present in + * arch/hexagon/mm/cache.c, but let's not wire it up until we know + * it is needed. + */ +extern void flush_cache_all_hexagon(void); + +/* + * This may or may not ever have to be non-null, depending on the + * virtual machine MMU. For a native kernel, it's definitely a no-op. + * + * This is also the place where deferred cache coherency stuff seems + * to happen, classically... but instead we do it like ia64 and + * clean the cache when the PTE is set.
+ * + */ +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + /* generic_ptrace_pokedata doesn't wind up here, does it? */ +} + +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long vaddr, void *dst, void *src, int len); +#define copy_to_user_page copy_to_user_page + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end); +extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end); + +#include + +#endif diff --git a/arch/hexagon/include/asm/checksum.h b/arch/hexagon/include/asm/checksum.h new file mode 100644 index 000000000..4bc6ad96c --- /dev/null +++ b/arch/hexagon/include/asm/checksum.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_CHECKSUM_H +#define _ASM_CHECKSUM_H + +#define do_csum do_csum +unsigned int do_csum(const void *voidptr, int len); + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +#define csum_tcpudp_nofold csum_tcpudp_nofold +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); + +#define csum_tcpudp_magic csum_tcpudp_magic +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); + +#include + +#endif diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h new file mode 100644 index 000000000..92b8a02e5 --- /dev/null +++ b/arch/hexagon/include/asm/cmpxchg.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * xchg/cmpxchg operations for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_CMPXCHG_H +#define _ASM_CMPXCHG_H + +/* + * __xchg - atomically exchange a register and a memory location + * @x: value to swap + * @ptr: pointer to memory + * @size: size of the value + * + * Only 4 bytes supported currently. + * + * Note: there was an errata for V2 about .new's and memw_locked. + * + */ +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, + int size) +{ + unsigned long retval; + + /* Can't seem to use printk or panic here, so just stop */ + if (size != 4) do { asm volatile("brkpt;\n"); } while (1); + + __asm__ __volatile__ ( + "1: %0 = memw_locked(%1);\n" /* load into retval */ + " memw_locked(%1,P0) = %2;\n" /* store into memory */ + " if (!P0) jump 1b;\n" + : "=&r" (retval) + : "r" (ptr), "r" (x) + : "memory", "p0" + ); + return retval; +} + +/* + * Atomically swap the contents of a register with memory. Should be atomic + * between multiple CPU's and within interrupts on the same CPU. + */ +#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \ + sizeof(*(ptr)))) + +/* + * see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps. + * looks just like atomic_cmpxchg on our arch currently with a bunch of + * variable casting. 
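+ *
+ * Typical retry-loop usage (illustrative sketch only; ptr and FLAG are
+ * made-up names):
+ *
+ *	unsigned int old, new;
+ *
+ *	do {
+ *		old = READ_ONCE(*ptr);
+ *		new = old | FLAG;
+ *	} while (cmpxchg(ptr, old, new) != old);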
+ */ + +#define cmpxchg(ptr, old, new) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(*(ptr)) __old = (old); \ + __typeof__(*(ptr)) __new = (new); \ + __typeof__(*(ptr)) __oldval = 0; \ + \ + asm volatile( \ + "1: %0 = memw_locked(%1);\n" \ + " { P0 = cmp.eq(%0,%2);\n" \ + " if (!P0.new) jump:nt 2f; }\n" \ + " memw_locked(%1,p0) = %3;\n" \ + " if (!P0) jump 1b;\n" \ + "2:\n" \ + : "=&r" (__oldval) \ + : "r" (__ptr), "r" (__old), "r" (__new) \ + : "memory", "p0" \ + ); \ + __oldval; \ +}) + +#endif /* _ASM_CMPXCHG_H */ diff --git a/arch/hexagon/include/asm/delay.h b/arch/hexagon/include/asm/delay.h new file mode 100644 index 000000000..2f24bbf9f --- /dev/null +++ b/arch/hexagon/include/asm/delay.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_DELAY_H +#define _ASM_DELAY_H + +#include + +extern void __delay(unsigned long cycles); +extern void __udelay(unsigned long usecs); + +#define udelay(usecs) __udelay((usecs)) + +#endif /* _ASM_DELAY_H */ diff --git a/arch/hexagon/include/asm/dma.h b/arch/hexagon/include/asm/dma.h new file mode 100644 index 000000000..686ae566a --- /dev/null +++ b/arch/hexagon/include/asm/dma.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_DMA_H +#define _ASM_DMA_H + +#include + +#define MAX_DMA_CHANNELS 1 +#define MAX_DMA_ADDRESS (PAGE_OFFSET) + +extern size_t hexagon_coherent_pool_size; + +#endif diff --git a/arch/hexagon/include/asm/elf.h b/arch/hexagon/include/asm/elf.h new file mode 100644 index 000000000..9efa203e1 --- /dev/null +++ b/arch/hexagon/include/asm/elf.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * ELF definitions for the Hexagon architecture + * + * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. + */ + +#ifndef __ASM_ELF_H +#define __ASM_ELF_H + +#include +#include +#include + +struct elf32_hdr; + +/* + * ELF header e_flags defines. 
+ */ + +/* should have stuff like "CPU type" and maybe "ABI version", etc */ + +/* Hexagon relocations */ + /* V2 */ +#define R_HEXAGON_NONE 0 +#define R_HEXAGON_B22_PCREL 1 +#define R_HEXAGON_B15_PCREL 2 +#define R_HEXAGON_B7_PCREL 3 +#define R_HEXAGON_LO16 4 +#define R_HEXAGON_HI16 5 +#define R_HEXAGON_32 6 +#define R_HEXAGON_16 7 +#define R_HEXAGON_8 8 +#define R_HEXAGON_GPREL16_0 9 +#define R_HEXAGON_GPREL16_1 10 +#define R_HEXAGON_GPREL16_2 11 +#define R_HEXAGON_GPREL16_3 12 +#define R_HEXAGON_HL16 13 + /* V3 */ +#define R_HEXAGON_B13_PCREL 14 + /* V4 */ +#define R_HEXAGON_B9_PCREL 15 + /* V4 (extenders) */ +#define R_HEXAGON_B32_PCREL_X 16 +#define R_HEXAGON_32_6_X 17 + /* V4 (extended) */ +#define R_HEXAGON_B22_PCREL_X 18 +#define R_HEXAGON_B15_PCREL_X 19 +#define R_HEXAGON_B13_PCREL_X 20 +#define R_HEXAGON_B9_PCREL_X 21 +#define R_HEXAGON_B7_PCREL_X 22 +#define R_HEXAGON_16_X 23 +#define R_HEXAGON_12_X 24 +#define R_HEXAGON_11_X 25 +#define R_HEXAGON_10_X 26 +#define R_HEXAGON_9_X 27 +#define R_HEXAGON_8_X 28 +#define R_HEXAGON_7_X 29 +#define R_HEXAGON_6_X 30 + /* V2 PIC */ +#define R_HEXAGON_32_PCREL 31 +#define R_HEXAGON_COPY 32 +#define R_HEXAGON_GLOB_DAT 33 +#define R_HEXAGON_JMP_SLOT 34 +#define R_HEXAGON_RELATIVE 35 +#define R_HEXAGON_PLT_B22_PCREL 36 +#define R_HEXAGON_GOTOFF_LO16 37 +#define R_HEXAGON_GOTOFF_HI16 38 +#define R_HEXAGON_GOTOFF_32 39 +#define R_HEXAGON_GOT_LO16 40 +#define R_HEXAGON_GOT_HI16 41 +#define R_HEXAGON_GOT_32 42 +#define R_HEXAGON_GOT_16 43 + +/* + * ELF register definitions.. + */ +typedef unsigned long elf_greg_t; + +typedef struct user_regs_struct elf_gregset_t; +#define ELF_NGREG (sizeof(elf_gregset_t)/sizeof(unsigned long)) + +/* Placeholder */ +typedef unsigned long elf_fpregset_t; + +/* + * Bypass the whole "regsets" thing for now and use the define. + */ + +#if CONFIG_HEXAGON_ARCH_VERSION >= 4 +#define CS_COPYREGS(DEST,REGS) \ +do {\ + DEST.cs0 = REGS->cs0;\ + DEST.cs1 = REGS->cs1;\ +} while (0) +#else +#define CS_COPYREGS(DEST,REGS) +#endif + +#define ELF_CORE_COPY_REGS(DEST, REGS) \ +do { \ + DEST.r0 = REGS->r00; \ + DEST.r1 = REGS->r01; \ + DEST.r2 = REGS->r02; \ + DEST.r3 = REGS->r03; \ + DEST.r4 = REGS->r04; \ + DEST.r5 = REGS->r05; \ + DEST.r6 = REGS->r06; \ + DEST.r7 = REGS->r07; \ + DEST.r8 = REGS->r08; \ + DEST.r9 = REGS->r09; \ + DEST.r10 = REGS->r10; \ + DEST.r11 = REGS->r11; \ + DEST.r12 = REGS->r12; \ + DEST.r13 = REGS->r13; \ + DEST.r14 = REGS->r14; \ + DEST.r15 = REGS->r15; \ + DEST.r16 = REGS->r16; \ + DEST.r17 = REGS->r17; \ + DEST.r18 = REGS->r18; \ + DEST.r19 = REGS->r19; \ + DEST.r20 = REGS->r20; \ + DEST.r21 = REGS->r21; \ + DEST.r22 = REGS->r22; \ + DEST.r23 = REGS->r23; \ + DEST.r24 = REGS->r24; \ + DEST.r25 = REGS->r25; \ + DEST.r26 = REGS->r26; \ + DEST.r27 = REGS->r27; \ + DEST.r28 = REGS->r28; \ + DEST.r29 = pt_psp(REGS); \ + DEST.r30 = REGS->r30; \ + DEST.r31 = REGS->r31; \ + DEST.sa0 = REGS->sa0; \ + DEST.lc0 = REGS->lc0; \ + DEST.sa1 = REGS->sa1; \ + DEST.lc1 = REGS->lc1; \ + DEST.m0 = REGS->m0; \ + DEST.m1 = REGS->m1; \ + DEST.usr = REGS->usr; \ + DEST.p3_0 = REGS->preds; \ + DEST.gp = REGS->gp; \ + DEST.ugp = REGS->ugp; \ + CS_COPYREGS(DEST,REGS); \ + DEST.pc = pt_elr(REGS); \ + DEST.cause = pt_cause(REGS); \ + DEST.badva = pt_badva(REGS); \ +} while (0); + +/* + * This is used to ensure we don't load something for the wrong architecture. + * Checks the machine and ABI type. + */ +#define elf_check_arch(hdr) ((hdr)->e_machine == EM_HEXAGON) + +/* + * These are used to set parameters in the core dumps. 
+ */ +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_HEXAGON + +#if CONFIG_HEXAGON_ARCH_VERSION == 2 +#define ELF_CORE_EFLAGS 0x1 +#endif + +#if CONFIG_HEXAGON_ARCH_VERSION == 3 +#define ELF_CORE_EFLAGS 0x2 +#endif + +#if CONFIG_HEXAGON_ARCH_VERSION == 4 +#define ELF_CORE_EFLAGS 0x3 +#endif + +/* + * Some architectures have ld.so set up a pointer to a function + * to be registered using atexit, to facilitate cleanup. So that + * static executables will be well-behaved, we would null the register + * in question here, in the pt_regs structure passed. For now, + * leave it a null macro. + */ +#define ELF_PLAT_INIT(regs, load_addr) do { } while (0) + +#define USE_ELF_CORE_DUMP +#define CORE_DUMP_USE_REGSET + +/* Hrm is this going to cause problems for changing PAGE_SIZE? */ +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ +#define ELF_ET_DYN_BASE 0x08000000UL + +/* + * This yields a mask that user programs can use to figure out what + * instruction set this cpu supports. + */ +#define ELF_HWCAP (0) + +/* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. + */ +#define ELF_PLATFORM (NULL) + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); + + +#endif diff --git a/arch/hexagon/include/asm/exec.h b/arch/hexagon/include/asm/exec.h new file mode 100644 index 000000000..bb79e23db --- /dev/null +++ b/arch/hexagon/include/asm/exec.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Process execution related definitions for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_EXEC_H +#define _ASM_EXEC_H + +/* Should probably shoot for an 8-byte aligned stack pointer */ +#define STACK_MASK (~7) +#define arch_align_stack(x) (x & STACK_MASK) + +#endif /* _ASM_EXEC_H */ diff --git a/arch/hexagon/include/asm/fixmap.h b/arch/hexagon/include/asm/fixmap.h new file mode 100644 index 000000000..920660a04 --- /dev/null +++ b/arch/hexagon/include/asm/fixmap.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Fixmap support for Hexagon - enough to support highmem features + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_FIXMAP_H +#define _ASM_FIXMAP_H + +/* + * A lot of the fixmap info is already in mem-layout.h + */ +#include + +#include + +#endif diff --git a/arch/hexagon/include/asm/fpu.h b/arch/hexagon/include/asm/fpu.h new file mode 100644 index 000000000..0e135ea8c --- /dev/null +++ b/arch/hexagon/include/asm/fpu.h @@ -0,0 +1,4 @@ +/* + * If the FPU is used inside the kernel, + * kernel_fpu_end() will be defined here. 
+ */ diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h new file mode 100644 index 000000000..9fb00a0ae --- /dev/null +++ b/arch/hexagon/include/asm/futex.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_HEXAGON_FUTEX_H +#define _ASM_HEXAGON_FUTEX_H + +#ifdef __KERNEL__ + +#include +#include +#include + +/* XXX TODO-- need to add sync barriers! */ + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ + __asm__ __volatile( \ + "1: %0 = memw_locked(%3);\n" \ + /* For example: %1 = %4 */ \ + insn \ + "2: memw_locked(%3,p2) = %1;\n" \ + " if (!p2) jump 1b;\n" \ + " %1 = #0;\n" \ + "3:\n" \ + ".section .fixup,\"ax\"\n" \ + "4: %1 = #%5;\n" \ + " jump ##3b\n" \ + ".previous\n" \ + ".section __ex_table,\"a\"\n" \ + ".long 1b,4b,2b,4b\n" \ + ".previous\n" \ + : "=&r" (oldval), "=&r" (ret), "+m" (*uaddr) \ + : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ + : "p2", "memory") + + +static inline int +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("%1 = %4\n", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("%1 = add(%0,%4)\n", ret, oldval, uaddr, + oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("%1 = or(%0,%4)\n", ret, oldval, uaddr, + oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("%1 = not(%4); %1 = and(%0,%1)\n", ret, + oldval, uaddr, oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("%1 = xor(%0,%4)\n", ret, oldval, uaddr, + oparg); + break; + default: + ret = -ENOSYS; + } + + if (!ret) + *oval = oldval; + + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, + u32 newval) +{ + int prev; + int ret; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( + "1: %1 = memw_locked(%3)\n" + " {\n" + " p2 = cmp.eq(%1,%4)\n" + " if (!p2.new) jump:NT 3f\n" + " }\n" + "2: memw_locked(%3,p2) = %5\n" + " if (!p2) jump 1b\n" + "3:\n" + ".section .fixup,\"ax\"\n" + "4: %0 = #%6\n" + " jump ##3b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".long 1b,4b,2b,4b\n" + ".previous\n" + : "+r" (ret), "=&r" (prev), "+m" (*uaddr) + : "r" (uaddr), "r" (oldval), "r" (newval), "i"(-EFAULT) + : "p2", "memory"); + + *uval = prev; + return ret; +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_HEXAGON_FUTEX_H */ diff --git a/arch/hexagon/include/asm/hexagon_vm.h b/arch/hexagon/include/asm/hexagon_vm.h new file mode 100644 index 000000000..9aa2493fe --- /dev/null +++ b/arch/hexagon/include/asm/hexagon_vm.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Declarations for the Hexagon Virtual Machine. + * + * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. + */ + +#ifndef ASM_HEXAGON_VM_H +#define ASM_HEXAGON_VM_H + +/* + * In principle, a Linux kernel for the VM could + * selectively define the virtual instructions + * as inline assembler macros, but for a first pass, + * we'll use subroutines for both the VM and the native + * kernels. It's costing a subroutine call/return, + * but it makes for a single set of entry points + * for tracing/debugging.
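+ *
+ * Conceptually, each entry point is a thin stub that issues a trap1
+ * with the matching call number defined below; in pseudo-assembly
+ * (a sketch only -- the real stubs live in the kernel's VM entry
+ * code, not in this header):
+ *
+ *	__vmsetie:
+ *		// argument already in r0
+ *		trap1(#HVM_TRAP1_VMSETIE);
+ *		jumpr r31;	// result comes back in r0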
+ */ + +#define HVM_TRAP1_VMVERSION 0 +#define HVM_TRAP1_VMRTE 1 +#define HVM_TRAP1_VMSETVEC 2 +#define HVM_TRAP1_VMSETIE 3 +#define HVM_TRAP1_VMGETIE 4 +#define HVM_TRAP1_VMINTOP 5 +#define HVM_TRAP1_VMCLRMAP 10 +#define HVM_TRAP1_VMNEWMAP 11 +#define HVM_TRAP1_FORMERLY_VMWIRE 12 +#define HVM_TRAP1_VMCACHE 13 +#define HVM_TRAP1_VMGETTIME 14 +#define HVM_TRAP1_VMSETTIME 15 +#define HVM_TRAP1_VMWAIT 16 +#define HVM_TRAP1_VMYIELD 17 +#define HVM_TRAP1_VMSTART 18 +#define HVM_TRAP1_VMSTOP 19 +#define HVM_TRAP1_VMVPID 20 +#define HVM_TRAP1_VMSETREGS 21 +#define HVM_TRAP1_VMGETREGS 22 +#define HVM_TRAP1_VMTIMEROP 24 + +#ifndef __ASSEMBLY__ + +enum VM_CACHE_OPS { + hvmc_ickill, + hvmc_dckill, + hvmc_l2kill, + hvmc_dccleaninva, + hvmc_icinva, + hvmc_idsync, + hvmc_fetch_cfg +}; + +enum VM_INT_OPS { + hvmi_nop, + hvmi_globen, + hvmi_globdis, + hvmi_locen, + hvmi_locdis, + hvmi_affinity, + hvmi_get, + hvmi_peek, + hvmi_status, + hvmi_post, + hvmi_clear +}; + +extern void _K_VM_event_vector(void); + +void __vmrte(void); +long __vmsetvec(void *); +long __vmsetie(long); +long __vmgetie(void); +long __vmintop(enum VM_INT_OPS, long, long, long, long); +long __vmclrmap(void *, unsigned long); +long __vmnewmap(void *); +long __vmcache(enum VM_CACHE_OPS op, unsigned long addr, unsigned long len); +unsigned long long __vmgettime(void); +long __vmsettime(unsigned long long); +long __vmstart(void *, void *); +void __vmstop(void); +long __vmwait(void); +void __vmyield(void); +long __vmvpid(void); + +static inline long __vmcache_ickill(void) +{ + return __vmcache(hvmc_ickill, 0, 0); +} + +static inline long __vmcache_dckill(void) +{ + return __vmcache(hvmc_dckill, 0, 0); +} + +static inline long __vmcache_l2kill(void) +{ + return __vmcache(hvmc_l2kill, 0, 0); +} + +static inline long __vmcache_dccleaninva(unsigned long addr, unsigned long len) +{ + return __vmcache(hvmc_dccleaninva, addr, len); +} + +static inline long __vmcache_icinva(unsigned long addr, unsigned long len) +{ + return __vmcache(hvmc_icinva, addr, len); +} + +static inline long __vmcache_idsync(unsigned long addr, + unsigned long len) +{ + return __vmcache(hvmc_idsync, addr, len); +} + +static inline long __vmcache_fetch_cfg(unsigned long val) +{ + return __vmcache(hvmc_fetch_cfg, val, 0); +} + +/* interrupt operations */ + +static inline long __vmintop_nop(void) +{ + return __vmintop(hvmi_nop, 0, 0, 0, 0); +} + +static inline long __vmintop_globen(long i) +{ + return __vmintop(hvmi_globen, i, 0, 0, 0); +} + +static inline long __vmintop_globdis(long i) +{ + return __vmintop(hvmi_globdis, i, 0, 0, 0); +} + +static inline long __vmintop_locen(long i) +{ + return __vmintop(hvmi_locen, i, 0, 0, 0); +} + +static inline long __vmintop_locdis(long i) +{ + return __vmintop(hvmi_locdis, i, 0, 0, 0); +} + +static inline long __vmintop_affinity(long i, long cpu) +{ + return __vmintop(hvmi_affinity, i, cpu, 0, 0); +} + +static inline long __vmintop_get(void) +{ + return __vmintop(hvmi_get, 0, 0, 0, 0); +} + +static inline long __vmintop_peek(void) +{ + return __vmintop(hvmi_peek, 0, 0, 0, 0); +} + +static inline long __vmintop_status(long i) +{ + return __vmintop(hvmi_status, i, 0, 0, 0); +} + +static inline long __vmintop_post(long i) +{ + return __vmintop(hvmi_post, i, 0, 0, 0); +} + +static inline long __vmintop_clear(long i) +{ + return __vmintop(hvmi_clear, i, 0, 0, 0); +} + +#else /* Only assembly code should reference these */ + +#endif /* __ASSEMBLY__ */ + +/* + * Constants for virtual instruction parameters and return values + */ + +/* vmnewmap 
arguments */ + +#define VM_TRANS_TYPE_LINEAR 0 +#define VM_TRANS_TYPE_TABLE 1 +#define VM_TLB_INVALIDATE_FALSE 0 +#define VM_TLB_INVALIDATE_TRUE 1 + +/* vmsetie arguments */ + +#define VM_INT_DISABLE 0 +#define VM_INT_ENABLE 1 + +/* vmsetimask arguments */ + +#define VM_INT_UNMASK 0 +#define VM_INT_MASK 1 + +#define VM_NEWMAP_TYPE_LINEAR 0 +#define VM_NEWMAP_TYPE_PGTABLES 1 + + +/* + * Event Record definitions useful to both C and Assembler + */ + +/* VMEST Layout */ + +#define HVM_VMEST_UM_SFT 31 +#define HVM_VMEST_UM_MSK 1 +#define HVM_VMEST_IE_SFT 30 +#define HVM_VMEST_IE_MSK 1 +#define HVM_VMEST_SS_SFT 29 +#define HVM_VMEST_SS_MSK 1 +#define HVM_VMEST_EVENTNUM_SFT 16 +#define HVM_VMEST_EVENTNUM_MSK 0xff +#define HVM_VMEST_CAUSE_SFT 0 +#define HVM_VMEST_CAUSE_MSK 0xffff + +/* + * The initial program gets to find a system environment descriptor + * on its stack when it begins execution. The first word is a version + * code to indicate what is there. Zero means nothing more. + */ + +#define HEXAGON_VM_SED_NULL 0 + +/* + * Event numbers for vector binding + */ + +#define HVM_EV_RESET 0 +#define HVM_EV_MACHCHECK 1 +#define HVM_EV_GENEX 2 +#define HVM_EV_TRAP 8 +#define HVM_EV_INTR 15 +/* These should be nuked as soon as we know the VM is up to spec v0.1.1 */ +#define HVM_EV_INTR_0 16 +#define HVM_MAX_INTR 240 + +/* + * Cause values for General Exception + */ + +#define HVM_GE_C_BUS 0x01 +#define HVM_GE_C_XPROT 0x11 +#define HVM_GE_C_XUSER 0x14 +#define HVM_GE_C_INVI 0x15 +#define HVM_GE_C_PRIVI 0x1B +#define HVM_GE_C_XMAL 0x1C +#define HVM_GE_C_WREG 0x1D +#define HVM_GE_C_PCAL 0x1E +#define HVM_GE_C_RMAL 0x20 +#define HVM_GE_C_WMAL 0x21 +#define HVM_GE_C_RPROT 0x22 +#define HVM_GE_C_WPROT 0x23 +#define HVM_GE_C_RUSER 0x24 +#define HVM_GE_C_WUSER 0x25 +#define HVM_GE_C_CACHE 0x28 + +/* + * Cause codes for Machine Check + */ + +#define HVM_MCHK_C_DOWN 0x00 +#define HVM_MCHK_C_BADSP 0x01 +#define HVM_MCHK_C_BADEX 0x02 +#define HVM_MCHK_C_BADPT 0x03 +#define HVM_MCHK_C_REGWR 0x29 + +#endif diff --git a/arch/hexagon/include/asm/intrinsics.h b/arch/hexagon/include/asm/intrinsics.h new file mode 100644 index 000000000..e8d668392 --- /dev/null +++ b/arch/hexagon/include/asm/intrinsics.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_HEXAGON_INTRINSICS_H +#define _ASM_HEXAGON_INTRINSICS_H + +#define HEXAGON_P_vrmpyhacc_PP __builtin_HEXAGON_M2_vrmac_s0 +#define HEXAGON_P_vrmpyh_PP __builtin_HEXAGON_M2_vrmpy_s0 +#define HEXAGON_R_cl0_R __builtin_HEXAGON_S2_cl0 + +#endif diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h new file mode 100644 index 000000000..bda2a9c2d --- /dev/null +++ b/arch/hexagon/include/asm/io.h @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * IO definitions for the Hexagon architecture + * + * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_IO_H +#define _ASM_IO_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +/* + * We don't have PCI yet. + * _IO_BASE is pointing at what should be unused virtual space. + */ +#define IO_SPACE_LIMIT 0xffff +#define _IO_BASE ((void __iomem *)0xfe000000) + +#define IOMEM(x) ((void __force __iomem *)(x)) + +extern int remap_area_pages(unsigned long start, unsigned long phys_addr, + unsigned long end, unsigned long flags); + +extern void iounmap(const volatile void __iomem *addr); + +/* Defined in lib/io.c, needed for smc91x driver.
*/ +extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen); +extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen); + +extern void __raw_readsl(const void __iomem *addr, void *data, int wordlen); +extern void __raw_writesl(void __iomem *addr, const void *data, int wordlen); + +#define readsw(p, d, l) __raw_readsw(p, d, l) +#define writesw(p, d, l) __raw_writesw(p, d, l) + +#define readsl(p, d, l) __raw_readsl(p, d, l) +#define writesl(p, d, l) __raw_writesl(p, d, l) + +/* + * virt_to_phys - map virtual address to physical + * @address: address to map + */ +static inline unsigned long virt_to_phys(volatile void *address) +{ + return __pa(address); +} + +/* + * phys_to_virt - map physical address to virtual + * @address: address to map + */ +static inline void *phys_to_virt(unsigned long address) +{ + return __va(address); +} + +/* + * convert a physical pointer to a virtual kernel pointer for + * /dev/mem access. + */ +#define xlate_dev_kmem_ptr(p) __va(p) +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * IO port access primitives. Hexagon doesn't have special IO access + * instructions; all I/O is memory mapped. + * + * in/out are used for "ports", but we don't have "port instructions", + * so these are really just memory mapped too. + */ + +/* + * readb - read byte from memory mapped device + * @addr: pointer to memory + * + * Operates on "I/O bus memory space" + */ +static inline u8 readb(const volatile void __iomem *addr) +{ + u8 val; + asm volatile( + "%0 = memb(%1);" + : "=&r" (val) + : "r" (addr) + ); + return val; +} + +static inline u16 readw(const volatile void __iomem *addr) +{ + u16 val; + asm volatile( + "%0 = memh(%1);" + : "=&r" (val) + : "r" (addr) + ); + return val; +} + +static inline u32 readl(const volatile void __iomem *addr) +{ + u32 val; + asm volatile( + "%0 = memw(%1);" + : "=&r" (val) + : "r" (addr) + ); + return val; +} + +/* + * writeb - write a byte to a memory location + * @data: data to write to + * @addr: pointer to memory + * + */ +static inline void writeb(u8 data, volatile void __iomem *addr) +{ + asm volatile( + "memb(%0) = %1;" + : + : "r" (addr), "r" (data) + : "memory" + ); +} + +static inline void writew(u16 data, volatile void __iomem *addr) +{ + asm volatile( + "memh(%0) = %1;" + : + : "r" (addr), "r" (data) + : "memory" + ); + +} + +static inline void writel(u32 data, volatile void __iomem *addr) +{ + asm volatile( + "memw(%0) = %1;" + : + : "r" (addr), "r" (data) + : "memory" + ); +} + +#define __raw_writeb writeb +#define __raw_writew writew +#define __raw_writel writel + +#define __raw_readb readb +#define __raw_readw readw +#define __raw_readl readl + +/* + * http://comments.gmane.org/gmane.linux.ports.arm.kernel/117626 + */ + +#define readb_relaxed __raw_readb +#define readw_relaxed __raw_readw +#define readl_relaxed __raw_readl + +#define writeb_relaxed __raw_writeb +#define writew_relaxed __raw_writew +#define writel_relaxed __raw_writel + +void __iomem *ioremap(unsigned long phys_addr, unsigned long size); +#define ioremap_uc(X, Y) ioremap((X), (Y)) + + +#define __raw_writel writel + +static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, + int count) +{ + memcpy(dst, (void *) src, count); +} + +static inline void memcpy_toio(volatile void __iomem *dst, const void *src, + int count) +{ + memcpy((void *) dst, src, count); +} + +static inline void memset_io(volatile void __iomem *addr, int value, + size_t size) +{ + memset((void __force *)addr, value, size); +} + +#define 
PCI_IO_ADDR (volatile void __iomem *) + +/* + * inb - read byte from I/O port + * @port: address in I/O space + * + * Operates on "I/O bus I/O space" + */ +static inline u8 inb(unsigned long port) +{ + return readb(_IO_BASE + (port & IO_SPACE_LIMIT)); +} + +static inline u16 inw(unsigned long port) +{ + return readw(_IO_BASE + (port & IO_SPACE_LIMIT)); +} + +static inline u32 inl(unsigned long port) +{ + return readl(_IO_BASE + (port & IO_SPACE_LIMIT)); +} + +/* + * outb - write a byte to an I/O port + * @data: data to write + * @port: address in I/O space + */ +static inline void outb(u8 data, unsigned long port) +{ + writeb(data, _IO_BASE + (port & IO_SPACE_LIMIT)); +} + +static inline void outw(u16 data, unsigned long port) +{ + writew(data, _IO_BASE + (port & IO_SPACE_LIMIT)); +} + +static inline void outl(u32 data, unsigned long port) +{ + writel(data, _IO_BASE + (port & IO_SPACE_LIMIT)); +} + +#define outb_p outb +#define outw_p outw +#define outl_p outl + +#define inb_p inb +#define inw_p inw +#define inl_p inl + +static inline void insb(unsigned long port, void *buffer, int count) +{ + if (count) { + u8 *buf = buffer; + do { + u8 x = inb(port); + *buf++ = x; + } while (--count); + } +} + +static inline void insw(unsigned long port, void *buffer, int count) +{ + if (count) { + u16 *buf = buffer; + do { + u16 x = inw(port); + *buf++ = x; + } while (--count); + } +} + +static inline void insl(unsigned long port, void *buffer, int count) +{ + if (count) { + u32 *buf = buffer; + do { + u32 x = inl(port); + *buf++ = x; + } while (--count); + } +} + +static inline void outsb(unsigned long port, const void *buffer, int count) +{ + if (count) { + const u8 *buf = buffer; + do { + outb(*buf++, port); + } while (--count); + } +} + +static inline void outsw(unsigned long port, const void *buffer, int count) +{ + if (count) { + const u16 *buf = buffer; + do { + outw(*buf++, port); + } while (--count); + } +} + +static inline void outsl(unsigned long port, const void *buffer, int count) +{ + if (count) { + const u32 *buf = buffer; + do { + outl(*buf++, port); + } while (--count); + } +} + +#endif /* __KERNEL__ */ + +#endif diff --git a/arch/hexagon/include/asm/irq.h b/arch/hexagon/include/asm/irq.h new file mode 100644 index 000000000..1f7f1292f --- /dev/null +++ b/arch/hexagon/include/asm/irq.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_IRQ_H_ +#define _ASM_IRQ_H_ + +/* Number of first-level interrupts associated with the CPU core. */ +#define HEXAGON_CPUINTS 32 + +/* + * Must define NR_IRQS before including + * 64 == the two SIRC's, 176 == the two gpio's + * + * IRQ configuration is still in flux; defining this to a comfortably + * large number. + */ +#define NR_IRQS 512 + +#include + +#endif diff --git a/arch/hexagon/include/asm/irqflags.h b/arch/hexagon/include/asm/irqflags.h new file mode 100644 index 000000000..c4d2a4de1 --- /dev/null +++ b/arch/hexagon/include/asm/irqflags.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * IRQ support for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
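+ *
+ * These map the generic irqflags hooks onto the VM interrupt-enable
+ * state. The standard calling pattern (generic kernel idiom, shown
+ * for illustration):
+ *
+ *	unsigned long flags;
+ *
+ *	flags = arch_local_irq_save();	// returns the previous IE state
+ *	// ... critical section ...
+ *	arch_local_irq_restore(flags);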
+ */ + +#ifndef _ASM_IRQFLAGS_H +#define _ASM_IRQFLAGS_H + +#include +#include + +static inline unsigned long arch_local_save_flags(void) +{ + return __vmgetie(); +} + +static inline unsigned long arch_local_irq_save(void) +{ + return __vmsetie(VM_INT_DISABLE); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return !flags; +} + +static inline bool arch_irqs_disabled(void) +{ + return !__vmgetie(); +} + +static inline void arch_local_irq_enable(void) +{ + __vmsetie(VM_INT_ENABLE); +} + +static inline void arch_local_irq_disable(void) +{ + __vmsetie(VM_INT_DISABLE); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + __vmsetie(flags); +} + +#endif diff --git a/arch/hexagon/include/asm/kgdb.h b/arch/hexagon/include/asm/kgdb.h new file mode 100644 index 000000000..30cc387b3 --- /dev/null +++ b/arch/hexagon/include/asm/kgdb.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * arch/hexagon/include/asm/kgdb.h - Hexagon KGDB Support + * + * Copyright (c) 2011, The Linux Foundation. All rights reserved. + */ + +#ifndef __HEXAGON_KGDB_H__ +#define __HEXAGON_KGDB_H__ + +#define BREAK_INSTR_SIZE 4 +#define CACHE_FLUSH_IS_SAFE 1 +#define BUFMAX ((NUMREGBYTES * 2) + 512) + +static inline void arch_kgdb_breakpoint(void) +{ + asm("trap0(#0xDB)"); +} + +/* Registers: + * 32 gpr + sa0/1 + lc0/1 + m0/1 + gp + ugp + pred + pc = 42 total. + * vm regs = psp+elr+est+badva = 4 + * syscall+restart = 2 more + * also add cs0/1 = 2 + * so 50 = 42 + 4 + 2 + 2 + */ +#define DBG_USER_REGS 42 +#define DBG_MAX_REG_NUM (DBG_USER_REGS + 8) +#define NUMREGBYTES (DBG_MAX_REG_NUM*4) + +#endif /* __HEXAGON_KGDB_H__ */ diff --git a/arch/hexagon/include/asm/linkage.h b/arch/hexagon/include/asm/linkage.h new file mode 100644 index 000000000..ebdb58193 --- /dev/null +++ b/arch/hexagon/include/asm/linkage.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +#define __ALIGN .align 4 +#define __ALIGN_STR ".align 4" + +#endif diff --git a/arch/hexagon/include/asm/mem-layout.h b/arch/hexagon/include/asm/mem-layout.h new file mode 100644 index 000000000..e2f99413f --- /dev/null +++ b/arch/hexagon/include/asm/mem-layout.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Memory layout definitions for the Hexagon architecture + * + * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_HEXAGON_MEM_LAYOUT_H +#define _ASM_HEXAGON_MEM_LAYOUT_H + +#include + +/* + * Have to do this for ginormous numbers, else they get printed as + * negative numbers, which the linker does not like when you try to + * assign them to the location counter. + */ + +#define PAGE_OFFSET _AC(0xc0000000, UL) + +/* + * Compiling for a platform that needs a crazy physical offset + * (like if the memory starts at 1GB and up) means we need + * an actual PHYS_OFFSET. Should be set up in head.S.
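+ *
+ * Worked example (hypothetical board with RAM at 0x40000000): with
+ * the __pa()/__va() definitions in asm/page.h and PAGE_OFFSET as
+ * above,
+ *
+ *	__pa(0xc0001000) == 0x40001000	// virt - PAGE_OFFSET + PHYS_OFFSET
+ *	__va(0x40001000) == (void *)0xc0001000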
+ */ + +#ifdef CONFIG_HEXAGON_PHYS_OFFSET +#ifndef __ASSEMBLY__ +extern unsigned long __phys_offset; +#endif +#define PHYS_OFFSET __phys_offset +#endif + +#ifndef PHYS_OFFSET +#define PHYS_OFFSET 0 +#endif + +#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) +#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET + +#define TASK_SIZE (PAGE_OFFSET) + +/* not sure how these are used yet */ +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX TASK_SIZE + +#ifndef __ASSEMBLY__ +enum fixed_addresses { + FIX_KMAP_BEGIN, + FIX_KMAP_END, /* check for per-cpuism */ + __end_of_fixed_addresses +}; + +#define MIN_KERNEL_SEG (PAGE_OFFSET >> PGDIR_SHIFT) /* L1 shift is 22 bits */ +extern int max_kernel_seg; + +/* + * Start of vmalloc virtual address space for kernel; + * supposed to be based on the amount of physical memory available + */ + +#define VMALLOC_START ((unsigned long) __va(high_memory + VMALLOC_OFFSET)) + +/* Gap between physical ram and vmalloc space for guard purposes. */ +#define VMALLOC_OFFSET PAGE_SIZE + +/* + * Create the space between VMALLOC_START and FIXADDR_TOP backwards + * from the ... "top". + * + * Permanent IO mappings will live at 0xfexx_xxxx + * Hypervisor occupies the last 16MB page at 0xffxxxxxx + */ + +#define FIXADDR_TOP 0xfe000000 +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +/* + * "permanent kernel mappings", defined as long-lasting mappings of + * high-memory page frames into the kernel address space. + */ + +#define LAST_PKMAP PTRS_PER_PTE +#define LAST_PKMAP_MASK (LAST_PKMAP - 1) +#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +/* + * To the "left" of the fixed map space is the kmap space + * + * "Permanent Kernel Mappings"; fancy (or less fancy) PTE table + * that looks like it's actually walked. + * Need to check the alignment/shift usage; some archs use + * PMD_MASK on this value + */ +#define PKMAP_BASE (FIXADDR_START-PAGE_SIZE*LAST_PKMAP) + +/* + * 2 pages of guard gap between where vmalloc area ends + * and pkmap_base begins. + */ +#define VMALLOC_END (PKMAP_BASE-PAGE_SIZE*2) +#endif /* !__ASSEMBLY__ */ + + +#endif /* _ASM_HEXAGON_MEM_LAYOUT_H */ diff --git a/arch/hexagon/include/asm/mmu.h b/arch/hexagon/include/asm/mmu.h new file mode 100644 index 000000000..dfa46660d --- /dev/null +++ b/arch/hexagon/include/asm/mmu.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_MMU_H +#define _ASM_MMU_H + +#include + +/* + * Architecture-specific state for a mm_struct. + * For the Hexagon Virtual Machine, it can be a copy + * of the pointer to the page table base. + */ +struct mm_context { + unsigned long long generation; + unsigned long ptbase; + struct hexagon_vdso *vdso; +}; + +typedef struct mm_context mm_context_t; + +#endif diff --git a/arch/hexagon/include/asm/mmu_context.h b/arch/hexagon/include/asm/mmu_context.h new file mode 100644 index 000000000..cdc4adc03 --- /dev/null +++ b/arch/hexagon/include/asm/mmu_context.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * MM context support for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _ASM_MMU_CONTEXT_H +#define _ASM_MMU_CONTEXT_H + +#include + +#include +#include +#include +#include + +static inline void destroy_context(struct mm_struct *mm) +{ +} + +/* + * VM port hides all TLB management, so "lazy TLB" isn't very + * meaningful. Even for ports to architectures with visible TLBs, + * this is almost invariably a null function. + */ +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + +/* + * Architecture-specific actions, if any, for memory map deactivation. + */ +static inline void deactivate_mm(struct task_struct *tsk, + struct mm_struct *mm) +{ +} + +/** + * init_new_context - initialize context related info for new mm_struct instance + * @tsk: pointer to a task struct + * @mm: pointer to a new mm struct + */ +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + /* mm->context is set up by pgd_alloc */ + return 0; +} + +/* + * Switch active mm context + */ +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + int l1; + + /* + * For virtual machine, we have to update system map if it's been + * touched. + */ + if (next->context.generation < prev->context.generation) { + for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++) + next->pgd[l1] = init_mm.pgd[l1]; + + next->context.generation = prev->context.generation; + } + + __vmnewmap((void *)next->context.ptbase); +} + +/* + * Activate new memory map for task + */ +static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm(prev, next, current_thread_info()->task); + local_irq_restore(flags); +} + +/* Generic hooks for arch_dup_mmap and arch_exit_mmap */ +#include + +#endif diff --git a/arch/hexagon/include/asm/page.h b/arch/hexagon/include/asm/page.h new file mode 100644 index 000000000..7cbf719c5 --- /dev/null +++ b/arch/hexagon/include/asm/page.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Page management definitions for the Hexagon architecture + * + * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_PAGE_H +#define _ASM_PAGE_H + +#include + +/* This is probably not the most graceful way to handle this. */ + +#ifdef CONFIG_PAGE_SIZE_4KB +#define PAGE_SHIFT 12 +#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB +#endif + +#ifdef CONFIG_PAGE_SIZE_16KB +#define PAGE_SHIFT 14 +#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB +#endif + +#ifdef CONFIG_PAGE_SIZE_64KB +#define PAGE_SHIFT 16 +#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB +#endif + +#ifdef CONFIG_PAGE_SIZE_256KB +#define PAGE_SHIFT 18 +#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB +#endif + +#ifdef CONFIG_PAGE_SIZE_1MB +#define PAGE_SHIFT 20 +#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB +#endif + +/* + * These should be defined in hugetlb.h, but apparently not. + * "Huge" for us should be 4MB or 16MB, which are both represented + * in L1 PTE's. Right now, it's set up for 4MB. + */ +#ifdef CONFIG_HUGETLB_PAGE +#define HPAGE_SHIFT 22 +#define HPAGE_SIZE (1UL << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE-1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT) +#define HVM_HUGEPAGE_SIZE 0x5 +#endif + +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +/* + * This is for PFN_DOWN, which mm.h needs. Seems the right place to pull it in.
+ */ +#include + +/* + * We implement a two-level architecture-specific page table structure. + * Null intermediate page table level (pmd, pud) definitions will come from + * asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h + */ +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct page *pgtable_t; + +#define pte_val(x) ((x).pte) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) +#define __pte(x) ((pte_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) + +/* + * We need a __pa and a __va routine for kernel space. + * MIPS says they're only used during mem_init. + * also, check if we need a PHYS_OFFSET. + */ +#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) +#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET)) + +/* The "page frame" descriptor is defined in linux/mm.h */ +struct page; + +/* Returns page frame descriptor for virtual address. */ +#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr))) + +/* Default vm area behavior is non-executable. */ +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC + +#define pfn_valid(pfn) ((pfn) < max_mapnr) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) + +/* Need to not use a define for linesize; may move this to another file. */ +static inline void clear_page(void *page) +{ + /* This can only be done on pages with L1 WB cache */ + asm volatile( + " loop0(1f,%1);\n" + "1: { dczeroa(%0);\n" + " %0 = add(%0,#32); }:endloop0\n" + : "+r" (page) + : "r" (PAGE_SIZE/32) + : "lc0", "sa0", "memory" + ); +} + +#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + +/* + * Under assumption that kernel always "sees" user map... + */ +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +/* + * page_to_phys - convert page to physical address + * @page - pointer to page entry in mem_map + */ +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) + +#define page_to_virt(page) __va(page_to_phys(page)) + +/* + * For port to Hexagon Virtual Machine, MAYBE we check for attempts + * to reference reserved HVM space, but in any case, the VM will be + * protected. + */ +#define kern_addr_valid(addr) (1) + +#include +#include +/* XXX Todo: implement assembly-optimized version of getorder. */ +#include + +#endif /* ifdef __ASSEMBLY__ */ +#endif /* ifdef __KERNEL__ */ + +#endif diff --git a/arch/hexagon/include/asm/perf_event.h b/arch/hexagon/include/asm/perf_event.h new file mode 100644 index 000000000..81db312b8 --- /dev/null +++ b/arch/hexagon/include/asm/perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_PERF_EVENT_H +#define _ASM_PERF_EVENT_H + +#endif /* _ASM_PERF_EVENT_H */ diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h new file mode 100644 index 000000000..f0c47e6a7 --- /dev/null +++ b/arch/hexagon/include/asm/pgalloc.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Page table support for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. 
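+ *
+ * For reference, the dczeroa loop in clear_page() in page.h above is
+ * behaviorally equivalent to the portable sketch below; clear_page_ref
+ * is a hypothetical name used only for illustration, and dczeroa
+ * zeroes one 32-byte cache line per iteration:
+ *
+ *   static inline void clear_page_ref(void *page)
+ *   {
+ *           char *p = page;
+ *           int i;
+ *
+ *           for (i = 0; i < PAGE_SIZE / 32; i++, p += 32)
+ *                   memset(p, 0, 32);
+ *   }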
+ */ + +#ifndef _ASM_PGALLOC_H +#define _ASM_PGALLOC_H + +#include +#include + +#include + +extern unsigned long long kmap_generation; + +/* + * Page table creation interface + */ +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *pgd; + + pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + + /* + * There may be better ways to do this, but to ensure + * that new address spaces always contain the kernel + * base mapping, and to ensure that the user area is + * initially marked invalid, initialize the new map + * map with a copy of the kernel's persistent map. + */ + + memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t)); + mm->context.generation = kmap_generation; + + /* Physical version is what is passed to virtual machine on switch */ + mm->context.ptbase = __pa(pgd); + + return pgd; +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + pgtable_t pte) +{ + /* + * Conveniently, zero in 3 LSB means indirect 4K page table. + * Not so convenient when you're trying to vary the page size. + */ + set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) | + HEXAGON_L1_PTE_SIZE)); +} + +/* + * Other architectures seem to have ways of making all processes + * share the same pmd's for their kernel mappings, but the v0.3 + * Hexagon VM spec has a "monolithic" L1 table for user and kernel + * segments. We track "generations" of the kernel map to minimize + * overhead, and update the "slave" copies of the kernel mappings + * as part of switch_mm. However, we still need to update the + * kernel map of the active thread who's calling pmd_populate_kernel... + */ +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + extern spinlock_t kmap_gen_lock; + pmd_t *ppmd; + int pmdindex; + + spin_lock(&kmap_gen_lock); + kmap_generation++; + mm->context.generation = kmap_generation; + current->active_mm->context.generation = kmap_generation; + spin_unlock(&kmap_gen_lock); + + set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE)); + + /* + * Now the "slave" copy of the current thread. + * This is pointer arithmetic, not byte addresses! + */ + pmdindex = (pgd_t *)pmd - mm->pgd; + ppmd = (pmd_t *)current->active_mm->pgd + pmdindex; + set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE)); + if (pmdindex > max_kernel_seg) + max_kernel_seg = pmdindex; +} + +#define __pte_free_tlb(tlb, pte, addr) \ +do { \ + pgtable_pte_page_dtor((pte)); \ + tlb_remove_page((tlb), (pte)); \ +} while (0) + +#endif diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h new file mode 100644 index 000000000..dbb22b80b --- /dev/null +++ b/arch/hexagon/include/asm/pgtable.h @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Page table support for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_PGTABLE_H +#define _ASM_PGTABLE_H + +/* + * Page table definitions for Qualcomm Hexagon processor. + */ +#include +#include + +/* A handy thing to have if one has the RAM. Declared in head.S */ +extern unsigned long empty_zero_page; + +/* + * The PTE model described here is that of the Hexagon Virtual Machine, + * which autonomously walks 2-level page tables. At a lower level, we + * also describe the RISCish software-loaded TLB entry structure of + * the underlying Hexagon processor. A kernel built to run on the + * virtual machine has no need to know about the underlying hardware. 
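+ *
+ * A quick sanity check on pgd_alloc() in pgalloc.h above: with
+ * PTRS_PER_PGD = 1024 (defined below) and 4-byte pgd_t entries on this
+ * 32-bit architecture, the L1 table is 1024 * 4 = 4096 bytes, so a
+ * single __get_free_page() covers it (exactly, when
+ * CONFIG_PAGE_SIZE_4KB is set) and the memcpy() of swapper_pg_dir
+ * copies the whole table.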
+ */ +#include + +/* + * To maximize the comfort level for the PTE manipulation macros, + * define the "well known" architecture-specific bits. + */ +#define _PAGE_READ __HVM_PTE_R +#define _PAGE_WRITE __HVM_PTE_W +#define _PAGE_EXECUTE __HVM_PTE_X +#define _PAGE_USER __HVM_PTE_U + +/* + * We have a total of 4 "soft" bits available in the abstract PTE. + * The two mandatory software bits are Dirty and Accessed. + * To make nonlinear swap work according to the more recent + * model, we want a low order "Present" bit to indicate whether + * the PTE describes MMU programming or swap space. + */ +#define _PAGE_PRESENT (1<<0) +#define _PAGE_DIRTY (1<<1) +#define _PAGE_ACCESSED (1<<2) + +/* + * For now, let's say that Valid and Present are the same thing. + * Alternatively, we could say that it's the "or" of R, W, and X + * permissions. + */ +#define _PAGE_VALID _PAGE_PRESENT + +/* + * We're not defining _PAGE_GLOBAL here, since there's no concept + * of global pages or ASIDs exposed to the Hexagon Virtual Machine, + * and we want to use the same page table structures and macros in + * the native kernel as we do in the virtual machine kernel. + * So we'll put up with a bit of inefficiency for now... + */ + +/* + * Top "FOURTH" level (pgd), which for the Hexagon VM is really + * only the second from the bottom, pgd and pud both being collapsed. + * Each entry represents 4MB of virtual address space, 4K of table + * thus maps the full 4GB. + */ +#define PGDIR_SHIFT 22 +#define PTRS_PER_PGD 1024 + +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +#ifdef CONFIG_PAGE_SIZE_4KB +#define PTRS_PER_PTE 1024 +#endif + +#ifdef CONFIG_PAGE_SIZE_16KB +#define PTRS_PER_PTE 256 +#endif + +#ifdef CONFIG_PAGE_SIZE_64KB +#define PTRS_PER_PTE 64 +#endif + +#ifdef CONFIG_PAGE_SIZE_256KB +#define PTRS_PER_PTE 16 +#endif + +#ifdef CONFIG_PAGE_SIZE_1MB +#define PTRS_PER_PTE 4 +#endif + +/* Any bigger and the PTE disappears. */ +#define pgd_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\ + pgd_val(e)) + +/* + * Page Protection Constants. Includes (in this variant) cache attributes. + */ +extern unsigned long _dflt_cache_att; + +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _dflt_cache_att) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att) +#define PAGE_COPY PAGE_READONLY +#define PAGE_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att) +#define PAGE_COPY_EXEC PAGE_EXEC +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \ + _PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | \ + _PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att) + + +/* + * Aliases for mapping mmap() protection bits to page protections. + * These get used for static initialization, so using the _dflt_cache_att + * variable for the default cache attribute isn't workable. If the + * default gets changed at boot time, the boot option code has to + * update data structures like the protaction_map[] array. + */ +#define CACHEDEF (CACHE_DEFAULT << 6) + +/* Private (copy-on-write) page protections. 
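+ *
+ * The three digits index the (execute, write, read) bits requested via
+ * mmap(). For example, a private PROT_READ|PROT_WRITE mapping selects
+ * __P011, which is deliberately the same read-only protection as
+ * __P001: the write permission is withheld so that the first store
+ * faults and the fault handler can perform copy-on-write. The shared
+ * variant __S011 does include _PAGE_WRITE.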
*/ +#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF) +#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF) +#define __P010 __P000 /* Write-only copy-on-write */ +#define __P011 __P001 /* Read/Write copy-on-write */ +#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_EXECUTE | CACHEDEF) +#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \ + _PAGE_READ | CACHEDEF) +#define __P110 __P100 /* Write/execute copy-on-write */ +#define __P111 __P101 /* Read/Write/Execute, copy-on-write */ + +/* Shared page protections. */ +#define __S000 __P000 +#define __S001 __P001 +#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_WRITE | CACHEDEF) +#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \ + _PAGE_WRITE | CACHEDEF) +#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_EXECUTE | CACHEDEF) +#define __S101 __P101 +#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF) +#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \ + _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF) + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */ + +/* Seems to be zero even in architectures where the zero page is firewalled? */ +#define FIRST_USER_ADDRESS 0UL + +/* HUGETLB not working currently */ +#ifdef CONFIG_HUGETLB_PAGE +#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE) +#endif + +/* + * For now, assume that higher-level code will do TLB/MMU invalidations + * and don't insert that overhead into this low-level function. + */ +extern void sync_icache_dcache(pte_t pte); + +#define pte_present_exec_user(pte) \ + ((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \ + (_PAGE_EXECUTE | _PAGE_USER)) + +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + /* should really be using pte_exec, if it weren't declared later. */ + if (pte_present_exec_user(pteval)) + sync_icache_dcache(pteval); + + *ptep = pteval; +} + +/* + * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid + * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE + * (Linux PTE), the key is to have bits 11..9 all zero. We'd use 0x7 + * as a universal null entry, but some of those least significant bits + * are interpreted by software. + */ +#define _NULL_PMD 0x7 +#define _NULL_PTE 0x0 + +static inline void pmd_clear(pmd_t *pmd_entry_ptr) +{ + pmd_val(*pmd_entry_ptr) = _NULL_PMD; +} + +/* + * Conveniently, a null PTE value is invalid. + */ +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_val(*ptep) = _NULL_PTE; +} + +/** + * pmd_none - check if pmd_entry is mapped + * @pmd_entry: pmd entry + * + * MIPS checks it against that "invalid pte table" thing. + */ +static inline int pmd_none(pmd_t pmd) +{ + return pmd_val(pmd) == _NULL_PMD; +} + +/** + * pmd_present - is there a page table behind this? + * Essentially the inverse of pmd_none. We maybe + * save an inline instruction by defining it this + * way, instead of simply "!pmd_none". + */ +static inline int pmd_present(pmd_t pmd) +{ + return pmd_val(pmd) != (unsigned long)_NULL_PMD; +} + +/** + * pmd_bad - check if a PMD entry is "bad". That might mean swapped out. + * As we have no known cause of badness, it's null, as it is for many + * architectures. 
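+ *
+ * One concrete tie-in for the null encodings above: the _NULL_PMD
+ * value 0x7 is exactly __HVM_PDE_S_INVALID in the 3-bit PDE size field
+ * (see vm_mmu.h later in this patch), so e.g. pmd_clear() leaves an
+ * entry the virtual machine is guaranteed to treat as unmapped, while
+ * _NULL_PTE (0x0) relies on the R/W/X bits 11..9 being clear.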
+ */ +static inline int pmd_bad(pmd_t pmd) +{ + return 0; +} + +/* + * pmd_page - converts a PMD entry to a page pointer + */ +#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) +#define pmd_pgtable(pmd) pmd_page(pmd) + +/** + * pte_none - check if pte is mapped + * @pte: pte_t entry + */ +static inline int pte_none(pte_t pte) +{ + return pte_val(pte) == _NULL_PTE; +}; + +/* + * pte_present - check if page is present + */ +static inline int pte_present(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +/* mk_pte - make a PTE out of a page pointer and protection bits */ +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) + +/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */ +#define pte_page(x) pfn_to_page(pte_pfn(x)) + +/* pte_mkold - mark PTE as not recently accessed */ +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_ACCESSED; + return pte; +} + +/* pte_mkyoung - mark PTE as recently accessed */ +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + return pte; +} + +/* pte_mkclean - mark page as in sync with backing store */ +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_DIRTY; + return pte; +} + +/* pte_mkdirty - mark page as modified */ +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_DIRTY; + return pte; +} + +/* pte_young - "is PTE marked as accessed"? */ +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; +} + +/* pte_dirty - "is PTE dirty?" */ +static inline int pte_dirty(pte_t pte) +{ + return pte_val(pte) & _PAGE_DIRTY; +} + +/* pte_modify - set protection bits on PTE */ +static inline pte_t pte_modify(pte_t pte, pgprot_t prot) +{ + pte_val(pte) &= PAGE_MASK; + pte_val(pte) |= pgprot_val(prot); + return pte; +} + +/* pte_wrprotect - mark page as not writable */ +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_WRITE; + return pte; +} + +/* pte_mkwrite - mark page as writable */ +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_val(pte) |= _PAGE_WRITE; + return pte; +} + +/* pte_mkexec - mark PTE as executable */ +static inline pte_t pte_mkexec(pte_t pte) +{ + pte_val(pte) |= _PAGE_EXECUTE; + return pte; +} + +/* pte_read - "is PTE marked as readable?" */ +static inline int pte_read(pte_t pte) +{ + return pte_val(pte) & _PAGE_READ; +} + +/* pte_write - "is PTE marked as writable?" */ +static inline int pte_write(pte_t pte) +{ + return pte_val(pte) & _PAGE_WRITE; +} + + +/* pte_exec - "is PTE marked as executable?" */ +static inline int pte_exec(pte_t pte) +{ + return pte_val(pte) & _PAGE_EXECUTE; +} + +/* __pte_to_swp_entry - extract swap entry from PTE */ +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) + +/* __swp_entry_to_pte - extract PTE from swap entry */ +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +/* pfn_pte - convert page number and protection value to page table entry */ +#define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot)) + +/* pte_pfn - convert pte to page frame number */ +#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) + +/* + * set_pte_at - update page table and do whatever magic may be + * necessary to make the underlying hardware/firmware take note. + * + * VM may require a virtual instruction to alert the MMU. 
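+ *
+ * A typical caller in generic mm code builds and installs an entry
+ * roughly as sketched here (illustrative fragment only, not code from
+ * this patch; "writable" stands for the fault's write intent):
+ *
+ *   pte_t pte = mk_pte(page, vma->vm_page_prot);
+ *
+ *   if (writable)
+ *           pte = pte_mkdirty(pte_mkwrite(pte));
+ *   set_pte_at(vma->vm_mm, address, ptep, pte);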
+ */ +#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte) + +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK); +} + +/* ZERO_PAGE - returns the globally shared zero page */ +#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) + +/* + * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is + * interpreted as swap information. The remaining free bits are interpreted as + * swap type/offset tuple. Rather than have the TLB fill handler test + * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to + * all zeros for swap entries, which speeds up the miss handler at the cost of + * 3 bits of offset. That trade-off can be revisited if necessary, but Hexagon + * processor architecture and target applications suggest a lot of TLB misses + * and not much swap space. + * + * Format of swap PTE: + * bit 0: Present (zero) + * bits 1-5: swap type (arch independent layer uses 5 bits max) + * bits 6-9: bits 3:0 of offset + * bits 10-12: effectively _PAGE_PROTNONE (all zero) + * bits 13-31: bits 22:4 of swap offset + * + * The split offset makes some of the following macros a little gnarly, + * but there's plenty of precedent for this sort of thing. + */ + +/* Used for swap PTEs */ +#define __swp_type(swp_pte) (((swp_pte).val >> 1) & 0x1f) + +#define __swp_offset(swp_pte) \ + ((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0)) + +#define __swp_entry(type, offset) \ + ((swp_entry_t) { \ + ((type << 1) | \ + ((offset & 0x7ffff0) << 9) | ((offset & 0xf) << 6)) }) + +#endif diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h new file mode 100644 index 000000000..9f0cc9942 --- /dev/null +++ b/arch/hexagon/include/asm/processor.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Process/processor support for the Hexagon architecture + * + * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_PROCESSOR_H +#define _ASM_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +/* task_struct, defined elsewhere, is the "process descriptor" */ +struct task_struct; + +extern void start_thread(struct pt_regs *, unsigned long, unsigned long); + +/* + * thread_struct is supposed to be for context switch data. + * Specifically, to hold the state necessary to perform switch_to... + */ +struct thread_struct { + void *switch_sp; +}; + +/* + * initializes thread_struct + * The only thing we have in there is switch_sp + * which doesn't really need to be initialized. + */ + +#define INIT_THREAD { \ +} + +#define cpu_relax() __vmyield() + +/* + * Decides where the kernel will search for a free chunk of vm space during + * mmaps. + * See also arch_get_unmapped_area. + * Doesn't affect if you have MAX_FIXED in the page flags set though... + * + * Apparently the convention is that ld.so will ask for "unmapped" private + * memory to be allocated SOMEWHERE, but it also asks for memory explicitly + * via MAP_FIXED at the lower * addresses starting at VA=0x0. + * + * If the two requests collide, you get authentic segfaulting action, so + * you have to kick the "unmapped" base requests higher up. 
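+ *
+ * Worked example of the swap PTE format defined in pgtable.h above
+ * (numbers chosen arbitrarily for illustration):
+ *
+ *   __swp_entry(2, 0x123).val
+ *     = (2 << 1) | ((0x123 & 0x7ffff0) << 9) | ((0x123 & 0xf) << 6)
+ *     = 0x4 | 0x24000 | 0xc0 = 0x240c4
+ *
+ * Decoding reverses it: __swp_type() gives (0x240c4 >> 1) & 0x1f = 2,
+ * and __swp_offset() reassembles ((val >> 6) & 0xf) with
+ * ((val >> 9) & 0x7ffff0) to recover 0x123. Bit 0 (present) and the
+ * permission bits 10-12 stay zero, as the format requires.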
+ */ +#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE/3)) + + +#define task_pt_regs(task) \ + ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1) + +#define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk))) +#define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk))) + +/* Free all resources held by a thread; defined in process.c */ +extern void release_thread(struct task_struct *dead_task); + +/* Get wait channel for task P. */ +extern unsigned long get_wchan(struct task_struct *p); + +/* The following stuff is pretty HEXAGON specific. */ + +/* This is really just here for __switch_to. + Offsets are pulled via asm-offsets.c */ + +/* + * No real reason why VM and native switch stacks should be different. + * Ultimately this should merge. Note that Rev C. ABI called out only + * R24-27 as callee saved GPRs needing explicit attention (R29-31 being + * dealt with automagically by allocframe), but the current ABI has + * more, R16-R27. By saving more, the worst case is that we waste some + * cycles if building with the old compilers. + */ + +struct hexagon_switch_stack { + union { + struct { + unsigned long r16; + unsigned long r17; + }; + unsigned long long r1716; + }; + union { + struct { + unsigned long r18; + unsigned long r19; + }; + unsigned long long r1918; + }; + union { + struct { + unsigned long r20; + unsigned long r21; + }; + unsigned long long r2120; + }; + union { + struct { + unsigned long r22; + unsigned long r23; + }; + unsigned long long r2322; + }; + union { + struct { + unsigned long r24; + unsigned long r25; + }; + unsigned long long r2524; + }; + union { + struct { + unsigned long r26; + unsigned long r27; + }; + unsigned long long r2726; + }; + + unsigned long fp; + unsigned long lr; +}; + +#endif /* !__ASSEMBLY__ */ + +#endif diff --git a/arch/hexagon/include/asm/smp.h b/arch/hexagon/include/asm/smp.h new file mode 100644 index 000000000..60d6c9486 --- /dev/null +++ b/arch/hexagon/include/asm/smp.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * SMP definitions for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +#include + +#define raw_smp_processor_id() (current_thread_info()->cpu) + +enum ipi_message_type { + IPI_NOP = 0, + IPI_RESCHEDULE = 1, + IPI_CALL_FUNC, + IPI_CPU_STOP, + IPI_TIMER, +}; + +extern void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg); +extern void smp_start_cpus(void); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); + +extern void smp_vm_unmask_irq(void *info); + +#endif diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h new file mode 100644 index 000000000..ef103b73b --- /dev/null +++ b/arch/hexagon/include/asm/spinlock.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Spinlock support for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_SPINLOCK_H +#define _ASM_SPINLOCK_H + +#include +#include +#include + +/* + * This file is pulled in for SMP builds. + * Really need to check all the barrier stuff for "true" SMP + */ + +/* + * Read locks: + * - load the lock value + * - increment it + * - if the lock value is still negative, go back and try again. + * - unsuccessful store is unsuccessful. Go back and try again. Loser. 
+ * - successful store new lock value if positive -> lock acquired + */ +static inline void arch_read_lock(arch_rwlock_t *lock) +{ + __asm__ __volatile__( + "1: R6 = memw_locked(%0);\n" + " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" + " { if (!P3) jump 1b; }\n" + " memw_locked(%0,P3) = R6;\n" + " { if (!P3) jump 1b; }\n" + : + : "r" (&lock->lock) + : "memory", "r6", "p3" + ); + +} + +static inline void arch_read_unlock(arch_rwlock_t *lock) +{ + __asm__ __volatile__( + "1: R6 = memw_locked(%0);\n" + " R6 = add(R6,#-1);\n" + " memw_locked(%0,P3) = R6\n" + " if (!P3) jump 1b;\n" + : + : "r" (&lock->lock) + : "memory", "r6", "p3" + ); + +} + +/* I think this returns 0 on fail, 1 on success. */ +static inline int arch_read_trylock(arch_rwlock_t *lock) +{ + int temp; + __asm__ __volatile__( + " R6 = memw_locked(%1);\n" + " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" + " { if (!P3) jump 1f; }\n" + " memw_locked(%1,P3) = R6;\n" + " { %0 = P3 }\n" + "1:\n" + : "=&r" (temp) + : "r" (&lock->lock) + : "memory", "r6", "p3" + ); + return temp; +} + +/* Stuffs a -1 in the lock value? */ +static inline void arch_write_lock(arch_rwlock_t *lock) +{ + __asm__ __volatile__( + "1: R6 = memw_locked(%0)\n" + " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n" + " { if (!P3) jump 1b; }\n" + " memw_locked(%0,P3) = R6;\n" + " { if (!P3) jump 1b; }\n" + : + : "r" (&lock->lock) + : "memory", "r6", "p3" + ); +} + + +static inline int arch_write_trylock(arch_rwlock_t *lock) +{ + int temp; + __asm__ __volatile__( + " R6 = memw_locked(%1)\n" + " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n" + " { if (!P3) jump 1f; }\n" + " memw_locked(%1,P3) = R6;\n" + " %0 = P3;\n" + "1:\n" + : "=&r" (temp) + : "r" (&lock->lock) + : "memory", "r6", "p3" + ); + return temp; + +} + +static inline void arch_write_unlock(arch_rwlock_t *lock) +{ + smp_mb(); + lock->lock = 0; +} + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + __asm__ __volatile__( + "1: R6 = memw_locked(%0);\n" + " P3 = cmp.eq(R6,#0);\n" + " { if (!P3) jump 1b; R6 = #1; }\n" + " memw_locked(%0,P3) = R6;\n" + " { if (!P3) jump 1b; }\n" + : + : "r" (&lock->lock) + : "memory", "r6", "p3" + ); + +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + lock->lock = 0; +} + +static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) +{ + int temp; + __asm__ __volatile__( + " R6 = memw_locked(%1);\n" + " P3 = cmp.eq(R6,#0);\n" + " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n" + " memw_locked(%1,P3) = R6;\n" + " %0 = P3;\n" + "1:\n" + : "=&r" (temp) + : "r" (&lock->lock) + : "memory", "r6", "p3" + ); + return temp; +} + +/* + * SMP spinlocks are intended to allow only a single CPU at the lock + */ +#define arch_spin_is_locked(x) ((x)->lock != 0) + +#endif diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h new file mode 100644 index 000000000..19d233497 --- /dev/null +++ b/arch/hexagon/include/asm/spinlock_types.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Spinlock support for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. 
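+ *
+ * The memw_locked sequences in spinlock.h above follow the classic
+ * load-locked/store-conditional pattern. In portable pseudo-C,
+ * arch_spin_lock() is roughly the loop below, where load_locked() and
+ * store_conditional() are illustrative names for the LL/SC pair, not
+ * real kernel APIs:
+ *
+ *   do {
+ *           while (load_locked(&lock->lock) != 0)
+ *                   ;
+ *   } while (!store_conditional(&lock->lock, 1));
+ *
+ * The conditional store fails, forcing a retry, whenever another
+ * thread touched the word between the locked load and the store.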
+ */ + +#ifndef _ASM_SPINLOCK_TYPES_H +#define _ASM_SPINLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +typedef struct { + volatile unsigned int lock; +} arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } + +typedef struct { + volatile unsigned int lock; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { 0 } + +#endif diff --git a/arch/hexagon/include/asm/string.h b/arch/hexagon/include/asm/string.h new file mode 100644 index 000000000..5972cca79 --- /dev/null +++ b/arch/hexagon/include/asm/string.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_STRING_H_ +#define _ASM_STRING_H_ + +#ifdef __KERNEL__ +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *__to, __const__ void *__from, size_t __n); + +/* ToDo: use dczeroa, accelerate the compiler-constant zero case */ +#define __HAVE_ARCH_MEMSET +extern void *memset(void *__to, int c, size_t __n); +#endif + + +#endif /* _ASM_STRING_H_ */ diff --git a/arch/hexagon/include/asm/suspend.h b/arch/hexagon/include/asm/suspend.h new file mode 100644 index 000000000..57b1e1152 --- /dev/null +++ b/arch/hexagon/include/asm/suspend.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_SUSPEND_H +#define _ASM_SUSPEND_H + +static inline int arch_prepare_suspend(void) +{ + return 0; +} + +#endif diff --git a/arch/hexagon/include/asm/switch_to.h b/arch/hexagon/include/asm/switch_to.h new file mode 100644 index 000000000..8c7d13fe4 --- /dev/null +++ b/arch/hexagon/include/asm/switch_to.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Task switching definitions for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_SWITCH_TO_H +#define _ASM_SWITCH_TO_H + +struct thread_struct; + +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *, + struct task_struct *); + +#define switch_to(p, n, r) do {\ + r = __switch_to((p), (n), (r));\ +} while (0) + +#endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h new file mode 100644 index 000000000..f6e454f18 --- /dev/null +++ b/arch/hexagon/include/asm/syscall.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Syscall support for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_HEXAGON_SYSCALL_H +#define _ASM_HEXAGON_SYSCALL_H + +#include +#include +#include + +typedef long (*syscall_fn)(unsigned long, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); + +#include + +extern void *sys_call_table[]; + +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->r06; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + memcpy(args, &(®s->r00)[0], 6 * sizeof(args[0])); +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + return IS_ERR_VALUE(regs->r00) ? 
regs->r00 : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->r00; +} + +static inline int syscall_get_arch(struct task_struct *task) +{ + return AUDIT_ARCH_HEXAGON; +} + +#endif diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h new file mode 100644 index 000000000..535976665 --- /dev/null +++ b/arch/hexagon/include/asm/thread_info.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Thread support for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#endif + +#define THREAD_SHIFT 12 +#define THREAD_SIZE (1<preemptible,<0=>BUG */ + mm_segment_t addr_limit; /* segmentation sux */ + /* + * used for syscalls somehow; + * seems to have a function pointer and four arguments + */ + /* Points to the current pt_regs frame */ + struct pt_regs *regs; + /* + * saved kernel sp at switch_to time; + * not sure if this is used (it's not in the VM model it seems; + * see thread_struct) + */ + unsigned long sp; +}; + +#else /* !__ASSEMBLY__ */ + +#include + +#endif /* __ASSEMBLY__ */ + +#ifndef __ASSEMBLY__ + +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = 1, \ + .addr_limit = KERNEL_DS, \ + .sp = 0, \ + .regs = NULL, \ +} + +/* Tacky preprocessor trickery */ +#define qqstr(s) qstr(s) +#define qstr(s) #s +#define QUOTED_THREADINFO_REG qqstr(THREADINFO_REG) + +register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG); +#define current_thread_info() __current_thread_info + +#endif /* __ASSEMBLY__ */ + +/* + * thread information flags + * - these are process state flags that various assembly files + * may need to access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ + +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_SINGLESTEP 4 /* restore ss @ return to usr mode */ +#define TIF_RESTORE_SIGMASK 6 /* restore sig mask in do_signal() */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ +/* true if poll_idle() is polling TIF_NEED_RESCHED */ +#define TIF_MEMDIE 17 /* OOM killer killed process */ + +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) + +/* work to do on interrupt/exception return - All but TIF_SYSCALL_TRACE */ +#define _TIF_WORK_MASK (0x0000FFFF & ~_TIF_SYSCALL_TRACE) + +/* work to do on any return to u-space */ +#define _TIF_ALLWORK_MASK 0x0000FFFF + +#endif /* __KERNEL__ */ + +#endif diff --git a/arch/hexagon/include/asm/time.h b/arch/hexagon/include/asm/time.h new file mode 100644 index 000000000..7d3d22d47 --- /dev/null +++ b/arch/hexagon/include/asm/time.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. 
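+ *
+ * A concrete note on the syscall.h accessors above: the return value
+ * of a system call lands in r00, so a failing call that stores, say,
+ * -EFAULT there makes syscall_get_error() report -EFAULT, because
+ * IS_ERR_VALUE() matches the -MAX_ERRNO..-1 range, while a successful
+ * byte count is passed through by syscall_get_return_value() as-is.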
+ */ + +#ifndef ASM_TIME_H +#define ASM_TIME_H + +extern cycles_t pcycle_freq_mhz; +extern cycles_t thread_freq_mhz; +extern cycles_t sleep_clk_freq; + +void setup_percpu_clockdev(void); +void ipi_timer(void); + +#endif diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h new file mode 100644 index 000000000..dfe69e118 --- /dev/null +++ b/arch/hexagon/include/asm/timex.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_TIMEX_H +#define _ASM_TIMEX_H + +#include +#include + +/* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */ +#define CLOCK_TICK_RATE 19200 + +#define ARCH_HAS_READ_CURRENT_TIMER + +static inline int read_current_timer(unsigned long *timer_val) +{ + *timer_val = __vmgettime(); + return 0; +} + +#endif diff --git a/arch/hexagon/include/asm/tlb.h b/arch/hexagon/include/asm/tlb.h new file mode 100644 index 000000000..2c9390e7c --- /dev/null +++ b/arch/hexagon/include/asm/tlb.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_TLB_H +#define _ASM_TLB_H + +#include +#include + +#include + +#endif diff --git a/arch/hexagon/include/asm/tlbflush.h b/arch/hexagon/include/asm/tlbflush.h new file mode 100644 index 000000000..a7c9ab398 --- /dev/null +++ b/arch/hexagon/include/asm/tlbflush.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * TLB flush support for Hexagon + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_TLBFLUSH_H +#define _ASM_TLBFLUSH_H + +#include +#include + +/* + * TLB flushing -- in "SMP", these routines get defined to be the + * ones from smp.c, else they are some local flavors. + */ + +/* + * These functions are commonly macros, but in the interests of + * VM vs. native implementation and code size, we simply declare + * the function prototypes here. + */ +extern void tlb_flush_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); +extern void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void flush_tlb_one(unsigned long); + +/* + * "This is called in munmap when we have freed up some page-table pages. + * We don't need to do anything here..." + * + * The VM kernel doesn't walk page tables, and they are passed to the VMM + * by logical address. There doesn't seem to be any possibility that they + * could be referenced by the VM kernel based on a stale mapping, since + * they would only be located by consulting the mm structure, and they + * will have been purged from that structure by the munmap. Seems like + * a noop on HVM as well. + */ +#define flush_tlb_pgtables(mm, start, end) + +#endif diff --git a/arch/hexagon/include/asm/traps.h b/arch/hexagon/include/asm/traps.h new file mode 100644 index 000000000..3ea5fc84c --- /dev/null +++ b/arch/hexagon/include/asm/traps.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Trap support for Hexagon + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. 
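+ *
+ * Usage sketch for read_current_timer() from timex.h above; this is an
+ * illustrative fragment, not code from this patch, and do_work() is a
+ * hypothetical workload:
+ *
+ *   unsigned long t0, t1;
+ *
+ *   read_current_timer(&t0);
+ *   do_work();
+ *   read_current_timer(&t1);
+ *
+ * t1 - t0 is then the elapsed pcycle count as reported by
+ * __vmgettime().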
+ */ + +#ifndef _ASM_HEXAGON_TRAPS_H +#define _ASM_HEXAGON_TRAPS_H + +#include + +extern int die(const char *str, struct pt_regs *regs, long err); +extern int die_if_kernel(char *str, struct pt_regs *regs, long err); + +#endif /* _ASM_HEXAGON_TRAPS_H */ diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h new file mode 100644 index 000000000..c1019a736 --- /dev/null +++ b/arch/hexagon/include/asm/uaccess.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * User memory access support for Hexagon + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_UACCESS_H +#define _ASM_UACCESS_H +/* + * User space memory access functions + */ +#include + +/* + * access_ok: - Checks if a user space pointer is valid + * @addr: User space pointer to start of block to check + * @size: Size of block to check + * + * Context: User context only. This function may sleep if pagefaults are + * enabled. + * + * Checks if a pointer to a block of memory in user space is valid. + * + * Returns true (nonzero) if the memory block *may* be valid, false (zero) + * if it is definitely invalid. + * + * User address space in Hexagon, like x86, goes to 0xbfffffff, so the + * simple MSB-based tests used by MIPS won't work. Some further + * optimization is probably possible here, but for now, keep it + * reasonably simple and not *too* slow. After all, we've got the + * MMU for backup. + */ + +#define __access_ok(addr, size) \ + ((get_fs().seg == KERNEL_DS.seg) || \ + (((unsigned long)addr < get_fs().seg) && \ + (unsigned long)size < (get_fs().seg - (unsigned long)addr))) + +/* + * When a kernel-mode page fault is taken, the faulting instruction + * address is checked against a table of exception_table_entries. + * Each entry is a tuple of the address of an instruction that may + * be authorized to fault, and the address at which execution should + * be resumed instead of the faulting instruction, so as to effect + * a workaround. + */ + +/* Assembly somewhat optimized copy routines */ +unsigned long raw_copy_from_user(void *to, const void __user *from, + unsigned long n); +unsigned long raw_copy_to_user(void __user *to, const void *from, + unsigned long n); +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count); +#define __clear_user(a, s) __clear_user_hexagon((a), (s)) + +#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n) + +/* get around the ifndef in asm-generic/uaccess.h */ +#define __strnlen_user __strnlen_user + +extern long __strnlen_user(const char __user *src, long n); + +static inline long hexagon_strncpy_from_user(char *dst, const char __user *src, + long n); + +#include + +/* Todo: an actual accelerated version of this. 
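+ *
+ * While here, a note on the __access_ok() arithmetic earlier in this
+ * file: the check is size < seg - addr rather than addr + size < seg,
+ * so a huge size cannot wrap around 32 bits and sneak past. Worked
+ * example with get_fs().seg = 0xc0000000:
+ *
+ *   addr = 0xbfffe000, size = 0x1000     -> 0x1000 < 0x2000, accepted
+ *   addr = 0xbffff000, size = 0x40002000 -> rejected; the naive sum
+ *                                           wraps to 0x1000 and would
+ *                                           falsely pass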
*/ +static inline long hexagon_strncpy_from_user(char *dst, const char __user *src, + long n) +{ + long res = __strnlen_user(src, n); + + if (unlikely(!res)) + return -EFAULT; + + if (res > n) { + long left = raw_copy_from_user(dst, src, n); + if (unlikely(left)) + memset(dst + (n - left), 0, left); + return n; + } else { + long left = raw_copy_from_user(dst, src, res); + if (unlikely(left)) + memset(dst + (res - left), 0, left); + return res-1; + } +} + +#endif diff --git a/arch/hexagon/include/asm/vdso.h b/arch/hexagon/include/asm/vdso.h new file mode 100644 index 000000000..e8f621f9f --- /dev/null +++ b/arch/hexagon/include/asm/vdso.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * vDSO implementation for Hexagon + * + * Copyright (c) 2011, The Linux Foundation. All rights reserved. + */ + +#ifndef __ASM_VDSO_H +#define __ASM_VDSO_H + +#include + +struct hexagon_vdso { + u32 rt_signal_trampoline[2]; +}; + +#endif /* __ASM_VDSO_H */ diff --git a/arch/hexagon/include/asm/vermagic.h b/arch/hexagon/include/asm/vermagic.h new file mode 100644 index 000000000..0e8dedc8c --- /dev/null +++ b/arch/hexagon/include/asm/vermagic.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_VERMAGIC_H +#define _ASM_VERMAGIC_H + +#include + +#define MODULE_ARCH_VERMAGIC __stringify(PROCESSOR_MODEL_NAME) " " + +#endif /* _ASM_VERMAGIC_H */ diff --git a/arch/hexagon/include/asm/vm_fault.h b/arch/hexagon/include/asm/vm_fault.h new file mode 100644 index 000000000..27842ef1d --- /dev/null +++ b/arch/hexagon/include/asm/vm_fault.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_HEXAGON_VM_FAULT_H +#define _ASM_HEXAGON_VM_FAULT_H + +extern void execute_protection_fault(struct pt_regs *); +extern void write_protection_fault(struct pt_regs *); +extern void read_protection_fault(struct pt_regs *); + +#endif diff --git a/arch/hexagon/include/asm/vm_mmu.h b/arch/hexagon/include/asm/vm_mmu.h new file mode 100644 index 000000000..c3531384f --- /dev/null +++ b/arch/hexagon/include/asm/vm_mmu.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hexagon VM page table entry definitions + * + * Copyright (c) 2010-2011,2013 The Linux Foundation. All rights reserved. + */ + +#ifndef _ASM_VM_MMU_H +#define _ASM_VM_MMU_H + +/* + * Shift, mask, and other constants for the Hexagon Virtual Machine + * page tables. + * + * Virtual machine MMU allows first-level entries to either be + * single-level lookup PTEs for very large pages, or PDEs pointing + * to second-level PTEs for smaller pages. If PTE is single-level, + * the least significant bits cannot be used as software bits to encode + * virtual memory subsystem information about the page, and that state + * must be maintained in some parallel data structure. 
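+ *
+ * For illustration, the edge cases of hexagon_strncpy_from_user() in
+ * uaccess.h above, assuming __strnlen_user() follows the generic
+ * convention of counting the terminating NUL and returning 0 on a
+ * fault: copying src = "abc" with n = 8 yields res = 4, so four bytes
+ * including the NUL are copied and 3 (the C string length) is
+ * returned; with n = 2 the string does not fit, two unterminated bytes
+ * are copied and 2 is returned; a bad pointer returns -EFAULT.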
+ */ + +/* S or Page Size field in PDE */ +#define __HVM_PDE_S (0x7 << 0) +#define __HVM_PDE_S_4KB 0 +#define __HVM_PDE_S_16KB 1 +#define __HVM_PDE_S_64KB 2 +#define __HVM_PDE_S_256KB 3 +#define __HVM_PDE_S_1MB 4 +#define __HVM_PDE_S_4MB 5 +#define __HVM_PDE_S_16MB 6 +#define __HVM_PDE_S_INVALID 7 + +/* Masks for L2 page table pointer, as function of page size */ +#define __HVM_PDE_PTMASK_4KB 0xfffff000 +#define __HVM_PDE_PTMASK_16KB 0xfffffc00 +#define __HVM_PDE_PTMASK_64KB 0xffffff00 +#define __HVM_PDE_PTMASK_256KB 0xffffffc0 +#define __HVM_PDE_PTMASK_1MB 0xfffffff0 + +/* + * Virtual Machine PTE Bits/Fields + */ +#define __HVM_PTE_T (1<<4) +#define __HVM_PTE_U (1<<5) +#define __HVM_PTE_C (0x7<<6) +#define __HVM_PTE_CVAL(pte) (((pte) & __HVM_PTE_C) >> 6) +#define __HVM_PTE_R (1<<9) +#define __HVM_PTE_W (1<<10) +#define __HVM_PTE_X (1<<11) + +/* + * Cache Attributes, to be shifted as necessary for virtual/physical PTEs + */ + +#define __HEXAGON_C_WB 0x0 /* Write-back, no L2 */ +#define __HEXAGON_C_WT 0x1 /* Write-through, no L2 */ +#define __HEXAGON_C_UNC 0x6 /* Uncached memory */ +#if CONFIG_HEXAGON_ARCH_VERSION >= 2 +#define __HEXAGON_C_DEV 0x4 /* Device register space */ +#else +#define __HEXAGON_C_DEV __HEXAGON_C_UNC +#endif +#define __HEXAGON_C_WT_L2 0x5 /* Write-through, with L2 */ +#define __HEXAGON_C_WB_L2 0x7 /* Write-back, with L2 */ + +/* + * This can be overridden, but we're defaulting to the most aggressive + * cache policy, the better to find bugs sooner. + */ + +#define CACHE_DEFAULT __HEXAGON_C_WB_L2 + +/* Masks for physical page address, as a function of page size */ + +#define __HVM_PTE_PGMASK_4KB 0xfffff000 +#define __HVM_PTE_PGMASK_16KB 0xffffc000 +#define __HVM_PTE_PGMASK_64KB 0xffff0000 +#define __HVM_PTE_PGMASK_256KB 0xfffc0000 +#define __HVM_PTE_PGMASK_1MB 0xfff00000 + +/* Masks for single-level large page lookups */ + +#define __HVM_PTE_PGMASK_4MB 0xffc00000 +#define __HVM_PTE_PGMASK_16MB 0xff000000 + +/* + * "Big kernel page mappings" (see vm_init_segtable.S) + * are currently 16MB + */ + +#define BIG_KERNEL_PAGE_SHIFT 24 +#define BIG_KERNEL_PAGE_SIZE (1 << BIG_KERNEL_PAGE_SHIFT) + + + +#endif /* _ASM_VM_MMU_H */ diff --git a/arch/hexagon/include/asm/vmalloc.h b/arch/hexagon/include/asm/vmalloc.h new file mode 100644 index 000000000..7b04609e5 --- /dev/null +++ b/arch/hexagon/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_HEXAGON_VMALLOC_H +#define _ASM_HEXAGON_VMALLOC_H + +#endif /* _ASM_HEXAGON_VMALLOC_H */ diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild new file mode 100644 index 000000000..e78470141 --- /dev/null +++ b/arch/hexagon/include/uapi/asm/Kbuild @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += ucontext.h diff --git a/arch/hexagon/include/uapi/asm/byteorder.h b/arch/hexagon/include/uapi/asm/byteorder.h new file mode 100644 index 000000000..1364fa4b2 --- /dev/null +++ b/arch/hexagon/include/uapi/asm/byteorder.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#ifndef _ASM_BYTEORDER_H +#define _ASM_BYTEORDER_H + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) +# define __BYTEORDER_HAS_U64__ +#endif + +#include + +#endif /* _ASM_BYTEORDER_H */ diff --git a/arch/hexagon/include/uapi/asm/param.h b/arch/hexagon/include/uapi/asm/param.h new file mode 100644 index 000000000..a1283866d --- /dev/null +++ b/arch/hexagon/include/uapi/asm/param.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#ifndef _ASM_PARAM_H +#define _ASM_PARAM_H + +#define EXEC_PAGESIZE 16384 + +#include + +#endif diff --git a/arch/hexagon/include/uapi/asm/ptrace.h b/arch/hexagon/include/uapi/asm/ptrace.h new file mode 100644 index 000000000..f79de05b8 --- /dev/null +++ b/arch/hexagon/include/uapi/asm/ptrace.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Ptrace definitions for the Hexagon architecture + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#ifndef _ASM_PTRACE_H +#define _ASM_PTRACE_H + +#include + +#define instruction_pointer(regs) pt_elr(regs) +#define user_stack_pointer(regs) ((regs)->r29) + +#define profile_pc(regs) instruction_pointer(regs) + +/* kprobe-based event tracer support */ +extern int regs_query_register_offset(const char *name); +extern const char *regs_query_register_name(unsigned int offset); + +#define current_pt_regs() \ + ((struct pt_regs *) \ + ((unsigned long)current_thread_info() + THREAD_SIZE) - 1) + +#if CONFIG_HEXAGON_ARCH_VERSION >= 4 +#define arch_has_single_step() (1) +#endif + + +#endif diff --git a/arch/hexagon/include/uapi/asm/registers.h b/arch/hexagon/include/uapi/asm/registers.h new file mode 100644 index 000000000..d51270f3b --- /dev/null +++ b/arch/hexagon/include/uapi/asm/registers.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Register definitions for the Hexagon architecture + */ + + +#ifndef _ASM_REGISTERS_H +#define _ASM_REGISTERS_H + +#ifndef __ASSEMBLY__ + +/* See kernel/entry.S for further documentation. */ + +/* + * Entry code copies the event record out of guest registers into + * this structure (which is on the stack). + */ + +struct hvm_event_record { + unsigned long vmel; /* Event Linkage (return address) */ + unsigned long vmest; /* Event context - pre-event SSR values */ + unsigned long vmpsp; /* Previous stack pointer */ + unsigned long vmbadva; /* Bad virtual address for addressing events */ +}; + +struct pt_regs { + long restart_r0; /* R0 checkpoint for syscall restart */ + long syscall_nr; /* Only used in system calls */ + union { + struct { + unsigned long usr; + unsigned long preds; + }; + long long int predsusr; + }; + union { + struct { + unsigned long m0; + unsigned long m1; + }; + long long int m1m0; + }; + union { + struct { + unsigned long sa1; + unsigned long lc1; + }; + long long int lc1sa1; + }; + union { + struct { + unsigned long sa0; + unsigned long lc0; + }; + long long int lc0sa0; + }; + union { + struct { + unsigned long ugp; + unsigned long gp; + }; + long long int gpugp; + }; + union { + struct { + unsigned long cs0; + unsigned long cs1; + }; + long long int cs1cs0; + }; + /* + * Be extremely careful with rearranging these, if at all. Some code + * assumes the 32 registers exist exactly like this in memory; + * e.g. kernel/ptrace.c + * e.g. 
kernel/signal.c (restore_sigcontext) + */ + union { + struct { + unsigned long r00; + unsigned long r01; + }; + long long int r0100; + }; + union { + struct { + unsigned long r02; + unsigned long r03; + }; + long long int r0302; + }; + union { + struct { + unsigned long r04; + unsigned long r05; + }; + long long int r0504; + }; + union { + struct { + unsigned long r06; + unsigned long r07; + }; + long long int r0706; + }; + union { + struct { + unsigned long r08; + unsigned long r09; + }; + long long int r0908; + }; + union { + struct { + unsigned long r10; + unsigned long r11; + }; + long long int r1110; + }; + union { + struct { + unsigned long r12; + unsigned long r13; + }; + long long int r1312; + }; + union { + struct { + unsigned long r14; + unsigned long r15; + }; + long long int r1514; + }; + union { + struct { + unsigned long r16; + unsigned long r17; + }; + long long int r1716; + }; + union { + struct { + unsigned long r18; + unsigned long r19; + }; + long long int r1918; + }; + union { + struct { + unsigned long r20; + unsigned long r21; + }; + long long int r2120; + }; + union { + struct { + unsigned long r22; + unsigned long r23; + }; + long long int r2322; + }; + union { + struct { + unsigned long r24; + unsigned long r25; + }; + long long int r2524; + }; + union { + struct { + unsigned long r26; + unsigned long r27; + }; + long long int r2726; + }; + union { + struct { + unsigned long r28; + unsigned long r29; + }; + long long int r2928; + }; + union { + struct { + unsigned long r30; + unsigned long r31; + }; + long long int r3130; + }; + /* VM dispatch pushes event record onto stack - we can build on it */ + struct hvm_event_record hvmer; +}; + +/* Defines to conveniently access the values */ + +/* + * As of the VM spec 0.5, these registers are now set/retrieved via a + * VM call. On the in-bound side, we just fetch the values + * at the entry points and stuff them into the old record in pt_regs. + * However, on the outbound side, probably at VM rte, we set the + * registers back. + */ + +#define pt_elr(regs) ((regs)->hvmer.vmel) +#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val)) +#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK)) +#define user_mode(regs) \ + (((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0) +#define ints_enabled(regs) \ + (((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0) +#define pt_psp(regs) ((regs)->hvmer.vmpsp) +#define pt_badva(regs) ((regs)->hvmer.vmbadva) + +#define pt_set_singlestep(regs) ((regs)->hvmer.vmest |= (1<hvmer.vmest &= ~(1<r29 = (sp);\ + } while (0) + +#define pt_set_kmode(regs) \ + (regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT) + +#define pt_set_usermode(regs) \ + (regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \ + | (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT) + +#endif /* ifndef __ASSEMBLY */ + +#endif diff --git a/arch/hexagon/include/uapi/asm/setup.h b/arch/hexagon/include/uapi/asm/setup.h new file mode 100644 index 000000000..8ce9428b1 --- /dev/null +++ b/arch/hexagon/include/uapi/asm/setup.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#ifndef _ASM_SETUP_H +#define _ASM_SETUP_H + +#ifdef __KERNEL__ +#include +#else +#define __init +#endif + +#include + +extern char external_cmdline_buffer; + +void __init setup_arch_memory(void); + +#endif diff --git a/arch/hexagon/include/uapi/asm/sigcontext.h b/arch/hexagon/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000..7171edb1b --- /dev/null +++ b/arch/hexagon/include/uapi/asm/sigcontext.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#ifndef _ASM_SIGCONTEXT_H +#define _ASM_SIGCONTEXT_H + +#include + +/* + * Signal context structure - contains all info to do with the state + * before the signal handler was invoked. Note: only add new entries + * to the end of the structure. + */ +struct sigcontext { + struct user_regs_struct sc_regs; +} __aligned(8); + +#endif diff --git a/arch/hexagon/include/uapi/asm/signal.h b/arch/hexagon/include/uapi/asm/signal.h new file mode 100644 index 000000000..a08fc4253 --- /dev/null +++ b/arch/hexagon/include/uapi/asm/signal.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */
+
+#ifndef _ASM_SIGNAL_H
+#define _ASM_SIGNAL_H
+
+extern unsigned long __rt_sigtramp_template[2];
+
+void do_signal(struct pt_regs *regs);
+
+#include <asm-generic/signal.h>
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/swab.h b/arch/hexagon/include/uapi/asm/swab.h
new file mode 100644
index 000000000..b39f12104
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/swab.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_SWAB_H
+#define _ASM_SWAB_H
+
+#define __SWAB_64_THRU_32__
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
new file mode 100644
index 000000000..432c4db1b
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/unistd.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Syscall support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The kernel pulls this unistd.h in three different ways:
+ * 1. the "normal" way which gets all the __NR defines
+ * 2. with __SYSCALL defined to produce function declarations
+ * 3. with __SYSCALL defined to produce syscall table initialization
+ * See also: syscalltab.c
+ */
+
+#define sys_mmap2 sys_mmap_pgoff
+#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_TIME32_SYSCALLS
+
+#include <asm-generic/unistd.h>
diff --git a/arch/hexagon/include/uapi/asm/user.h b/arch/hexagon/include/uapi/asm/user.h
new file mode 100644
index 000000000..7327ec59b
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/user.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef HEXAGON_ASM_USER_H
+#define HEXAGON_ASM_USER_H
+
+/*
+ * Layout for registers passed in elf core dumps to userspace.
+ *
+ * Basically a rearranged subset of "pt_regs".
+ *
+ * Interested parties: libc, gdb...
+ */
+
+struct user_regs_struct {
+	unsigned long r0;
+	unsigned long r1;
+	unsigned long r2;
+	unsigned long r3;
+	unsigned long r4;
+	unsigned long r5;
+	unsigned long r6;
+	unsigned long r7;
+	unsigned long r8;
+	unsigned long r9;
+	unsigned long r10;
+	unsigned long r11;
+	unsigned long r12;
+	unsigned long r13;
+	unsigned long r14;
+	unsigned long r15;
+	unsigned long r16;
+	unsigned long r17;
+	unsigned long r18;
+	unsigned long r19;
+	unsigned long r20;
+	unsigned long r21;
+	unsigned long r22;
+	unsigned long r23;
+	unsigned long r24;
+	unsigned long r25;
+	unsigned long r26;
+	unsigned long r27;
+	unsigned long r28;
+	unsigned long r29;
+	unsigned long r30;
+	unsigned long r31;
+	unsigned long sa0;
+	unsigned long lc0;
+	unsigned long sa1;
+	unsigned long lc1;
+	unsigned long m0;
+	unsigned long m1;
+	unsigned long usr;
+	unsigned long p3_0;
+	unsigned long gp;
+	unsigned long ugp;
+	unsigned long pc;
+	unsigned long cause;
+	unsigned long badva;
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
+	unsigned long pad1; /* pad out to 48 words total */
+	unsigned long pad2; /* pad out to 48 words total */
+	unsigned long pad3; /* pad out to 48 words total */
+#else
+	unsigned long cs0;
+	unsigned long cs1;
+	unsigned long pad1; /* pad out to 48 words total */
+#endif
+};
+
+#endif
-- 
cgit v1.2.3
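
The paired-register unions in pt_regs above (r0100 and friends) exist so that two adjacent 32-bit registers can be spilled and reloaded with a single 64-bit "wide" access. A minimal user-space sketch of the same anonymous-union layout trick, assuming a little-endian target (as Hexagon is) and a C11 compiler; the struct and field names are invented for illustration, only the technique mirrors the patch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the pt_regs trick: one 64-bit "wide" view (r0100) overlaying
 * two adjacent 32-bit registers (r00, r01).  Anonymous struct/union
 * members need C11 or a GNU extension. */
struct demo_regpair {
	union {
		struct {
			uint32_t r00;	/* low word on a little-endian machine */
			uint32_t r01;	/* high word */
		};
		uint64_t r0100;
	};
};

int main(void)
{
	struct demo_regpair p;

	p.r0100 = 0x0000000100000000ULL;	/* one wide store...        */
	printf("r00=%u r01=%u\n", p.r00, p.r01);	/* ...gives r00=0 r01=1 */
	return 0;
}

Note that this pairing only works because the low-numbered register occupies the low word; the reverse ordering would not map onto a little-endian 64-bit store.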
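
Similarly, the pt_cause()/user_mode()/ints_enabled() macros all decode fields of the saved vmest word with mask-and-shift pairs. A small stand-alone sketch of that decoding, with made-up mask/shift values; the real HVM_VMEST_* constants live in asm/hexagon_vm.h and are not reproduced here:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for HVM_VMEST_UM_MSK/_SFT and friends. */
#define DEMO_UM_MSK	0x1
#define DEMO_UM_SFT	16
#define DEMO_IE_MSK	0x1
#define DEMO_IE_SFT	30

/* Same shape as the user_mode()/ints_enabled() macros in the patch. */
#define demo_user_mode(vmest)	 (((vmest) & (DEMO_UM_MSK << DEMO_UM_SFT)) != 0)
#define demo_ints_enabled(vmest) (((vmest) & (DEMO_IE_MSK << DEMO_IE_SFT)) != 0)

int main(void)
{
	/* Analogue of pt_set_usermode(): user mode, interrupts enabled. */
	uint32_t vmest = (DEMO_UM_MSK << DEMO_UM_SFT) |
			 (DEMO_IE_MSK << DEMO_IE_SFT);

	printf("user=%d ie=%d\n", demo_user_mode(vmest),
	       demo_ints_enabled(vmest));
	return 0;
}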
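
swab.h's only payload, __SWAB_64_THRU_32__, asks the generic byte-swap machinery (include/uapi/linux/swab.h) to synthesize 64-bit swaps from two 32-bit swaps instead of using a single 64-bit primitive. A rough user-space equivalent of that expansion; the function names here are invented for the demo:

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);
}

/* What __SWAB_64_THRU_32__ requests: swap each 32-bit half,
 * then exchange the halves. */
static uint64_t demo_swab64_thru_32(uint64_t x)
{
	uint32_t hi = (uint32_t)(x >> 32);
	uint32_t lo = (uint32_t)x;

	return ((uint64_t)demo_swab32(lo) << 32) | demo_swab32(hi);
}

int main(void)
{
	printf("%016llx\n", (unsigned long long)
	       demo_swab64_thru_32(0x0102030405060708ULL));
	/* prints 0807060504030201 */
	return 0;
}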
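
Finally, the comment block in unistd.h describes the classic x-macro arrangement: asm-generic/unistd.h carries one syscall list, and each includer redefines __SYSCALL before inclusion to get numbers, declarations, or a table. A toy version of the pattern with the list inlined as a macro instead of a separate header; all names below are invented:

#include <stdio.h>

/* One list of syscalls, expanded three ways by redefining __SYSCALL. */
#define DEMO_SYSCALL_LIST \
	__SYSCALL(0, demo_read) \
	__SYSCALL(1, demo_write)

/* Expansion 1: __NR-style numbering */
#define __SYSCALL(nr, fn) static const int __NR_##fn = nr;
DEMO_SYSCALL_LIST
#undef __SYSCALL

/* Expansion 2: function definitions (declarations, in the kernel) */
#define __SYSCALL(nr, fn) static long fn(void) { return nr; }
DEMO_SYSCALL_LIST
#undef __SYSCALL

/* Expansion 3: syscall table initialization */
#define __SYSCALL(nr, fn) [nr] = fn,
static long (*const demo_sys_call_table[])(void) = {
	DEMO_SYSCALL_LIST
};
#undef __SYSCALL

int main(void)
{
	printf("%d %ld\n", __NR_demo_write, demo_sys_call_table[0]());
	return 0;	/* prints "1 0" */
}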