author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
commit    76cb841cb886eef6b3bee341a2266c76578724ad
tree      f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /arch/hexagon
parent    Initial commit.
Adding upstream version 4.19.249. (tag: upstream/4.19.249)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/hexagon')
-rw-r--r-- arch/hexagon/Kconfig 152
-rw-r--r-- arch/hexagon/Kconfig.debug 1
-rw-r--r-- arch/hexagon/Makefile 40
-rw-r--r-- arch/hexagon/configs/comet_defconfig 85
-rw-r--r-- arch/hexagon/include/asm/Kbuild 38
-rw-r--r-- arch/hexagon/include/asm/asm-offsets.h 1
-rw-r--r-- arch/hexagon/include/asm/atomic.h 201
-rw-r--r-- arch/hexagon/include/asm/bitops.h 298
-rw-r--r-- arch/hexagon/include/asm/cache.h 36
-rw-r--r-- arch/hexagon/include/asm/cacheflush.h 101
-rw-r--r-- arch/hexagon/include/asm/checksum.h 50
-rw-r--r-- arch/hexagon/include/asm/cmpxchg.h 89
-rw-r--r-- arch/hexagon/include/asm/delay.h 29
-rw-r--r-- arch/hexagon/include/asm/dma-mapping.h 40
-rw-r--r-- arch/hexagon/include/asm/dma.h 29
-rw-r--r-- arch/hexagon/include/asm/elf.h 234
-rw-r--r-- arch/hexagon/include/asm/exec.h 28
-rw-r--r-- arch/hexagon/include/asm/fixmap.h 35
-rw-r--r-- arch/hexagon/include/asm/fpu.h 4
-rw-r--r-- arch/hexagon/include/asm/futex.h 108
-rw-r--r-- arch/hexagon/include/asm/hexagon_vm.h 289
-rw-r--r-- arch/hexagon/include/asm/intrinsics.h 26
-rw-r--r-- arch/hexagon/include/asm/io.h 335
-rw-r--r-- arch/hexagon/include/asm/irq.h 36
-rw-r--r-- arch/hexagon/include/asm/irqflags.h 62
-rw-r--r-- arch/hexagon/include/asm/kgdb.h 44
-rw-r--r-- arch/hexagon/include/asm/linkage.h 25
-rw-r--r-- arch/hexagon/include/asm/mem-layout.h 120
-rw-r--r-- arch/hexagon/include/asm/mmu.h 37
-rw-r--r-- arch/hexagon/include/asm/mmu_context.h 102
-rw-r--r-- arch/hexagon/include/asm/module.h 26
-rw-r--r-- arch/hexagon/include/asm/page.h 163
-rw-r--r-- arch/hexagon/include/asm/perf_event.h 22
-rw-r--r-- arch/hexagon/include/asm/pgalloc.h 148
-rw-r--r-- arch/hexagon/include/asm/pgtable.h 485
-rw-r--r-- arch/hexagon/include/asm/processor.h 149
-rw-r--r-- arch/hexagon/include/asm/smp.h 43
-rw-r--r-- arch/hexagon/include/asm/spinlock.h 172
-rw-r--r-- arch/hexagon/include/asm/spinlock_types.h 40
-rw-r--r-- arch/hexagon/include/asm/string.h 32
-rw-r--r-- arch/hexagon/include/asm/suspend.h 27
-rw-r--r-- arch/hexagon/include/asm/switch_to.h 34
-rw-r--r-- arch/hexagon/include/asm/syscall.h 46
-rw-r--r-- arch/hexagon/include/asm/thread_info.h 128
-rw-r--r-- arch/hexagon/include/asm/time.h 29
-rw-r--r-- arch/hexagon/include/asm/timer-regs.h 39
-rw-r--r-- arch/hexagon/include/asm/timex.h 36
-rw-r--r-- arch/hexagon/include/asm/tlb.h 39
-rw-r--r-- arch/hexagon/include/asm/tlbflush.h 58
-rw-r--r-- arch/hexagon/include/asm/traps.h 29
-rw-r--r-- arch/hexagon/include/asm/uaccess.h 112
-rw-r--r-- arch/hexagon/include/asm/vdso.h 30
-rw-r--r-- arch/hexagon/include/asm/vm_fault.h 26
-rw-r--r-- arch/hexagon/include/asm/vm_mmu.h 110
-rw-r--r-- arch/hexagon/include/uapi/asm/Kbuild 27
-rw-r--r-- arch/hexagon/include/uapi/asm/bitsperlong.h 27
-rw-r--r-- arch/hexagon/include/uapi/asm/byteorder.h 29
-rw-r--r-- arch/hexagon/include/uapi/asm/kvm_para.h 2
-rw-r--r-- arch/hexagon/include/uapi/asm/param.h 27
-rw-r--r-- arch/hexagon/include/uapi/asm/ptrace.h 45
-rw-r--r-- arch/hexagon/include/uapi/asm/registers.h 229
-rw-r--r-- arch/hexagon/include/uapi/asm/setup.h 35
-rw-r--r-- arch/hexagon/include/uapi/asm/sigcontext.h 34
-rw-r--r-- arch/hexagon/include/uapi/asm/signal.h 29
-rw-r--r-- arch/hexagon/include/uapi/asm/swab.h 25
-rw-r--r-- arch/hexagon/include/uapi/asm/unistd.h 37
-rw-r--r-- arch/hexagon/include/uapi/asm/user.h 70
-rw-r--r-- arch/hexagon/kernel/Makefile 20
-rw-r--r-- arch/hexagon/kernel/asm-offsets.c 104
-rw-r--r-- arch/hexagon/kernel/dma.c 219
-rw-r--r-- arch/hexagon/kernel/head.S 237
-rw-r--r-- arch/hexagon/kernel/hexagon_ksyms.c 55
-rw-r--r-- arch/hexagon/kernel/irq_cpu.c 90
-rw-r--r-- arch/hexagon/kernel/kgdb.c 259
-rw-r--r-- arch/hexagon/kernel/module.c 162
-rw-r--r-- arch/hexagon/kernel/process.c 213
-rw-r--r-- arch/hexagon/kernel/ptrace.c 204
-rw-r--r-- arch/hexagon/kernel/reset.c 37
-rw-r--r-- arch/hexagon/kernel/screen_info.c 3
-rw-r--r-- arch/hexagon/kernel/setup.c 150
-rw-r--r-- arch/hexagon/kernel/signal.c 270
-rw-r--r-- arch/hexagon/kernel/smp.c 267
-rw-r--r-- arch/hexagon/kernel/stacktrace.c 65
-rw-r--r-- arch/hexagon/kernel/syscalltab.c 32
-rw-r--r-- arch/hexagon/kernel/time.c 242
-rw-r--r-- arch/hexagon/kernel/trampoline.S 35
-rw-r--r-- arch/hexagon/kernel/traps.c 453
-rw-r--r-- arch/hexagon/kernel/vdso.c 101
-rw-r--r-- arch/hexagon/kernel/vm_entry.S 393
-rw-r--r-- arch/hexagon/kernel/vm_events.c 105
-rw-r--r-- arch/hexagon/kernel/vm_init_segtable.S 442
-rw-r--r-- arch/hexagon/kernel/vm_ops.S 102
-rw-r--r-- arch/hexagon/kernel/vm_switch.S 95
-rw-r--r-- arch/hexagon/kernel/vm_vectors.S 48
-rw-r--r-- arch/hexagon/kernel/vmlinux.lds.S 80
-rw-r--r-- arch/hexagon/lib/Makefile 4
-rw-r--r-- arch/hexagon/lib/checksum.c 202
-rw-r--r-- arch/hexagon/lib/io.c 95
-rw-r--r-- arch/hexagon/lib/memcpy.S 543
-rw-r--r-- arch/hexagon/lib/memset.S 315
-rw-r--r-- arch/hexagon/mm/Makefile 6
-rw-r--r-- arch/hexagon/mm/cache.c 139
-rw-r--r-- arch/hexagon/mm/copy_from_user.S 114
-rw-r--r-- arch/hexagon/mm/copy_to_user.S 92
-rw-r--r-- arch/hexagon/mm/copy_user_template.S 185
-rw-r--r-- arch/hexagon/mm/init.c 280
-rw-r--r-- arch/hexagon/mm/ioremap.c 57
-rw-r--r-- arch/hexagon/mm/pgalloc.c 23
-rw-r--r-- arch/hexagon/mm/strnlen_user.S 139
-rw-r--r-- arch/hexagon/mm/uaccess.c 58
-rw-r--r-- arch/hexagon/mm/vm_fault.c 197
-rw-r--r-- arch/hexagon/mm/vm_tlb.c 94
112 files changed, 12340 insertions, 0 deletions
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
new file mode 100644
index 000000000..89a4b22f3
--- /dev/null
+++ b/arch/hexagon/Kconfig
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: GPL-2.0
+# Hexagon configuration
+comment "Linux Kernel Configuration for Hexagon"
+
+config HEXAGON
+ def_bool y
+ select ARCH_NO_PREEMPT
+ select HAVE_OPROFILE
+ # Other pending projects/to-do items.
+ # select HAVE_REGS_AND_STACK_ACCESS_API
+ # select HAVE_HW_BREAKPOINT if PERF_EVENTS
+ # select ARCH_HAS_CPU_IDLE_WAIT
+ # select GPIOLIB
+ # select HAVE_CLK
+ # select GENERIC_PENDING_IRQ if SMP
+ select GENERIC_ATOMIC64
+ select HAVE_PERF_EVENTS
+ # GENERIC_ALLOCATOR is used by dma_alloc_coherent()
+ select GENERIC_ALLOCATOR
+ select GENERIC_IRQ_SHOW
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
+ select NEED_SG_DMA_LENGTH
+ select NO_IOPORT_MAP
+ select GENERIC_IOMAP
+ select GENERIC_SMP_IDLE_THREAD
+ select STACKTRACE_SUPPORT
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST
+ select MODULES_USE_ELF_RELA
+ select GENERIC_CPU_DEVICES
+ ---help---
+ Qualcomm Hexagon is a processor architecture designed for high
+ performance and low power across a wide variety of applications.
+
+config HEXAGON_PHYS_OFFSET
+ def_bool y
+ ---help---
+ Platforms that don't load the kernel at zero set this.
+
+config FRAME_POINTER
+ def_bool y
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config PCI
+ def_bool n
+
+config EARLY_PRINTK
+ def_bool y
+
+config MMU
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
+config GENERIC_CSUM
+ def_bool y
+
+#
+# Use the generic interrupt handling code in kernel/irq/:
+#
+config GENERIC_IRQ_PROBE
+ def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool n
+
+config RWSEM_XCHGADD_ALGORITHM
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+ select STACKTRACE
+
+config GENERIC_BUG
+ def_bool y
+ depends on BUG
+
+menu "Machine selection"
+
+choice
+ prompt "System type"
+ default HEXAGON_COMET
+
+config HEXAGON_COMET
+ bool "Comet Board"
+ ---help---
+ Support for the Comet platform.
+
+endchoice
+
+config HEXAGON_ARCH_VERSION
+ int "Architecture version"
+ default 2
+
+config CMDLINE
+ string "Default kernel command string"
+ default ""
+ help
+ On some platforms, there is currently no way for the boot loader
+ to pass arguments to the kernel. For these, you should supply some
+ command-line options at build time by entering them here. At a
+ minimum, you should specify the memory size and the root device
+ (e.g., mem=64M root=/dev/nfs).
+
+config SMP
+ bool "Multi-Processing support"
+ ---help---
+ Enables SMP support in the kernel. If unsure, say "Y"
+
+config NR_CPUS
+ int "Maximum number of CPUs" if SMP
+ range 2 6 if SMP
+ default "1" if !SMP
+ default "6" if SMP
+ ---help---
+ This allows you to specify the maximum number of CPUs which this
+ kernel will support. The maximum supported value is 6 and the
+ minimum value which makes sense is 2.
+
+ This is purely to save memory - each supported CPU adds
+ approximately eight kilobytes to the kernel image.
+
+choice
+ prompt "Kernel page size"
+ default PAGE_SIZE_4KB
+ ---help---
+ Changes the default page size; use with caution.
+
+config PAGE_SIZE_4KB
+ bool "4KB"
+
+config PAGE_SIZE_16KB
+ bool "16KB"
+
+config PAGE_SIZE_64KB
+ bool "64KB"
+
+config PAGE_SIZE_256KB
+ bool "256KB"
+
+endchoice
+
+source "kernel/Kconfig.hz"
+
+endmenu
diff --git a/arch/hexagon/Kconfig.debug b/arch/hexagon/Kconfig.debug
new file mode 100644
index 000000000..22a162cd9
--- /dev/null
+++ b/arch/hexagon/Kconfig.debug
@@ -0,0 +1 @@
+# dummy file, do not delete
diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile
new file mode 100644
index 000000000..4c5858b80
--- /dev/null
+++ b/arch/hexagon/Makefile
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Hexagon arch
+
+KBUILD_DEFCONFIG = comet_defconfig
+
+# Do not use GP-relative jumps
+KBUILD_CFLAGS += -G0
+LDFLAGS_vmlinux += -G0
+
+# Do not use single-byte enums; these will overflow.
+KBUILD_CFLAGS += -fno-short-enums
+
+# Modules must use either long-calls, or use pic/plt.
+# Use long-calls for now, it's easier. And faster.
+# KBUILD_CFLAGS_MODULE += -fPIC
+# KBUILD_LDFLAGS_MODULE += -shared
+KBUILD_CFLAGS_MODULE += -mlong-calls
+
+cflags-y += $(call cc-option,-mv${CONFIG_HEXAGON_ARCH_VERSION})
+aflags-y += $(call cc-option,-mv${CONFIG_HEXAGON_ARCH_VERSION})
+ldflags-y += $(call cc-option,-mv${CONFIG_HEXAGON_ARCH_VERSION})
+
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_AFLAGS += $(aflags-y)
+KBUILD_LDFLAGS += $(ldflags-y)
+
+# Thread-info register will be r19. This value is not configurable;
+# it is hard-coded in several files.
+TIR_NAME := r19
+KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
+KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
+
+LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+libs-y += $(LIBGCC)
+
+head-y := arch/hexagon/kernel/head.o
+
+core-y += arch/hexagon/kernel/ \
+ arch/hexagon/mm/ \
+ arch/hexagon/lib/
diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig
new file mode 100644
index 000000000..e324f65f4
--- /dev/null
+++ b/arch/hexagon/configs/comet_defconfig
@@ -0,0 +1,85 @@
+CONFIG_SMP=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_HZ_100=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_CROSS_COMPILE="hexagon-"
+CONFIG_LOCALVERSION="-smp"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_PHYLIB=y
+CONFIG_NET_ETHERNET=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+CONFIG_LEGACY_PTY_COUNT=64
+# CONFIG_DEVKMEM is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_DEBUG=y
+CONFIG_SPI_BITBANG=y
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=y
+CONFIG_LIBCRC32C=y
+CONFIG_FRAME_WARN=0
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
new file mode 100644
index 000000000..dd2fd9c0d
--- /dev/null
+++ b/arch/hexagon/include/asm/Kbuild
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += barrier.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += compat.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += extable.h
+generic-y += fb.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += iomap.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kprobes.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += rwsem.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += serial.h
+generic-y += sizes.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += unaligned.h
+generic-y += vga.h
+generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/hexagon/include/asm/asm-offsets.h b/arch/hexagon/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/hexagon/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
new file mode 100644
index 000000000..15cca7f1f
--- /dev/null
+++ b/arch/hexagon/include/asm/atomic.h
@@ -0,0 +1,201 @@
+/*
+ * Atomic operations for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <linux/types.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#define ATOMIC_INIT(i) { (i) }
+
+/* Normal writes in our arch don't clear lock reservations */
+
+static inline void atomic_set(atomic_t *v, int new)
+{
+ asm volatile(
+ "1: r6 = memw_locked(%0);\n"
+ " memw_locked(%0,p0) = %1;\n"
+ " if (!P0) jump 1b;\n"
+ :
+ : "r" (&v->counter), "r" (new)
+ : "memory", "p0", "r6"
+ );
+}
+
+#define atomic_set_release(v, i) atomic_set((v), (i))
+
+/**
+ * atomic_read - reads a word, atomically
+ * @v: pointer to atomic value
+ *
+ * Assumes all word reads on our architecture are atomic.
+ */
+#define atomic_read(v) READ_ONCE((v)->counter)
+
+/**
+ * atomic_xchg - atomically exchange a value with memory
+ * @v: pointer to memory to change
+ * @new: new value (technically passed in a register -- see xchg)
+ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+
+
+/**
+ * atomic_cmpxchg - atomic compare-and-exchange values
+ * @v: pointer to value to change
+ * @old: desired old value to match
+ * @new: new value to put in
+ *
+ * Parameters are then pointer, value-in-register, value-in-register,
+ * and the output is the old value.
+ *
+ * Apparently this is complicated for archs that don't support
+ * the memw_locked like we do (or it's broken or whatever).
+ *
+ * Kind of the lynchpin of the rest of the generically defined routines.
+ * Remember V2 had that bug with dotnew predicate set by memw_locked.
+ *
+ * "old" is "expected" old val, __oldval is actual old value
+ */
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int __oldval;
+
+ asm volatile(
+ "1: %0 = memw_locked(%1);\n"
+ " { P0 = cmp.eq(%0,%2);\n"
+ " if (!P0.new) jump:nt 2f; }\n"
+ " memw_locked(%1,P0) = %3;\n"
+ " if (!P0) jump 1b;\n"
+ "2:\n"
+ : "=&r" (__oldval)
+ : "r" (&v->counter), "r" (old), "r" (new)
+ : "memory", "p0"
+ );
+
+ return __oldval;
+}
+
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int output; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+ " if (!P3) jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int output; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+ " if (!P3) jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+ return output; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int output, val; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%2);\n" \
+ " %1 = "#op "(%0,%3);\n" \
+ " memw_locked(%2,P3)=%1;\n" \
+ " if (!P3) jump 1b;\n" \
+ : "=&r" (output), "=&r" (val) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+ return output; \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+/**
+ * atomic_fetch_add_unless - add unless the number is a given value
+ * @v: pointer to value
+ * @a: amount to add
+ * @u: unless value is equal to u
+ *
+ * Returns old value.
+ *
+ */
+
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int __oldval;
+ register int tmp;
+
+ asm volatile(
+ "1: %0 = memw_locked(%2);"
+ " {"
+ " p3 = cmp.eq(%0, %4);"
+ " if (p3.new) jump:nt 2f;"
+ " %1 = add(%0, %3);"
+ " }"
+ " memw_locked(%2, p3) = %1;"
+ " {"
+ " if (!p3) jump 1b;"
+ " }"
+ "2:"
+ : "=&r" (__oldval), "=&r" (tmp)
+ : "r" (v), "r" (a), "r" (u)
+ : "memory", "p3"
+ );
+ return __oldval;
+}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+
+#endif
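
The memw_locked load-locked/store-conditional pairs above are the primitives behind every atomic_* routine in this file. As a rough usage sketch (plain C, not part of this commit; try_bump() is a hypothetical caller), a bounded increment built only on atomic_read() and atomic_cmpxchg() looks like this:

    /* Sketch: increment v unless it has already reached cap.
     * Relies only on atomic_read() and atomic_cmpxchg() as defined above.
     */
    static int try_bump(atomic_t *v, int cap)
    {
    	int old, seen;

    	for (old = atomic_read(v); old < cap; old = seen) {
    		seen = atomic_cmpxchg(v, old, old + 1);
    		if (seen == old)
    			return 1;	/* our compare-and-swap won */
    	}
    	return 0;	/* counter already at cap */
    }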
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
new file mode 100644
index 000000000..634306cda
--- /dev/null
+++ b/arch/hexagon/include/asm/bitops.h
@@ -0,0 +1,298 @@
+/*
+ * Bit operations for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#include <linux/compiler.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/barrier.h>
+
+#ifdef __KERNEL__
+
+/*
+ * The offset calculations for these are based on BITS_PER_LONG == 32
+ * (i.e. I get to shift by #5-2 (32 bits per long, 4 bytes per access),
+ * mask by 0x0000001F)
+ *
+ * Typically, R10 is clobbered for address, R11 bit nr, and R12 is temp
+ */
+
+/**
+ * test_and_clear_bit - clear a bit and return its old value
+ * @nr: bit number to clear
+ * @addr: pointer to memory
+ */
+static inline int test_and_clear_bit(int nr, volatile void *addr)
+{
+ int oldval;
+
+ __asm__ __volatile__ (
+ " {R10 = %1; R11 = asr(%2,#5); }\n"
+ " {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
+ "1: R12 = memw_locked(R10);\n"
+ " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
+ " memw_locked(R10,P1) = R12;\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
+ : "=&r" (oldval)
+ : "r" (addr), "r" (nr)
+ : "r10", "r11", "r12", "p0", "p1", "memory"
+ );
+
+ return oldval;
+}
+
+/**
+ * test_and_set_bit - set a bit and return its old value
+ * @nr: bit number to set
+ * @addr: pointer to memory
+ */
+static inline int test_and_set_bit(int nr, volatile void *addr)
+{
+ int oldval;
+
+ __asm__ __volatile__ (
+ " {R10 = %1; R11 = asr(%2,#5); }\n"
+ " {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
+ "1: R12 = memw_locked(R10);\n"
+ " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
+ " memw_locked(R10,P1) = R12;\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
+ : "=&r" (oldval)
+ : "r" (addr), "r" (nr)
+ : "r10", "r11", "r12", "p0", "p1", "memory"
+ );
+
+
+ return oldval;
+
+}
+
+/**
+ * test_and_change_bit - toggle a bit and return its old value
+ * @nr: bit number to set
+ * @addr: pointer to memory
+ */
+static inline int test_and_change_bit(int nr, volatile void *addr)
+{
+ int oldval;
+
+ __asm__ __volatile__ (
+ " {R10 = %1; R11 = asr(%2,#5); }\n"
+ " {R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
+ "1: R12 = memw_locked(R10);\n"
+ " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
+ " memw_locked(R10,P1) = R12;\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
+ : "=&r" (oldval)
+ : "r" (addr), "r" (nr)
+ : "r10", "r11", "r12", "p0", "p1", "memory"
+ );
+
+ return oldval;
+
+}
+
+/*
+ * Atomic, but doesn't care about the return value.
+ * Rewrite later to save a cycle or two.
+ */
+
+static inline void clear_bit(int nr, volatile void *addr)
+{
+ test_and_clear_bit(nr, addr);
+}
+
+static inline void set_bit(int nr, volatile void *addr)
+{
+ test_and_set_bit(nr, addr);
+}
+
+static inline void change_bit(int nr, volatile void *addr)
+{
+ test_and_change_bit(nr, addr);
+}
+
+
+/*
+ * These are allowed to be non-atomic. In fact the generic flavors are
+ * in non-atomic.h. Would it be better to use intrinsics for this?
+ *
+ * OK, writes in our architecture do not invalidate LL/SC, so this has to
+ * be atomic, particularly for things like slab_lock and slab_unlock.
+ *
+ */
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+ test_and_clear_bit(nr, addr);
+}
+
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+ test_and_set_bit(nr, addr);
+}
+
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+ test_and_change_bit(nr, addr);
+}
+
+/* Apparently, at least some of these are allowed to be non-atomic */
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ return test_and_clear_bit(nr, addr);
+}
+
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ return test_and_set_bit(nr, addr);
+}
+
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+ return test_and_change_bit(nr, addr);
+}
+
+static inline int __test_bit(int nr, const volatile unsigned long *addr)
+{
+ int retval;
+
+ asm volatile(
+ "{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
+ : "=&r" (retval)
+ : "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
+ : "p0"
+ );
+
+ return retval;
+}
+
+#define test_bit(nr, addr) __test_bit(nr, addr)
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static inline long ffz(int x)
+{
+ int r;
+
+ asm("%0 = ct1(%1);\n"
+ : "=&r" (r)
+ : "r" (x));
+ return r;
+}
+
+/*
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static inline int fls(int x)
+{
+ int r;
+
+ asm("{ %0 = cl0(%1);}\n"
+ "%0 = sub(#32,%0);\n"
+ : "=&r" (r)
+ : "r" (x)
+ : "p0");
+
+ return r;
+}
+
+/*
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static inline int ffs(int x)
+{
+ int r;
+
+ asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
+ "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
+ : "=&r" (r)
+ : "r" (x)
+ : "p0");
+
+ return r;
+}
+
+/*
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ *
+ * bits_per_long assumed to be 32
+ * numbering starts at 0 (instead of 1 like ffs)
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+ int num;
+
+ asm("%0 = ct0(%1);\n"
+ : "=&r" (num)
+ : "r" (word));
+
+ return num;
+}
+
+/*
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ * bits_per_long assumed to be 32
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ int num;
+
+ asm("%0 = cl0(%1);\n"
+ "%0 = sub(#31,%0);\n"
+ : "=&r" (num)
+ : "r" (word));
+
+ return num;
+}
+
+#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/find.h>
+
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __KERNEL__ */
+#endif
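
Every test_and_*_bit() above opens with the same asr(#5)/asl(#2)/and(#0x1f) sequence to split a bit number into a word address and a bit position. The equivalent C arithmetic, as an illustrative sketch (bit_location() is a hypothetical name, assuming BITS_PER_LONG == 32):

    /* Sketch: the address math the inline assembly performs. */
    static void bit_location(volatile unsigned long *addr, int nr,
    			 volatile unsigned long **word, unsigned int *bit)
    {
    	*word = addr + (nr >> 5);	/* R10 += asl(asr(nr,#5),#2) */
    	*bit  = nr & 0x1f;		/* R11 = and(nr,#0x1f)       */
    }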
diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
new file mode 100644
index 000000000..69952c184
--- /dev/null
+++ b/arch/hexagon/include/asm/cache.h
@@ -0,0 +1,36 @@
+/*
+ * Cache definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011,2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __ASM_CACHE_H
+#define __ASM_CACHE_H
+
+/* Bytes per L1 cache line */
+#define L1_CACHE_SHIFT (5)
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#define __cacheline_aligned __aligned(L1_CACHE_BYTES)
+#define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
+
+/* See http://lwn.net/Articles/262554/ */
+#define __read_mostly
+
+#endif
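
With L1_CACHE_SHIFT of 5, a line is 32 bytes, and ARCH_DMA_MINALIGN keeps DMA buffers from sharing a line with unrelated data. A minimal sketch of the alignment macro in use (illustrative struct, not from this commit):

    /* Sketch: give a frequently written field its own 32-byte line so
     * concurrent writers don't false-share with neighboring fields.
     */
    struct example_stats {
    	unsigned long config_flags;
    	unsigned long hot_counter ____cacheline_aligned;
    };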
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
new file mode 100644
index 000000000..b86f9f300
--- /dev/null
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -0,0 +1,101 @@
+/*
+ * Cache flush operations for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm_types.h>
+
+/* Cache flushing:
+ *
+ * - flush_cache_all() flushes entire cache
+ * - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
+ * - flush_cache_range(vma, start, end) flushes a range of pages
+ * - flush_icache_range(start, end) flush a range of instructions
+ * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
+ * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
+ *
+ * Need to doublecheck which one is really needed for ptrace stuff to work.
+ */
+#define LINESIZE 32
+#define LINEBITS 5
+
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_page(vma, pg) do { } while (0)
+#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+/*
+ * Flush Dcache range through current map.
+ */
+extern void flush_dcache_range(unsigned long start, unsigned long end);
+
+/*
+ * Flush Icache range through current map.
+ */
+extern void flush_icache_range(unsigned long start, unsigned long end);
+
+/*
+ * Memory-management related flushes are there to ensure in non-physically
+ * indexed cache schemes that stale lines belonging to a given ASID aren't
+ * in the cache to confuse things. The prototype Hexagon Virtual Machine
+ * only uses a single ASID for all user-mode maps, which should
+ * mean that they aren't necessary. A brute-force, flush-everything
+ * implementation, with the name xxxxx_hexagon() is present in
+ * arch/hexagon/mm/cache.c, but let's not wire it up until we know
+ * it is needed.
+ */
+extern void flush_cache_all_hexagon(void);
+
+/*
+ * This may or may not ever have to be non-null, depending on the
+ * virtual machine MMU. For a native kernel, it's definitely a no-op
+ *
+ * This is also the place where deferred cache coherency stuff seems
+ * to happen, classically... but instead we do it like ia64 and
+ * clean the cache when the PTE is set.
+ *
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ /* generic_ptrace_pokedata doesn't wind up here, does it? */
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len);
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
+extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
+
+#endif
diff --git a/arch/hexagon/include/asm/checksum.h b/arch/hexagon/include/asm/checksum.h
new file mode 100644
index 000000000..d9f58d696
--- /dev/null
+++ b/arch/hexagon/include/asm/checksum.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_CHECKSUM_H
+#define _ASM_CHECKSUM_H
+
+#define do_csum do_csum
+unsigned int do_csum(const void *voidptr, int len);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+#define csum_partial_copy_nocheck csum_partial_copy_nocheck
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum);
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum);
+
+#define csum_tcpudp_magic csum_tcpudp_magic
+__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum);
+
+#include <asm-generic/checksum.h>
+
+#endif
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
new file mode 100644
index 000000000..db2584240
--- /dev/null
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -0,0 +1,89 @@
+/*
+ * xchg/cmpxchg operations for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_CMPXCHG_H
+#define _ASM_CMPXCHG_H
+
+/*
+ * __xchg - atomically exchange a register and a memory location
+ * @x: value to swap
+ * @ptr: pointer to memory
+ * @size: size of the value
+ *
+ * Only 4 bytes supported currently.
+ *
+ * Note: there was an errata for V2 about .new's and memw_locked.
+ *
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
+{
+ unsigned long retval;
+
+ /* Can't seem to use printk or panic here, so just stop */
+ if (size != 4) do { asm volatile("brkpt;\n"); } while (1);
+
+ __asm__ __volatile__ (
+ "1: %0 = memw_locked(%1);\n" /* load into retval */
+ " memw_locked(%1,P0) = %2;\n" /* store into memory */
+ " if (!P0) jump 1b;\n"
+ : "=&r" (retval)
+ : "r" (ptr), "r" (x)
+ : "memory", "p0"
+ );
+ return retval;
+}
+
+/*
+ * Atomically swap the contents of a register with memory. Should be atomic
+ * between multiple CPU's and within interrupts on the same CPU.
+ */
+#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
+ sizeof(*(ptr))))
+
+/*
+ * see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
+ * looks just like atomic_cmpxchg on our arch currently with a bunch of
+ * variable casting.
+ */
+
+#define cmpxchg(ptr, old, new) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __oldval = 0; \
+ \
+ asm volatile( \
+ "1: %0 = memw_locked(%1);\n" \
+ " { P0 = cmp.eq(%0,%2);\n" \
+ " if (!P0.new) jump:nt 2f; }\n" \
+ " memw_locked(%1,p0) = %3;\n" \
+ " if (!P0) jump 1b;\n" \
+ "2:\n" \
+ : "=&r" (__oldval) \
+ : "r" (__ptr), "r" (__old), "r" (__new) \
+ : "memory", "p0" \
+ ); \
+ __oldval; \
+})
+
+#endif /* _ASM_CMPXCHG_H */
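
Note that this cmpxchg() returns the value actually observed in memory, not a success flag, and only 4-byte objects are supported. A minimal retry-loop sketch built on the macro above (add_to_word() is a hypothetical helper):

    /* Sketch: lock-free add on a plain 32-bit word via cmpxchg(). */
    static void add_to_word(unsigned long *p, unsigned long n)
    {
    	unsigned long old, seen;

    	do {
    		old = *p;				/* snapshot */
    		seen = cmpxchg(p, old, old + n);	/* observed value */
    	} while (seen != old);				/* raced: retry */
    }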
diff --git a/arch/hexagon/include/asm/delay.h b/arch/hexagon/include/asm/delay.h
new file mode 100644
index 000000000..8933b9b1a
--- /dev/null
+++ b/arch/hexagon/include/asm/delay.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_DELAY_H
+#define _ASM_DELAY_H
+
+#include <asm/param.h>
+
+extern void __delay(unsigned long cycles);
+extern void __udelay(unsigned long usecs);
+
+#define udelay(usecs) __udelay((usecs))
+
+#endif /* _ASM_DELAY_H */
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
new file mode 100644
index 000000000..263f6acbf
--- /dev/null
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -0,0 +1,40 @@
+/*
+ * DMA operations for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <asm/io.h>
+
+struct device;
+
+extern const struct dma_map_ops *dma_ops;
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+ return dma_ops;
+}
+
+#endif
diff --git a/arch/hexagon/include/asm/dma.h b/arch/hexagon/include/asm/dma.h
new file mode 100644
index 000000000..9e34ff49f
--- /dev/null
+++ b/arch/hexagon/include/asm/dma.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <asm/io.h>
+
+#define MAX_DMA_CHANNELS 1
+#define MAX_DMA_ADDRESS (PAGE_OFFSET)
+
+extern size_t hexagon_coherent_pool_size;
+
+#endif
diff --git a/arch/hexagon/include/asm/elf.h b/arch/hexagon/include/asm/elf.h
new file mode 100644
index 000000000..80311e7b8
--- /dev/null
+++ b/arch/hexagon/include/asm/elf.h
@@ -0,0 +1,234 @@
+/*
+ * ELF definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __ASM_ELF_H
+#define __ASM_ELF_H
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+
+/*
+ * This should really be in linux/elf-em.h.
+ */
+#define EM_HEXAGON 164 /* QUALCOMM Hexagon */
+
+struct elf32_hdr;
+
+/*
+ * ELF header e_flags defines.
+ */
+
+/* should have stuff like "CPU type" and maybe "ABI version", etc */
+
+/* Hexagon relocations */
+ /* V2 */
+#define R_HEXAGON_NONE 0
+#define R_HEXAGON_B22_PCREL 1
+#define R_HEXAGON_B15_PCREL 2
+#define R_HEXAGON_B7_PCREL 3
+#define R_HEXAGON_LO16 4
+#define R_HEXAGON_HI16 5
+#define R_HEXAGON_32 6
+#define R_HEXAGON_16 7
+#define R_HEXAGON_8 8
+#define R_HEXAGON_GPREL16_0 9
+#define R_HEXAGON_GPREL16_1 10
+#define R_HEXAGON_GPREL16_2 11
+#define R_HEXAGON_GPREL16_3 12
+#define R_HEXAGON_HL16 13
+ /* V3 */
+#define R_HEXAGON_B13_PCREL 14
+ /* V4 */
+#define R_HEXAGON_B9_PCREL 15
+ /* V4 (extenders) */
+#define R_HEXAGON_B32_PCREL_X 16
+#define R_HEXAGON_32_6_X 17
+ /* V4 (extended) */
+#define R_HEXAGON_B22_PCREL_X 18
+#define R_HEXAGON_B15_PCREL_X 19
+#define R_HEXAGON_B13_PCREL_X 20
+#define R_HEXAGON_B9_PCREL_X 21
+#define R_HEXAGON_B7_PCREL_X 22
+#define R_HEXAGON_16_X 23
+#define R_HEXAGON_12_X 24
+#define R_HEXAGON_11_X 25
+#define R_HEXAGON_10_X 26
+#define R_HEXAGON_9_X 27
+#define R_HEXAGON_8_X 28
+#define R_HEXAGON_7_X 29
+#define R_HEXAGON_6_X 30
+ /* V2 PIC */
+#define R_HEXAGON_32_PCREL 31
+#define R_HEXAGON_COPY 32
+#define R_HEXAGON_GLOB_DAT 33
+#define R_HEXAGON_JMP_SLOT 34
+#define R_HEXAGON_RELATIVE 35
+#define R_HEXAGON_PLT_B22_PCREL 36
+#define R_HEXAGON_GOTOFF_LO16 37
+#define R_HEXAGON_GOTOFF_HI16 38
+#define R_HEXAGON_GOTOFF_32 39
+#define R_HEXAGON_GOT_LO16 40
+#define R_HEXAGON_GOT_HI16 41
+#define R_HEXAGON_GOT_32 42
+#define R_HEXAGON_GOT_16 43
+
+/*
+ * ELF register definitions..
+ */
+typedef unsigned long elf_greg_t;
+
+typedef struct user_regs_struct elf_gregset_t;
+#define ELF_NGREG (sizeof(elf_gregset_t)/sizeof(unsigned long))
+
+/* Placeholder */
+typedef unsigned long elf_fpregset_t;
+
+/*
+ * Bypass the whole "regsets" thing for now and use the define.
+ */
+
+#if CONFIG_HEXAGON_ARCH_VERSION >= 4
+#define CS_COPYREGS(DEST,REGS) \
+do {\
+ DEST.cs0 = REGS->cs0;\
+ DEST.cs1 = REGS->cs1;\
+} while (0)
+#else
+#define CS_COPYREGS(DEST,REGS)
+#endif
+
+#define ELF_CORE_COPY_REGS(DEST, REGS) \
+do { \
+ DEST.r0 = REGS->r00; \
+ DEST.r1 = REGS->r01; \
+ DEST.r2 = REGS->r02; \
+ DEST.r3 = REGS->r03; \
+ DEST.r4 = REGS->r04; \
+ DEST.r5 = REGS->r05; \
+ DEST.r6 = REGS->r06; \
+ DEST.r7 = REGS->r07; \
+ DEST.r8 = REGS->r08; \
+ DEST.r9 = REGS->r09; \
+ DEST.r10 = REGS->r10; \
+ DEST.r11 = REGS->r11; \
+ DEST.r12 = REGS->r12; \
+ DEST.r13 = REGS->r13; \
+ DEST.r14 = REGS->r14; \
+ DEST.r15 = REGS->r15; \
+ DEST.r16 = REGS->r16; \
+ DEST.r17 = REGS->r17; \
+ DEST.r18 = REGS->r18; \
+ DEST.r19 = REGS->r19; \
+ DEST.r20 = REGS->r20; \
+ DEST.r21 = REGS->r21; \
+ DEST.r22 = REGS->r22; \
+ DEST.r23 = REGS->r23; \
+ DEST.r24 = REGS->r24; \
+ DEST.r25 = REGS->r25; \
+ DEST.r26 = REGS->r26; \
+ DEST.r27 = REGS->r27; \
+ DEST.r28 = REGS->r28; \
+ DEST.r29 = pt_psp(REGS); \
+ DEST.r30 = REGS->r30; \
+ DEST.r31 = REGS->r31; \
+ DEST.sa0 = REGS->sa0; \
+ DEST.lc0 = REGS->lc0; \
+ DEST.sa1 = REGS->sa1; \
+ DEST.lc1 = REGS->lc1; \
+ DEST.m0 = REGS->m0; \
+ DEST.m1 = REGS->m1; \
+ DEST.usr = REGS->usr; \
+ DEST.p3_0 = REGS->preds; \
+ DEST.gp = REGS->gp; \
+ DEST.ugp = REGS->ugp; \
+ CS_COPYREGS(DEST,REGS); \
+ DEST.pc = pt_elr(REGS); \
+ DEST.cause = pt_cause(REGS); \
+ DEST.badva = pt_badva(REGS); \
+} while (0);
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ * Checks the machine and ABI type.
+ */
+#define elf_check_arch(hdr) ((hdr)->e_machine == EM_HEXAGON)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_HEXAGON
+
+#if CONFIG_HEXAGON_ARCH_VERSION == 2
+#define ELF_CORE_EFLAGS 0x1
+#endif
+
+#if CONFIG_HEXAGON_ARCH_VERSION == 3
+#define ELF_CORE_EFLAGS 0x2
+#endif
+
+#if CONFIG_HEXAGON_ARCH_VERSION == 4
+#define ELF_CORE_EFLAGS 0x3
+#endif
+
+/*
+ * Some architectures have ld.so set up a pointer to a function
+ * to be registered using atexit, to facilitate cleanup. So that
+ * static executables will be well-behaved, we would null the register
+ * in question here, in the pt_regs structure passed. For now,
+ * leave it a null macro.
+ */
+#define ELF_PLAT_INIT(regs, load_addr) do { } while (0)
+
+#define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
+
+/* Hrm is this going to cause problems for changing PAGE_SIZE? */
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE 0x08000000UL
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+
+
+#endif
diff --git a/arch/hexagon/include/asm/exec.h b/arch/hexagon/include/asm/exec.h
new file mode 100644
index 000000000..c32b21326
--- /dev/null
+++ b/arch/hexagon/include/asm/exec.h
@@ -0,0 +1,28 @@
+/*
+ * Process execution related definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_EXEC_H
+#define _ASM_EXEC_H
+
+/* Should probably shoot for an 8-byte aligned stack pointer */
+#define STACK_MASK (~7)
+#define arch_align_stack(x) (x & STACK_MASK)
+
+#endif /* _ASM_EXEC_H */
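
A worked example of the mask: arch_align_stack(0x7ffffff5) yields 0x7ffffff0, since STACK_MASK clears the low three bits and rounds the pointer down to an 8-byte boundary.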
diff --git a/arch/hexagon/include/asm/fixmap.h b/arch/hexagon/include/asm/fixmap.h
new file mode 100644
index 000000000..1387f84b4
--- /dev/null
+++ b/arch/hexagon/include/asm/fixmap.h
@@ -0,0 +1,35 @@
+/*
+ * Fixmap support for Hexagon - enough to support highmem features
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+/*
+ * A lot of the fixmap info is already in mem-layout.h
+ */
+#include <asm/mem-layout.h>
+
+#include <asm-generic/fixmap.h>
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
+ (vaddr)), (vaddr)), (vaddr))
+
+#endif
diff --git a/arch/hexagon/include/asm/fpu.h b/arch/hexagon/include/asm/fpu.h
new file mode 100644
index 000000000..0e135ea8c
--- /dev/null
+++ b/arch/hexagon/include/asm/fpu.h
@@ -0,0 +1,4 @@
+/*
+ * If the FPU is used inside the kernel,
+ * kernel_fpu_end() will be defined here.
+ */
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
new file mode 100644
index 000000000..e8e5e47af
--- /dev/null
+++ b/arch/hexagon/include/asm/futex.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_HEXAGON_FUTEX_H
+#define _ASM_HEXAGON_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+/* XXX TODO-- need to add sync barriers! */
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile( \
+ "1: %0 = memw_locked(%3);\n" \
+ /* For example: %1 = %4 */ \
+ insn \
+ "2: memw_locked(%3,p2) = %1;\n" \
+ " if (!p2) jump 1b;\n" \
+ " %1 = #0;\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: %1 = #%5;\n" \
+ " jump 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".long 1b,4b,2b,4b\n" \
+ ".previous\n" \
+ : "=&r" (oldval), "=&r" (ret), "+m" (*uaddr) \
+ : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
+ : "p2", "memory")
+
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("%1 = %4\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("%1 = add(%0,%4)\n", ret, oldval, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("%1 = or(%0,%4)\n", ret, oldval, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("%1 = not(%4); %1 = and(%0,%1)\n", ret,
+ oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("%1 = xor(%0,%4)\n", ret, oldval, uaddr,
+ oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+ u32 newval)
+{
+ int prev;
+ int ret;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __asm__ __volatile__ (
+ "1: %1 = memw_locked(%3)\n"
+ " {\n"
+ " p2 = cmp.eq(%1,%4)\n"
+ " if (!p2.new) jump:NT 3f\n"
+ " }\n"
+ "2: memw_locked(%3,p2) = %5\n"
+ " if (!p2) jump 1b\n"
+ "3:\n"
+ ".section .fixup,\"ax\"\n"
+ "4: %0 = #%6\n"
+ " jump 3b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ ".long 1b,4b,2b,4b\n"
+ ".previous\n"
+ : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
+ : "r" (uaddr), "r" (oldval), "r" (newval), "i"(-EFAULT)
+ : "p2", "memory");
+
+ *uval = prev;
+ return ret;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_HEXAGON_FUTEX_H */
diff --git a/arch/hexagon/include/asm/hexagon_vm.h b/arch/hexagon/include/asm/hexagon_vm.h
new file mode 100644
index 000000000..e8990c9a6
--- /dev/null
+++ b/arch/hexagon/include/asm/hexagon_vm.h
@@ -0,0 +1,289 @@
+/*
+ * Declarations for the Hexagon Virtual Machine.
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef ASM_HEXAGON_VM_H
+#define ASM_HEXAGON_VM_H
+
+/*
+ * In principle, a Linux kernel for the VM could
+ * selectively define the virtual instructions
+ * as inline assembler macros, but for a first pass,
+ * we'll use subroutines for both the VM and the native
+ * kernels. It's costing a subroutine call/return,
+ * but it makes for a single set of entry points
+ * for tracing/debugging.
+ */
+
+#define HVM_TRAP1_VMVERSION 0
+#define HVM_TRAP1_VMRTE 1
+#define HVM_TRAP1_VMSETVEC 2
+#define HVM_TRAP1_VMSETIE 3
+#define HVM_TRAP1_VMGETIE 4
+#define HVM_TRAP1_VMINTOP 5
+#define HVM_TRAP1_VMCLRMAP 10
+#define HVM_TRAP1_VMNEWMAP 11
+#define HVM_TRAP1_FORMERLY_VMWIRE 12
+#define HVM_TRAP1_VMCACHE 13
+#define HVM_TRAP1_VMGETTIME 14
+#define HVM_TRAP1_VMSETTIME 15
+#define HVM_TRAP1_VMWAIT 16
+#define HVM_TRAP1_VMYIELD 17
+#define HVM_TRAP1_VMSTART 18
+#define HVM_TRAP1_VMSTOP 19
+#define HVM_TRAP1_VMVPID 20
+#define HVM_TRAP1_VMSETREGS 21
+#define HVM_TRAP1_VMGETREGS 22
+#define HVM_TRAP1_VMTIMEROP 24
+
+#ifndef __ASSEMBLY__
+
+enum VM_CACHE_OPS {
+ hvmc_ickill,
+ hvmc_dckill,
+ hvmc_l2kill,
+ hvmc_dccleaninva,
+ hvmc_icinva,
+ hvmc_idsync,
+ hvmc_fetch_cfg
+};
+
+enum VM_INT_OPS {
+ hvmi_nop,
+ hvmi_globen,
+ hvmi_globdis,
+ hvmi_locen,
+ hvmi_locdis,
+ hvmi_affinity,
+ hvmi_get,
+ hvmi_peek,
+ hvmi_status,
+ hvmi_post,
+ hvmi_clear
+};
+
+extern void _K_VM_event_vector(void);
+
+void __vmrte(void);
+long __vmsetvec(void *);
+long __vmsetie(long);
+long __vmgetie(void);
+long __vmintop(enum VM_INT_OPS, long, long, long, long);
+long __vmclrmap(void *, unsigned long);
+long __vmnewmap(void *);
+long __vmcache(enum VM_CACHE_OPS op, unsigned long addr, unsigned long len);
+unsigned long long __vmgettime(void);
+long __vmsettime(unsigned long long);
+long __vmstart(void *, void *);
+void __vmstop(void);
+long __vmwait(void);
+void __vmyield(void);
+long __vmvpid(void);
+
+static inline long __vmcache_ickill(void)
+{
+ return __vmcache(hvmc_ickill, 0, 0);
+}
+
+static inline long __vmcache_dckill(void)
+{
+ return __vmcache(hvmc_dckill, 0, 0);
+}
+
+static inline long __vmcache_l2kill(void)
+{
+ return __vmcache(hvmc_l2kill, 0, 0);
+}
+
+static inline long __vmcache_dccleaninva(unsigned long addr, unsigned long len)
+{
+ return __vmcache(hvmc_dccleaninva, addr, len);
+}
+
+static inline long __vmcache_icinva(unsigned long addr, unsigned long len)
+{
+ return __vmcache(hvmc_icinva, addr, len);
+}
+
+static inline long __vmcache_idsync(unsigned long addr,
+ unsigned long len)
+{
+ return __vmcache(hvmc_idsync, addr, len);
+}
+
+static inline long __vmcache_fetch_cfg(unsigned long val)
+{
+ return __vmcache(hvmc_fetch_cfg, val, 0);
+}
+
+/* interrupt operations */
+
+static inline long __vmintop_nop(void)
+{
+ return __vmintop(hvmi_nop, 0, 0, 0, 0);
+}
+
+static inline long __vmintop_globen(long i)
+{
+ return __vmintop(hvmi_globen, i, 0, 0, 0);
+}
+
+static inline long __vmintop_globdis(long i)
+{
+ return __vmintop(hvmi_globdis, i, 0, 0, 0);
+}
+
+static inline long __vmintop_locen(long i)
+{
+ return __vmintop(hvmi_locen, i, 0, 0, 0);
+}
+
+static inline long __vmintop_locdis(long i)
+{
+ return __vmintop(hvmi_locdis, i, 0, 0, 0);
+}
+
+static inline long __vmintop_affinity(long i, long cpu)
+{
+ return __vmintop(hvmi_affinity, i, cpu, 0, 0);
+}
+
+static inline long __vmintop_get(void)
+{
+ return __vmintop(hvmi_get, 0, 0, 0, 0);
+}
+
+static inline long __vmintop_peek(void)
+{
+ return __vmintop(hvmi_peek, 0, 0, 0, 0);
+}
+
+static inline long __vmintop_status(long i)
+{
+ return __vmintop(hvmi_status, i, 0, 0, 0);
+}
+
+static inline long __vmintop_post(long i)
+{
+ return __vmintop(hvmi_post, i, 0, 0, 0);
+}
+
+static inline long __vmintop_clear(long i)
+{
+ return __vmintop(hvmi_clear, i, 0, 0, 0);
+}
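+
+/*
+ * Illustrative usage (a sketch, not an API defined here): arch
+ * interrupt code can drive the VM interrupt controller through the
+ * wrappers above, e.g. to mask and later unmask an IRQ locally:
+ *
+ *	__vmintop_locdis(irq);	// disable irq on this cpu
+ *	...
+ *	__vmintop_locen(irq);	// re-enable it
+ */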
+
+#else /* Only assembly code should reference these */
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Constants for virtual instruction parameters and return values
+ */
+
+/* vmnewmap arguments */
+
+#define VM_TRANS_TYPE_LINEAR 0
+#define VM_TRANS_TYPE_TABLE 1
+#define VM_TLB_INVALIDATE_FALSE 0
+#define VM_TLB_INVALIDATE_TRUE 1
+
+/* vmsetie arguments */
+
+#define VM_INT_DISABLE 0
+#define VM_INT_ENABLE 1
+
+/* vmsetimask arguments */
+
+#define VM_INT_UNMASK 0
+#define VM_INT_MASK 1
+
+#define VM_NEWMAP_TYPE_LINEAR 0
+#define VM_NEWMAP_TYPE_PGTABLES 1
+
+
+/*
+ * Event Record definitions useful to both C and Assembler
+ */
+
+/* VMEST Layout */
+
+#define HVM_VMEST_UM_SFT 31
+#define HVM_VMEST_UM_MSK 1
+#define HVM_VMEST_IE_SFT 30
+#define HVM_VMEST_IE_MSK 1
+#define HVM_VMEST_SS_SFT 29
+#define HVM_VMEST_SS_MSK 1
+#define HVM_VMEST_EVENTNUM_SFT 16
+#define HVM_VMEST_EVENTNUM_MSK 0xff
+#define HVM_VMEST_CAUSE_SFT 0
+#define HVM_VMEST_CAUSE_MSK 0xffff
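+
+/*
+ * A sketch of how these shift/mask pairs are meant to be used,
+ * given an event status word "vmest":
+ *
+ *	cause = (vmest >> HVM_VMEST_CAUSE_SFT) & HVM_VMEST_CAUSE_MSK;
+ *	event = (vmest >> HVM_VMEST_EVENTNUM_SFT) & HVM_VMEST_EVENTNUM_MSK;
+ *	user  = (vmest >> HVM_VMEST_UM_SFT) & HVM_VMEST_UM_MSK;
+ */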
+
+/*
+ * The initial program gets to find a system environment descriptor
+ * on its stack when it begins execution. The first word is a version
+ * code to indicate what is there. Zero means nothing more.
+ */
+
+#define HEXAGON_VM_SED_NULL 0
+
+/*
+ * Event numbers for vector binding
+ */
+
+#define HVM_EV_RESET 0
+#define HVM_EV_MACHCHECK 1
+#define HVM_EV_GENEX 2
+#define HVM_EV_TRAP 8
+#define HVM_EV_INTR 15
+/* These should be nuked as soon as we know the VM is up to spec v0.1.1 */
+#define HVM_EV_INTR_0 16
+#define HVM_MAX_INTR 240
+
+/*
+ * Cause values for General Exception
+ */
+
+#define HVM_GE_C_BUS 0x01
+#define HVM_GE_C_XPROT 0x11
+#define HVM_GE_C_XUSER 0x14
+#define HVM_GE_C_INVI 0x15
+#define HVM_GE_C_PRIVI 0x1B
+#define HVM_GE_C_XMAL 0x1C
+#define HVM_GE_C_WREG 0x1D
+#define HVM_GE_C_PCAL 0x1E
+#define HVM_GE_C_RMAL 0x20
+#define HVM_GE_C_WMAL 0x21
+#define HVM_GE_C_RPROT 0x22
+#define HVM_GE_C_WPROT 0x23
+#define HVM_GE_C_RUSER 0x24
+#define HVM_GE_C_WUSER 0x25
+#define HVM_GE_C_CACHE 0x28
+
+/*
+ * Cause codes for Machine Check
+ */
+
+#define HVM_MCHK_C_DOWN 0x00
+#define HVM_MCHK_C_BADSP 0x01
+#define HVM_MCHK_C_BADEX 0x02
+#define HVM_MCHK_C_BADPT 0x03
+#define HVM_MCHK_C_REGWR 0x29
+
+#endif
diff --git a/arch/hexagon/include/asm/intrinsics.h b/arch/hexagon/include/asm/intrinsics.h
new file mode 100644
index 000000000..ca587737f
--- /dev/null
+++ b/arch/hexagon/include/asm/intrinsics.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_HEXAGON_INTRINSICS_H
+#define _ASM_HEXAGON_INTRINSICS_H
+
+#define HEXAGON_P_vrmpyhacc_PP __builtin_HEXAGON_M2_vrmac_s0
+#define HEXAGON_P_vrmpyh_PP __builtin_HEXAGON_M2_vrmpy_s0
+#define HEXAGON_R_cl0_R __builtin_HEXAGON_S2_cl0
+
+#endif
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
new file mode 100644
index 000000000..bd429d6a3
--- /dev/null
+++ b/arch/hexagon/include/asm/io.h
@@ -0,0 +1,335 @@
+/*
+ * IO definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/iomap.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+/*
+ * We don't have PCI yet.
+ * _IO_BASE is pointing at what should be unused virtual space.
+ */
+#define IO_SPACE_LIMIT 0xffff
+#define _IO_BASE ((void __iomem *)0xfe000000)
+
+#define IOMEM(x) ((void __force __iomem *)(x))
+
+extern int remap_area_pages(unsigned long start, unsigned long phys_addr,
+ unsigned long end, unsigned long flags);
+
+extern void __iounmap(const volatile void __iomem *addr);
+
+/* Defined in lib/io.c, needed for smc91x driver. */
+extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
+extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
+
+extern void __raw_readsl(const void __iomem *addr, void *data, int wordlen);
+extern void __raw_writesl(void __iomem *addr, const void *data, int wordlen);
+
+#define readsw(p, d, l) __raw_readsw(p, d, l)
+#define writesw(p, d, l) __raw_writesw(p, d, l)
+
+#define readsl(p, d, l) __raw_readsl(p, d, l)
+#define writesl(p, d, l) __raw_writesl(p, d, l)
+
+/*
+ * virt_to_phys - map virtual address to physical
+ * @address: address to map
+ */
+static inline unsigned long virt_to_phys(volatile void *address)
+{
+ return __pa(address);
+}
+
+/*
+ * phys_to_virt - map physical address to virtual
+ * @address: address to map
+ */
+static inline void *phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+/*
+ * convert a physical pointer to a virtual kernel pointer for
+ * /dev/mem access.
+ */
+#define xlate_dev_kmem_ptr(p) __va(p)
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * IO port access primitives. Hexagon doesn't have special IO access
+ * instructions; all I/O is memory mapped.
+ *
+ * in/out are used for "ports", but we don't have "port instructions",
+ * so these are really just memory mapped too.
+ */
+
+/*
+ * readb - read byte from memory mapped device
+ * @addr: pointer to memory
+ *
+ * Operates on "I/O bus memory space"
+ */
+static inline u8 readb(const volatile void __iomem *addr)
+{
+ u8 val;
+ asm volatile(
+ "%0 = memb(%1);"
+ : "=&r" (val)
+ : "r" (addr)
+ );
+ return val;
+}
+
+static inline u16 readw(const volatile void __iomem *addr)
+{
+ u16 val;
+ asm volatile(
+ "%0 = memh(%1);"
+ : "=&r" (val)
+ : "r" (addr)
+ );
+ return val;
+}
+
+static inline u32 readl(const volatile void __iomem *addr)
+{
+ u32 val;
+ asm volatile(
+ "%0 = memw(%1);"
+ : "=&r" (val)
+ : "r" (addr)
+ );
+ return val;
+}
+
+/*
+ * writeb - write a byte to a memory location
+ * @data: data to write
+ * @addr: pointer to memory
+ *
+ */
+static inline void writeb(u8 data, volatile void __iomem *addr)
+{
+ asm volatile(
+ "memb(%0) = %1;"
+ :
+ : "r" (addr), "r" (data)
+ : "memory"
+ );
+}
+
+static inline void writew(u16 data, volatile void __iomem *addr)
+{
+ asm volatile(
+ "memh(%0) = %1;"
+ :
+ : "r" (addr), "r" (data)
+ : "memory"
+ );
+}
+
+static inline void writel(u32 data, volatile void __iomem *addr)
+{
+ asm volatile(
+ "memw(%0) = %1;"
+ :
+ : "r" (addr), "r" (data)
+ : "memory"
+ );
+}
+
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+
+/*
+ * http://comments.gmane.org/gmane.linux.ports.arm.kernel/117626
+ */
+
+#define readb_relaxed __raw_readb
+#define readw_relaxed __raw_readw
+#define readl_relaxed __raw_readl
+
+#define writeb_relaxed __raw_writeb
+#define writew_relaxed __raw_writew
+#define writel_relaxed __raw_writel
+
+#define mmiowb()
+
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
+#define ioremap_nocache ioremap
+#define ioremap_uc(X, Y) ioremap((X), (Y))
+
+
+static inline void iounmap(volatile void __iomem *addr)
+{
+ __iounmap(addr);
+}
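+
+/*
+ * Illustrative usage (a sketch; the register offsets and values are
+ * hypothetical): a driver maps a device's physical window, then uses
+ * the accessors above on the returned cookie:
+ *
+ *	void __iomem *base = ioremap(phys, size);
+ *	u32 id = readl(base + 0x0);	// hypothetical ID register
+ *	writel(0x1, base + 0x4);	// hypothetical enable register
+ *	iounmap(base);
+ */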
+
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
+ int count)
+{
+ memcpy(dst, (void *) src, count);
+}
+
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
+ int count)
+{
+ memcpy((void *) dst, src, count);
+}
+
+static inline void memset_io(volatile void __iomem *addr, int value,
+ size_t size)
+{
+ memset((void __force *)addr, value, size);
+}
+
+#define PCI_IO_ADDR (volatile void __iomem *)
+
+/*
+ * inb - read byte from I/O port
+ * @port: address in I/O space
+ *
+ * Operates on "I/O bus I/O space"
+ */
+static inline u8 inb(unsigned long port)
+{
+ return readb(_IO_BASE + (port & IO_SPACE_LIMIT));
+}
+
+static inline u16 inw(unsigned long port)
+{
+ return readw(_IO_BASE + (port & IO_SPACE_LIMIT));
+}
+
+static inline u32 inl(unsigned long port)
+{
+ return readl(_IO_BASE + (port & IO_SPACE_LIMIT));
+}
+
+/*
+ * outb - write a byte to an I/O port
+ * @data: data to write
+ * @addr: address in I/O space
+ */
+static inline void outb(u8 data, unsigned long port)
+{
+ writeb(data, _IO_BASE + (port & IO_SPACE_LIMIT));
+}
+
+static inline void outw(u16 data, unsigned long port)
+{
+ writew(data, _IO_BASE + (port & IO_SPACE_LIMIT));
+}
+
+static inline void outl(u32 data, unsigned long port)
+{
+ writel(data, _IO_BASE + (port & IO_SPACE_LIMIT));
+}
+
+#define outb_p outb
+#define outw_p outw
+#define outl_p outl
+
+#define inb_p inb
+#define inw_p inw
+#define inl_p inl
+
+static inline void insb(unsigned long port, void *buffer, int count)
+{
+ if (count) {
+ u8 *buf = buffer;
+ do {
+ u8 x = inb(port);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+
+static inline void insw(unsigned long port, void *buffer, int count)
+{
+ if (count) {
+ u16 *buf = buffer;
+ do {
+ u16 x = inw(port);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+
+static inline void insl(unsigned long port, void *buffer, int count)
+{
+ if (count) {
+ u32 *buf = buffer;
+ do {
+ u32 x = inl(port);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+
+static inline void outsb(unsigned long port, const void *buffer, int count)
+{
+ if (count) {
+ const u8 *buf = buffer;
+ do {
+ outb(*buf++, port);
+ } while (--count);
+ }
+}
+
+static inline void outsw(unsigned long port, const void *buffer, int count)
+{
+ if (count) {
+ const u16 *buf = buffer;
+ do {
+ outw(*buf++, port);
+ } while (--count);
+ }
+}
+
+static inline void outsl(unsigned long port, const void *buffer, int count)
+{
+ if (count) {
+ const u32 *buf = buffer;
+ do {
+ outl(*buf++, port);
+ } while (--count);
+ }
+}
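+
+/*
+ * Illustrative usage (a sketch; DATA_PORT is hypothetical): drain a
+ * 16-bit data FIFO into a buffer, as a NIC driver might:
+ *
+ *	u16 buf[64];
+ *	insw(DATA_PORT, buf, 64);
+ */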
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/hexagon/include/asm/irq.h b/arch/hexagon/include/asm/irq.h
new file mode 100644
index 000000000..51661db38
--- /dev/null
+++ b/arch/hexagon/include/asm/irq.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_IRQ_H_
+#define _ASM_IRQ_H_
+
+/* Number of first-level interrupts associated with the CPU core. */
+#define HEXAGON_CPUINTS 32
+
+/*
+ * Must define NR_IRQS before including <asm-generic/irq.h>
+ * 64 == the two SIRCs, 176 == the two GPIOs
+ *
+ * IRQ configuration is still in flux; defining this to a comfortably
+ * large number.
+ */
+#define NR_IRQS 512
+
+#include <asm-generic/irq.h>
+
+#endif
diff --git a/arch/hexagon/include/asm/irqflags.h b/arch/hexagon/include/asm/irqflags.h
new file mode 100644
index 000000000..e5fd9492d
--- /dev/null
+++ b/arch/hexagon/include/asm/irqflags.h
@@ -0,0 +1,62 @@
+/*
+ * IRQ support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#include <asm/hexagon_vm.h>
+#include <linux/types.h>
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ return __vmgetie();
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ return __vmsetie(VM_INT_DISABLE);
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !flags;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return !__vmgetie();
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ __vmsetie(VM_INT_ENABLE);
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ __vmsetie(VM_INT_DISABLE);
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ __vmsetie(flags);
+}
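+
+/*
+ * These all reduce to __vmgetie/__vmsetie; the usual pattern (a
+ * sketch of the generic contract, not a new interface) is:
+ *
+ *	unsigned long flags;
+ *
+ *	flags = arch_local_irq_save();	// returns prior IE state
+ *	...critical section...
+ *	arch_local_irq_restore(flags);
+ */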
+
+#endif
diff --git a/arch/hexagon/include/asm/kgdb.h b/arch/hexagon/include/asm/kgdb.h
new file mode 100644
index 000000000..ccd3ac336
--- /dev/null
+++ b/arch/hexagon/include/asm/kgdb.h
@@ -0,0 +1,44 @@
+/*
+ * arch/hexagon/include/asm/kgdb.h - Hexagon KGDB Support
+ *
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __HEXAGON_KGDB_H__
+#define __HEXAGON_KGDB_H__
+
+#define BREAK_INSTR_SIZE 4
+#define CACHE_FLUSH_IS_SAFE 1
+#define BUFMAX ((NUMREGBYTES * 2) + 512)
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm("trap0(#0xDB)");
+}
+
+/* Registers:
+ * 32 gpr + sa0/1 + lc0/1 + m0/1 + gp + ugp + pred + pc = 42 total.
+ * vm regs = psp+elr+est+badva = 4
+ * syscall+restart = 2 more
+ * also add cs0/1 = 2
+ * so 50 = 42 + 4 + 2 + 2
+ */
+#define DBG_USER_REGS 42
+#define DBG_MAX_REG_NUM (DBG_USER_REGS + 8)
+#define NUMREGBYTES (DBG_MAX_REG_NUM*4)
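+
+/*
+ * Sanity math: 50 registers of 4 bytes each makes NUMREGBYTES 200,
+ * so BUFMAX above works out to (200 * 2) + 512 = 912 bytes.
+ */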
+
+#endif /* __HEXAGON_KGDB_H__ */
diff --git a/arch/hexagon/include/asm/linkage.h b/arch/hexagon/include/asm/linkage.h
new file mode 100644
index 000000000..31b4cbe7e
--- /dev/null
+++ b/arch/hexagon/include/asm/linkage.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/hexagon/include/asm/mem-layout.h b/arch/hexagon/include/asm/mem-layout.h
new file mode 100644
index 000000000..60556f8c4
--- /dev/null
+++ b/arch/hexagon/include/asm/mem-layout.h
@@ -0,0 +1,120 @@
+/*
+ * Memory layout definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_HEXAGON_MEM_LAYOUT_H
+#define _ASM_HEXAGON_MEM_LAYOUT_H
+
+#include <linux/const.h>
+
+/*
+ * Have to do this for ginormous numbers, else they get printed as
+ * negative numbers, which the linker no likey when you try to
+ * assign it to the location counter.
+ */
+
+#define PAGE_OFFSET _AC(0xc0000000, UL)
+
+/*
+ * Compiling for a platform that needs a crazy physical offset
+ * (like if the memory starts at 1GB and up) means we need
+ * an actual PHYS_OFFSET. Should be set up in head.S.
+ */
+
+#ifdef CONFIG_HEXAGON_PHYS_OFFSET
+#ifndef __ASSEMBLY__
+extern unsigned long __phys_offset;
+#endif
+#define PHYS_OFFSET __phys_offset
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET 0
+#endif
+
+#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
+
+#define TASK_SIZE (PAGE_OFFSET)
+
+/* not sure how these are used yet */
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE
+
+#ifndef __ASSEMBLY__
+enum fixed_addresses {
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END, /* check for per-cpuism */
+ __end_of_fixed_addresses
+};
+
+#define MIN_KERNEL_SEG (PAGE_OFFSET >> PGDIR_SHIFT) /* L1 shift is 22 bits */
+extern int max_kernel_seg;
+
+/*
+ * Start of vmalloc virtual address space for kernel;
+ * supposed to be based on the amount of physical memory available
+ */
+
+#define VMALLOC_START ((unsigned long) __va(high_memory + VMALLOC_OFFSET))
+
+/* Gap between physical ram and vmalloc space for guard purposes. */
+#define VMALLOC_OFFSET PAGE_SIZE
+
+/*
+ * Create the space between VMALLOC_START and FIXADDR_TOP backwards
+ * from the ... "top".
+ *
+ * Permanent IO mappings will live at 0xfexx_xxxx
+ * Hypervisor occupies the last 16MB page at 0xffxxxxxx
+ */
+
+#define FIXADDR_TOP 0xfe000000
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+/*
+ * "permanent kernel mappings", defined as long-lasting mappings of
+ * high-memory page frames into the kernel address space.
+ */
+
+#define LAST_PKMAP PTRS_PER_PTE
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+/*
+ * To the "left" of the fixed map space is the kmap space
+ *
+ * "Permanent Kernel Mappings"; fancy (or less fancy) PTE table
+ * that looks like it's actually walked.
+ * Need to check the alignment/shift usage; some archs use
+ * PMD_MASK on this value
+ */
+#define PKMAP_BASE (FIXADDR_START-PAGE_SIZE*LAST_PKMAP)
+
+/*
+ * 2 pages of guard gap between where vmalloc area ends
+ * and pkmap_base begins.
+ */
+#define VMALLOC_END (PKMAP_BASE-PAGE_SIZE*2)
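+
+/*
+ * Putting the constants above together, the top of the address space
+ * looks roughly like this (a sketch derived from this file, not an
+ * authoritative map):
+ *
+ *	0xffxxxxxx	hypervisor (last 16MB page)
+ *	0xfexxxxxx	permanent IO mappings
+ *	FIXADDR_TOP	0xfe000000
+ *	FIXADDR_START	FIXADDR_TOP - FIXADDR_SIZE
+ *	PKMAP_BASE	FIXADDR_START - PAGE_SIZE * LAST_PKMAP
+ *	VMALLOC_END	PKMAP_BASE - two guard pages
+ *	VMALLOC_START	base of the vmalloc area
+ */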
+#endif /* !__ASSEMBLY__ */
+
+
+#endif /* _ASM_HEXAGON_MEM_LAYOUT_H */
diff --git a/arch/hexagon/include/asm/mmu.h b/arch/hexagon/include/asm/mmu.h
new file mode 100644
index 000000000..2288b19fd
--- /dev/null
+++ b/arch/hexagon/include/asm/mmu.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_MMU_H
+#define _ASM_MMU_H
+
+#include <asm/vdso.h>
+
+/*
+ * Architecture-specific state for a mm_struct.
+ * For the Hexagon Virtual Machine, it can be a copy
+ * of the pointer to the page table base.
+ */
+struct mm_context {
+ unsigned long long generation;
+ unsigned long ptbase;
+ struct hexagon_vdso *vdso;
+};
+
+typedef struct mm_context mm_context_t;
+
+#endif
diff --git a/arch/hexagon/include/asm/mmu_context.h b/arch/hexagon/include/asm/mmu_context.h
new file mode 100644
index 000000000..d8a071afd
--- /dev/null
+++ b/arch/hexagon/include/asm/mmu_context.h
@@ -0,0 +1,102 @@
+/*
+ * MM context support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_MMU_CONTEXT_H
+#define _ASM_MMU_CONTEXT_H
+
+#include <linux/mm_types.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/mem-layout.h>
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+/*
+ * VM port hides all TLB management, so "lazy TLB" isn't very
+ * meaningful. Even for ports to architectures with visible TLBs,
+ * this is almost invariably a null function.
+ */
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+
+/*
+ * Architecture-specific actions, if any, for memory map deactivation.
+ */
+static inline void deactivate_mm(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+}
+
+/**
+ * init_new_context - initialize context related info for new mm_struct instance
+ * @tsk: pointer to a task struct
+ * @mm: pointer to a new mm struct
+ */
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ /* mm->context is set up by pgd_alloc */
+ return 0;
+}
+
+/*
+ * Switch active mm context
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ int l1;
+
+ /*
+ * For virtual machine, we have to update system map if it's been
+ * touched.
+ */
+ if (next->context.generation < prev->context.generation) {
+ for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
+ next->pgd[l1] = init_mm.pgd[l1];
+
+ next->context.generation = prev->context.generation;
+ }
+
+ __vmnewmap((void *)next->context.ptbase);
+}
+
+/*
+ * Activate new memory map for task
+ */
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm(prev, next, current_thread_info()->task);
+ local_irq_restore(flags);
+}
+
+/* Generic hooks for arch_dup_mmap and arch_exit_mmap */
+#include <asm-generic/mm_hooks.h>
+
+#endif
diff --git a/arch/hexagon/include/asm/module.h b/arch/hexagon/include/asm/module.h
new file mode 100644
index 000000000..6b4323ace
--- /dev/null
+++ b/arch/hexagon/include/asm/module.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_MODULE_H
+#define _ASM_MODULE_H
+
+#include <asm-generic/module.h>
+
+#define MODULE_ARCH_VERMAGIC __stringify(PROCESSOR_MODEL_NAME) " "
+
+#endif
diff --git a/arch/hexagon/include/asm/page.h b/arch/hexagon/include/asm/page.h
new file mode 100644
index 000000000..93f5669b4
--- /dev/null
+++ b/arch/hexagon/include/asm/page.h
@@ -0,0 +1,163 @@
+/*
+ * Page management definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_PAGE_H
+#define _ASM_PAGE_H
+
+#include <linux/const.h>
+
+/* This is probably not the most graceful way to handle this. */
+
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PAGE_SHIFT 12
+#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_16KB
+#define PAGE_SHIFT 14
+#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define PAGE_SHIFT 16
+#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_256KB
+#define PAGE_SHIFT 18
+#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_1MB
+#define PAGE_SHIFT 20
+#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
+#endif
+
+/*
+ * These should be defined in hugetlb.h, but apparently not.
+ * "Huge" for us should be 4MB or 16MB, which are both represented
+ * in L1 PTE's. Right now, it's set up for 4MB.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SHIFT 22
+#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE-1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
+#define HVM_HUGEPAGE_SIZE 0x5
+#endif
+
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+/*
+ * This is for PFN_DOWN, which mm.h needs. Seems the right place to pull it in.
+ */
+#include <linux/pfn.h>
+
+/*
+ * We implement a two-level architecture-specific page table structure.
+ * Null intermediate page table level (pmd, pud) definitions will come from
+ * asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+/*
+ * We need a __pa and a __va routine for kernel space.
+ * MIPS says they're only used during mem_init.
+ * Also, check if we need a PHYS_OFFSET.
+ */
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))
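+
+/*
+ * Worked example, assuming the default PHYS_OFFSET of 0: the kernel
+ * virtual address 0xc0001000 gives __pa() == 0x00001000, and
+ * __va(0x00001000) maps back to 0xc0001000.
+ */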
+
+/* The "page frame" descriptor is defined in linux/mm.h */
+struct page;
+
+/* Returns page frame descriptor for virtual address. */
+#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))
+
+/* Default vm area behavior is non-executable. */
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+/* Need to not use a define for linesize; may move this to another file. */
+static inline void clear_page(void *page)
+{
+ /* This can only be done on pages with L1 WB cache */
+ asm volatile(
+ " loop0(1f,%1);\n"
+ "1: { dczeroa(%0);\n"
+ " %0 = add(%0,#32); }:endloop0\n"
+ : "+r" (page)
+ : "r" (PAGE_SIZE/32)
+ : "lc0", "sa0", "memory"
+ );
+}
+
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+/*
+ * Under assumption that kernel always "sees" user map...
+ */
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * page_to_phys - convert page to physical address
+ * @page - pointer to page entry in mem_map
+ */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define page_to_virt(page) __va(page_to_phys(page))
+
+/*
+ * For port to Hexagon Virtual Machine, MAYBE we check for attempts
+ * to reference reserved HVM space, but in any case, the VM will be
+ * protected.
+ */
+#define kern_addr_valid(addr) (1)
+
+#include <asm/mem-layout.h>
+#include <asm-generic/memory_model.h>
+/* XXX Todo: implement assembly-optimized version of getorder. */
+#include <asm-generic/getorder.h>
+
+#endif /* ifdef __ASSEMBLY__ */
+#endif /* ifdef __KERNEL__ */
+
+#endif
diff --git a/arch/hexagon/include/asm/perf_event.h b/arch/hexagon/include/asm/perf_event.h
new file mode 100644
index 000000000..430978b1d
--- /dev/null
+++ b/arch/hexagon/include/asm/perf_event.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_PERF_EVENT_H
+#define _ASM_PERF_EVENT_H
+
+#endif /* _ASM_PERF_EVENT_H */
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
new file mode 100644
index 000000000..eeebf862c
--- /dev/null
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -0,0 +1,148 @@
+/*
+ * Page table support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_PGALLOC_H
+#define _ASM_PGALLOC_H
+
+#include <asm/mem-layout.h>
+#include <asm/atomic.h>
+
+#define check_pgt_cache() do {} while (0)
+
+extern unsigned long long kmap_generation;
+
+/*
+ * Page table creation interface
+ */
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *pgd;
+
+ pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+ /*
+ * There may be better ways to do this, but to ensure
+ * that new address spaces always contain the kernel
+ * base mapping, and to ensure that the user area is
+ * initially marked invalid, initialize the new map
+ * with a copy of the kernel's persistent map.
+ */
+
+ memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t));
+ mm->context.generation = kmap_generation;
+
+ /* Physical version is what is passed to virtual machine on switch */
+ mm->context.ptbase = __pa(pgd);
+
+ return pgd;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long) pgd);
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *pte;
+
+ pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pte)
+ return NULL;
+ if (!pgtable_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
+ }
+ return pte;
+}
+
+/* _kernel variant gets to use a different allocator */
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ gfp_t flags = GFP_KERNEL | __GFP_ZERO;
+ return (pte_t *) __get_free_page(flags);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte)
+{
+ /*
+ * Conveniently, zero in 3 LSB means indirect 4K page table.
+ * Not so convenient when you're trying to vary the page size.
+ */
+ set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
+ HEXAGON_L1_PTE_SIZE));
+}
+
+/*
+ * Other architectures seem to have ways of making all processes
+ * share the same pmd's for their kernel mappings, but the v0.3
+ * Hexagon VM spec has a "monolithic" L1 table for user and kernel
+ * segments. We track "generations" of the kernel map to minimize
+ * overhead, and update the "slave" copies of the kernel mappings
+ * as part of switch_mm. However, we still need to update the
+ * kernel map of the active thread that's calling pmd_populate_kernel...
+ */
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ extern spinlock_t kmap_gen_lock;
+ pmd_t *ppmd;
+ int pmdindex;
+
+ spin_lock(&kmap_gen_lock);
+ kmap_generation++;
+ mm->context.generation = kmap_generation;
+ current->active_mm->context.generation = kmap_generation;
+ spin_unlock(&kmap_gen_lock);
+
+ set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
+
+ /*
+ * Now the "slave" copy of the current thread.
+ * This is pointer arithmetic, not byte addresses!
+ */
+ pmdindex = (pgd_t *)pmd - mm->pgd;
+ ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
+ set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
+ if (pmdindex > max_kernel_seg)
+ max_kernel_seg = pmdindex;
+}
+
+#define __pte_free_tlb(tlb, pte, addr) \
+do { \
+ pgtable_page_dtor((pte)); \
+ tlb_remove_page((tlb), (pte)); \
+} while (0)
+
+#endif
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
new file mode 100644
index 000000000..65125d0b0
--- /dev/null
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -0,0 +1,485 @@
+/*
+ * Page table support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_PGTABLE_H
+#define _ASM_PGTABLE_H
+
+/*
+ * Page table definitions for Qualcomm Hexagon processor.
+ */
+#include <asm/page.h>
+#define __ARCH_USE_5LEVEL_HACK
+#include <asm-generic/pgtable-nopmd.h>
+
+/* A handy thing to have if one has the RAM. Declared in head.S */
+extern unsigned long empty_zero_page;
+
+/*
+ * The PTE model described here is that of the Hexagon Virtual Machine,
+ * which autonomously walks 2-level page tables. At a lower level, we
+ * also describe the RISCish software-loaded TLB entry structure of
+ * the underlying Hexagon processor. A kernel built to run on the
+ * virtual machine has no need to know about the underlying hardware.
+ */
+#include <asm/vm_mmu.h>
+
+/*
+ * To maximize the comfort level for the PTE manipulation macros,
+ * define the "well known" architecture-specific bits.
+ */
+#define _PAGE_READ __HVM_PTE_R
+#define _PAGE_WRITE __HVM_PTE_W
+#define _PAGE_EXECUTE __HVM_PTE_X
+#define _PAGE_USER __HVM_PTE_U
+
+/*
+ * We have a total of 4 "soft" bits available in the abstract PTE.
+ * The two mandatory software bits are Dirty and Accessed.
+ * To make nonlinear swap work according to the more recent
+ * model, we want a low order "Present" bit to indicate whether
+ * the PTE describes MMU programming or swap space.
+ */
+#define _PAGE_PRESENT (1<<0)
+#define _PAGE_DIRTY (1<<1)
+#define _PAGE_ACCESSED (1<<2)
+
+/*
+ * For now, let's say that Valid and Present are the same thing.
+ * Alternatively, we could say that it's the "or" of R, W, and X
+ * permissions.
+ */
+#define _PAGE_VALID _PAGE_PRESENT
+
+/*
+ * We're not defining _PAGE_GLOBAL here, since there's no concept
+ * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
+ * and we want to use the same page table structures and macros in
+ * the native kernel as we do in the virtual machine kernel.
+ * So we'll put up with a bit of inefficiency for now...
+ */
+
+/*
+ * Top "FOURTH" level (pgd), which for the Hexagon VM is really
+ * only the second from the bottom, pgd and pud both being collapsed.
+ * Each entry represents 4MB of virtual address space, 4K of table
+ * thus maps the full 4GB.
+ */
+#define PGDIR_SHIFT 22
+#define PTRS_PER_PGD 1024
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define PTRS_PER_PTE 1024
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_16KB
+#define PTRS_PER_PTE 256
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define PTRS_PER_PTE 64
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_256KB
+#define PTRS_PER_PTE 16
+#endif
+
+#ifdef CONFIG_PAGE_SIZE_1MB
+#define PTRS_PER_PTE 4
+#endif
+
+/* Any bigger and the PTE disappears. */
+#define pgd_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
+ pgd_val(e))
+
+/*
+ * Page Protection Constants. Includes (in this variant) cache attributes.
+ */
+extern unsigned long _dflt_cache_att;
+
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _dflt_cache_att)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
+#define PAGE_COPY PAGE_READONLY
+#define PAGE_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
+#define PAGE_COPY_EXEC PAGE_EXEC
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
+ _PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)
+
+
+/*
+ * Aliases for mapping mmap() protection bits to page protections.
+ * These get used for static initialization, so using the _dflt_cache_att
+ * variable for the default cache attribute isn't workable. If the
+ * default gets changed at boot time, the boot option code has to
+ * update data structures like the protection_map[] array.
+ */
+#define CACHEDEF (CACHE_DEFAULT << 6)
+
+/* Private (copy-on-write) page protections. */
+#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
+#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
+#define __P010 __P000 /* Write-only copy-on-write */
+#define __P011 __P001 /* Read/Write copy-on-write */
+#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_EXECUTE | CACHEDEF)
+#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
+ _PAGE_READ | CACHEDEF)
+#define __P110 __P100 /* Write/execute copy-on-write */
+#define __P111 __P101 /* Read/Write/Execute, copy-on-write */
+
+/* Shared page protections. */
+#define __S000 __P000
+#define __S001 __P001
+#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_WRITE | CACHEDEF)
+#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
+ _PAGE_WRITE | CACHEDEF)
+#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_EXECUTE | CACHEDEF)
+#define __S101 __P101
+#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
+#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
+ _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
+
+/* Seems to be zero even in architectures where the zero page is firewalled? */
+#define FIRST_USER_ADDRESS 0UL
+#define pte_special(pte) 0
+#define pte_mkspecial(pte) (pte)
+
+/* HUGETLB not working currently */
+#ifdef CONFIG_HUGETLB_PAGE
+#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
+#endif
+
+/*
+ * For now, assume that higher-level code will do TLB/MMU invalidations
+ * and don't insert that overhead into this low-level function.
+ */
+extern void sync_icache_dcache(pte_t pte);
+
+#define pte_present_exec_user(pte) \
+ ((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
+ (_PAGE_EXECUTE | _PAGE_USER))
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ /* should really be using pte_exec, if it weren't declared later. */
+ if (pte_present_exec_user(pteval))
+ sync_icache_dcache(pteval);
+
+ *ptep = pteval;
+}
+
+/*
+ * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
+ * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
+ * (Linux PTE), the key is to have bits 11..9 all zero. We'd use 0x7
+ * as a universal null entry, but some of those least significant bits
+ * are interpreted by software.
+ */
+#define _NULL_PMD 0x7
+#define _NULL_PTE 0x0
+
+static inline void pmd_clear(pmd_t *pmd_entry_ptr)
+{
+ pmd_val(*pmd_entry_ptr) = _NULL_PMD;
+}
+
+/*
+ * Conveniently, a null PTE value is invalid.
+ */
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_val(*ptep) = _NULL_PTE;
+}
+
+#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
+/**
+ * pmd_index - returns the index of the entry in the PMD page
+ * which would control the given virtual address
+ */
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+#endif
+
+/**
+ * pgd_index - returns the index of the entry in the PGD page
+ * which would control the given virtual address
+ *
+ * This returns the *index* for the address in the pgd_t
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+/*
+ * pgd_offset - find an offset in a page-table-directory
+ */
+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+
+/*
+ * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/**
+ * pmd_none - check if pmd_entry is mapped
+ * @pmd_entry: pmd entry
+ *
+ * MIPS checks it against that "invalid pte table" thing.
+ */
+static inline int pmd_none(pmd_t pmd)
+{
+ return pmd_val(pmd) == _NULL_PMD;
+}
+
+/**
+ * pmd_present - is there a page table behind this?
+ * Essentially the inverse of pmd_none. We maybe
+ * save an inline instruction by defining it this
+ * way, instead of simply "!pmd_none".
+ */
+static inline int pmd_present(pmd_t pmd)
+{
+ return pmd_val(pmd) != (unsigned long)_NULL_PMD;
+}
+
+/**
+ * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
+ * As we have no known cause of badness, it's null, as it is for many
+ * architectures.
+ */
+static inline int pmd_bad(pmd_t pmd)
+{
+ return 0;
+}
+
+/*
+ * pmd_page - converts a PMD entry to a page pointer
+ */
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/**
+ * pte_none - check if pte is mapped
+ * @pte: pte_t entry
+ */
+static inline int pte_none(pte_t pte)
+{
+ return pte_val(pte) == _NULL_PTE;
+}
+
+/*
+ * pte_present - check if page is present
+ */
+static inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* mk_pte - make a PTE out of a page pointer and protection bits */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+/* pte_page - returns the struct page (page frame descriptor) for a PTE */
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+/* pte_mkold - mark PTE as not recently accessed */
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_ACCESSED;
+ return pte;
+}
+
+/* pte_mkyoung - mark PTE as recently accessed */
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ return pte;
+}
+
+/* pte_mkclean - mark page as in sync with backing store */
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_DIRTY;
+ return pte;
+}
+
+/* pte_mkdirty - mark page as modified */
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+/* pte_young - "is PTE marked as accessed"? */
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+/* pte_dirty - "is PTE dirty?" */
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DIRTY;
+}
+
+/* pte_modify - set protection bits on PTE */
+static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
+{
+ pte_val(pte) &= PAGE_MASK;
+ pte_val(pte) |= pgprot_val(prot);
+ return pte;
+}
+
+/* pte_wrprotect - mark page as not writable */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_WRITE;
+ return pte;
+}
+
+/* pte_mkwrite - mark page as writable */
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ return pte;
+}
+
+/* pte_mkexec - mark PTE as executable */
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_EXECUTE;
+ return pte;
+}
+
+/* pte_read - "is PTE marked as readable?" */
+static inline int pte_read(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_READ;
+}
+
+/* pte_write - "is PTE marked as writable?" */
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_WRITE;
+}
+
+
+/* pte_exec - "is PTE marked as executable?" */
+static inline int pte_exec(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_EXECUTE;
+}
+
+/* __pte_to_swp_entry - extract swap entry from PTE */
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+
+/* __swp_entry_to_pte - extract PTE from swap entry */
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+/* pfn_pte - convert page number and protection value to page table entry */
+#define pfn_pte(pfn, pgprot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(pgprot))
+
+/* pte_pfn - convert pte to page frame number */
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+
+/*
+ * set_pte_at - update page table and do whatever magic may be
+ * necessary to make the underlying hardware/firmware take note.
+ *
+ * VM may require a virtual instruction to alert the MMU.
+ */
+#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
+
+/*
+ * May need to invoke the virtual machine as well...
+ */
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+/*
+ * pte_offset_map - returns the linear address of the page table entry
+ * corresponding to an address
+ */
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
+
+#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)
+
+/* pte_offset_kernel - kernel version of pte_offset */
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
+ + __pte_offset(address))
+
+/* ZERO_PAGE - returns the globally shared zero page */
+#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+
+#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/* I think this is in case we have page table caches; needed by init/main.c */
+#define pgtable_cache_init() do { } while (0)
+
+/*
+ * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
+ * interpreted as swap information. The remaining free bits are interpreted as
+ * swap type/offset tuple. Rather than have the TLB fill handler test
+ * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
+ * all zeros for swap entries, which speeds up the miss handler at the cost of
+ * 3 bits of offset. That trade-off can be revisited if necessary, but Hexagon
+ * processor architecture and target applications suggest a lot of TLB misses
+ * and not much swap space.
+ *
+ * Format of swap PTE:
+ * bit 0: Present (zero)
+ * bits 1-5: swap type (arch independent layer uses 5 bits max)
+ * bits 6-9: bits 3:0 of offset
+ * bits 10-12: effectively _PAGE_PROTNONE (all zero)
+ * bits 13-31: bits 22:4 of swap offset
+ *
+ * The split offset makes some of the following macros a little gnarly,
+ * but there's plenty of precedent for this sort of thing.
+ */
+
+/* Used for swap PTEs */
+#define __swp_type(swp_pte) (((swp_pte).val >> 1) & 0x1f)
+
+#define __swp_offset(swp_pte) \
+ ((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))
+
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { \
+ (((type) << 1) | \
+ ((((offset) & 0x7ffff0) << 9) | (((offset) & 0xf) << 6))) })
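+
+/*
+ * Worked example of the split encoding: type 2 with offset 0x13
+ * packs as (2 << 1) | (0x10 << 9) | (0x3 << 6) == 0x20c4;
+ * __swp_type() then recovers 2 and __swp_offset() reassembles 0x13.
+ */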
+
+/* Oh boy. There are a lot of possible arch overrides found in this file. */
+#include <asm-generic/pgtable.h>
+
+#endif
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
new file mode 100644
index 000000000..ce6794086
--- /dev/null
+++ b/arch/hexagon/include/asm/processor.h
@@ -0,0 +1,149 @@
+/*
+ * Process/processor support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_PROCESSOR_H
+#define _ASM_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/mem-layout.h>
+#include <asm/registers.h>
+#include <asm/hexagon_vm.h>
+
+/* must be a macro */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+/* task_struct, defined elsewhere, is the "process descriptor" */
+struct task_struct;
+
+extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
+
+/*
+ * thread_struct is supposed to be for context switch data.
+ * Specifically, to hold the state necessary to perform switch_to...
+ */
+struct thread_struct {
+ void *switch_sp;
+};
+
+/*
+ * initializes thread_struct
+ * The only thing we have in there is switch_sp
+ * which doesn't really need to be initialized.
+ */
+
+#define INIT_THREAD { \
+}
+
+#define cpu_relax() __vmyield()
+
+/*
+ * Decides where the kernel will search for a free chunk of vm space during
+ * mmaps.
+ * See also arch_get_unmapped_area.
+ * Doesn't apply if you have MAP_FIXED set in the page flags, though...
+ *
+ * Apparently the convention is that ld.so will ask for "unmapped" private
+ * memory to be allocated SOMEWHERE, but it also asks for memory explicitly
+ * via MAP_FIXED at the lower addresses starting at VA=0x0.
+ *
+ * If the two requests collide, you get authentic segfaulting action, so
+ * you have to kick the "unmapped" base requests higher up.
+ */
+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE/3))
+
+
+#define task_pt_regs(task) \
+ ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
+
+#define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk)))
+#define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk)))
+
+/* Free all resources held by a thread; defined in process.c */
+extern void release_thread(struct task_struct *dead_task);
+
+/* Get wait channel for task P. */
+extern unsigned long get_wchan(struct task_struct *p);
+
+/* The following stuff is pretty HEXAGON specific. */
+
+/* This is really just here for __switch_to.
+ Offsets are pulled via asm-offsets.c */
+
+/*
+ * No real reason why VM and native switch stacks should be different.
+ * Ultimately this should merge. Note that Rev C. ABI called out only
+ * R24-27 as callee saved GPRs needing explicit attention (R29-31 being
+ * dealt with automagically by allocframe), but the current ABI has
+ * more, R16-R27. By saving more, the worst case is that we waste some
+ * cycles if building with the old compilers.
+ */
+
+struct hexagon_switch_stack {
+ union {
+ struct {
+ unsigned long r16;
+ unsigned long r17;
+ };
+ unsigned long long r1716;
+ };
+ union {
+ struct {
+ unsigned long r18;
+ unsigned long r19;
+ };
+ unsigned long long r1918;
+ };
+ union {
+ struct {
+ unsigned long r20;
+ unsigned long r21;
+ };
+ unsigned long long r2120;
+ };
+ union {
+ struct {
+ unsigned long r22;
+ unsigned long r23;
+ };
+ unsigned long long r2322;
+ };
+ union {
+ struct {
+ unsigned long r24;
+ unsigned long r25;
+ };
+ unsigned long long r2524;
+ };
+ union {
+ struct {
+ unsigned long r26;
+ unsigned long r27;
+ };
+ unsigned long long r2726;
+ };
+
+ unsigned long fp;
+ unsigned long lr;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/hexagon/include/asm/smp.h b/arch/hexagon/include/asm/smp.h
new file mode 100644
index 000000000..ca171c138
--- /dev/null
+++ b/arch/hexagon/include/asm/smp.h
@@ -0,0 +1,43 @@
+/*
+ * SMP definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/cpumask.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+enum ipi_message_type {
+ IPI_NOP = 0,
+ IPI_RESCHEDULE = 1,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+ IPI_TIMER,
+};
+
+extern void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg);
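+/* e.g. (illustrative): send_ipi(cpumask_of(cpu), IPI_RESCHEDULE); */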
+extern void smp_start_cpus(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+extern void smp_vm_unmask_irq(void *info);
+
+#endif
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
new file mode 100644
index 000000000..e090f6a15
--- /dev/null
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -0,0 +1,172 @@
+/*
+ * Spinlock support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_SPINLOCK_H
+#define _ASM_SPINLOCK_H
+
+#include <asm/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+/*
+ * This file is pulled in for SMP builds.
+ * The barrier usage still needs review for "true" SMP.
+ */
+
+/*
+ * Read locks:
+ * - load the lock value
+ * - if it is negative (write-locked), go back and try again
+ * - otherwise, conditionally store the incremented value
+ * - if the store fails (reservation lost), go back and try again
+ * - a successful store of the incremented (non-negative) value
+ *   means the read lock is acquired
+ */
+static inline void arch_read_lock(arch_rwlock_t *lock)
+{
+ __asm__ __volatile__(
+ "1: R6 = memw_locked(%0);\n"
+ " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
+ " { if (!P3) jump 1b; }\n"
+ " memw_locked(%0,P3) = R6;\n"
+ " { if (!P3) jump 1b; }\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+ );
+
+}
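+
+/*
+ * Illustrative sketch only (hypothetical name, GCC __atomic builtins):
+ * the load-locked/store-conditional loop above is approximately this
+ * portable C.
+ */
+static inline void __sketch_read_lock(volatile int *lock)
+{
+	int old;
+
+	for (;;) {
+		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
+		if (old < 0)
+			continue;	/* write-locked; spin */
+		if (__atomic_compare_exchange_n(lock, &old, old + 1, 0,
+						__ATOMIC_ACQUIRE,
+						__ATOMIC_RELAXED))
+			break;		/* reader count bumped; acquired */
+	}
+}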
+
+static inline void arch_read_unlock(arch_rwlock_t *lock)
+{
+ __asm__ __volatile__(
+ "1: R6 = memw_locked(%0);\n"
+ " R6 = add(R6,#-1);\n"
+ " memw_locked(%0,P3) = R6\n"
+ " if (!P3) jump 1b;\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+ );
+
+}
+
+/* Returns 0 on failure, nonzero on success. */
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+ int temp;
+ __asm__ __volatile__(
+ " R6 = memw_locked(%1);\n"
+ " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
+ " { if (!P3) jump 1f; }\n"
+ " memw_locked(%1,P3) = R6;\n"
+ " { %0 = P3 }\n"
+ "1:\n"
+ : "=&r" (temp)
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+ );
+ return temp;
+}
+
+/* Write lock: store -1 once the lock value reads back as 0 (free). */
+static inline void arch_write_lock(arch_rwlock_t *lock)
+{
+ __asm__ __volatile__(
+ "1: R6 = memw_locked(%0)\n"
+ " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
+ " { if (!P3) jump 1b; }\n"
+ " memw_locked(%0,P3) = R6;\n"
+ " { if (!P3) jump 1b; }\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+ );
+}
+
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+ int temp;
+ __asm__ __volatile__(
+ " R6 = memw_locked(%1)\n"
+ " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
+ " { if (!P3) jump 1f; }\n"
+ " memw_locked(%1,P3) = R6;\n"
+ " %0 = P3;\n"
+ "1:\n"
+ : "=&r" (temp)
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+ );
+ return temp;
+
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *lock)
+{
+ smp_mb();
+ lock->lock = 0;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ __asm__ __volatile__(
+ "1: R6 = memw_locked(%0);\n"
+ " P3 = cmp.eq(R6,#0);\n"
+ " { if (!P3) jump 1b; R6 = #1; }\n"
+ " memw_locked(%0,P3) = R6;\n"
+ " { if (!P3) jump 1b; }\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+ );
+
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+ lock->lock = 0;
+}
+
+static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ int temp;
+ __asm__ __volatile__(
+ " R6 = memw_locked(%1);\n"
+ " P3 = cmp.eq(R6,#0);\n"
+ " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
+ " memw_locked(%1,P3) = R6;\n"
+ " %0 = P3;\n"
+ "1:\n"
+ : "=&r" (temp)
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+ );
+ return temp;
+}
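+
+/*
+ * Illustrative sketch (hypothetical name): arch_spin_lock() above is
+ * a load-locked/store-conditional loop that waits for 0 and stores 1;
+ * unlock is just a barrier plus a plain store of 0.
+ */
+static inline void __sketch_spin_lock(volatile unsigned int *lock)
+{
+	unsigned int expected;
+
+	do {
+		expected = 0;
+	} while (!__atomic_compare_exchange_n(lock, &expected, 1, 0,
+					      __ATOMIC_ACQUIRE,
+					      __ATOMIC_RELAXED));
+}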
+
+/*
+ * SMP spinlocks are intended to allow only a single CPU at the lock
+ */
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+
+#endif
diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h
new file mode 100644
index 000000000..7a906b521
--- /dev/null
+++ b/arch/hexagon/include/asm/spinlock_types.h
@@ -0,0 +1,40 @@
+/*
+ * Spinlock support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif
diff --git a/arch/hexagon/include/asm/string.h b/arch/hexagon/include/asm/string.h
new file mode 100644
index 000000000..7d37f47a1
--- /dev/null
+++ b/arch/hexagon/include/asm/string.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_STRING_H_
+#define _ASM_STRING_H_
+
+#ifdef __KERNEL__
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+
+/* ToDo: use dczeroa, accelerate the compiler-constant zero case */
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__to, int c, size_t __n);
+#endif
+
+
+#endif /* _ASM_STRING_H_ */
diff --git a/arch/hexagon/include/asm/suspend.h b/arch/hexagon/include/asm/suspend.h
new file mode 100644
index 000000000..18b44b557
--- /dev/null
+++ b/arch/hexagon/include/asm/suspend.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_SUSPEND_H
+#define _ASM_SUSPEND_H
+
+static inline int arch_prepare_suspend(void)
+{
+ return 0;
+}
+
+#endif
diff --git a/arch/hexagon/include/asm/switch_to.h b/arch/hexagon/include/asm/switch_to.h
new file mode 100644
index 000000000..96745e7b3
--- /dev/null
+++ b/arch/hexagon/include/asm/switch_to.h
@@ -0,0 +1,34 @@
+/*
+ * Task switching definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_SWITCH_TO_H
+#define _ASM_SWITCH_TO_H
+
+struct thread_struct;
+
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(p, n, r) do {\
+ r = __switch_to((p), (n), (r));\
+} while (0)
+
+#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h
new file mode 100644
index 000000000..4af9c7b6f
--- /dev/null
+++ b/arch/hexagon/include/asm/syscall.h
@@ -0,0 +1,46 @@
+/*
+ * Syscall support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_HEXAGON_SYSCALL_H
+#define _ASM_HEXAGON_SYSCALL_H
+
+typedef long (*syscall_fn)(unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ unsigned long, unsigned long);
+
+#include <asm-generic/syscalls.h>
+
+extern void *sys_call_table[];
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r06;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ memcpy(args, &(&regs->r00)[i], n * sizeof(args[0]));
+}
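+
+/*
+ * Usage sketch: the memcpy above relies on r00..r05 being contiguous
+ * in pt_regs (see uapi/asm/registers.h). Fetching all six arguments:
+ *
+ *	unsigned long args[6];
+ *	syscall_get_arguments(current, task_pt_regs(current), 0, 6, args);
+ */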
+#endif
diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h
new file mode 100644
index 000000000..f41f9c6f0
--- /dev/null
+++ b/arch/hexagon/include/asm/thread_info.h
@@ -0,0 +1,128 @@
+/*
+ * Thread support for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/registers.h>
+#include <asm/page.h>
+#endif
+
+#define THREAD_SHIFT 12
+#define THREAD_SIZE (1<<THREAD_SHIFT)
+#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+/*
+ * This is union'd with the "bottom" (lowest address) of the kernel
+ * stack, keeping per-thread state where routines can reach it
+ * quickly.
+ */
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ __u32 cpu; /* current cpu */
+ int preempt_count; /* 0=>preemptible,<0=>BUG */
+ mm_segment_t addr_limit; /* segmentation sux */
+	/*
+	 * Points to the current pt_regs frame; on syscall entry this
+	 * frame also carries the syscall state (number and arguments).
+	 */
+	struct pt_regs *regs;
+	/*
+	 * Saved kernel sp at switch_to time; apparently unused in the
+	 * VM model (see thread_struct's switch_sp instead).
+	 */
+	unsigned long sp;
+};
+
+#else /* !__ASSEMBLY__ */
+
+#include <asm/asm-offsets.h>
+
+#endif /* __ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = 1, \
+ .addr_limit = KERNEL_DS, \
+ .sp = 0, \
+ .regs = NULL, \
+}
+
+/* Tacky preprocessor trickery */
+#define qqstr(s) qstr(s)
+#define qstr(s) #s
+#define QUOTED_THREADINFO_REG qqstr(THREADINFO_REG)
+
+register struct thread_info *__current_thread_info asm(QUOTED_THREADINFO_REG);
+#define current_thread_info() __current_thread_info
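+
+/*
+ * Illustrative expansion: if the build sets THREADINFO_REG to, say,
+ * a hypothetical r19, the double expansion above yields
+ *	register struct thread_info *__current_thread_info asm("r19");
+ * a single-level stringify would have produced "THREADINFO_REG".
+ */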
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files
+ * may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SINGLESTEP 4 /* restore ss @ return to usr mode */
+#define TIF_RESTORE_SIGMASK 6 /* restore sig mask in do_signal() */
+#define TIF_MEMDIE 17 /* OOM killer killed process */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+
+/* work to do on interrupt/exception return - All but TIF_SYSCALL_TRACE */
+#define _TIF_WORK_MASK (0x0000FFFF & ~_TIF_SYSCALL_TRACE)
+
+/* work to do on any return to u-space */
+#define _TIF_ALLWORK_MASK 0x0000FFFF
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/hexagon/include/asm/time.h b/arch/hexagon/include/asm/time.h
new file mode 100644
index 000000000..deda170c0
--- /dev/null
+++ b/arch/hexagon/include/asm/time.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef ASM_TIME_H
+#define ASM_TIME_H
+
+extern cycles_t pcycle_freq_mhz;
+extern cycles_t thread_freq_mhz;
+extern cycles_t sleep_clk_freq;
+
+void setup_percpu_clockdev(void);
+void ipi_timer(void);
+
+#endif
diff --git a/arch/hexagon/include/asm/timer-regs.h b/arch/hexagon/include/asm/timer-regs.h
new file mode 100644
index 000000000..79912b8c1
--- /dev/null
+++ b/arch/hexagon/include/asm/timer-regs.h
@@ -0,0 +1,39 @@
+/*
+ * Timer support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_TIMER_REGS_H
+#define _ASM_TIMER_REGS_H
+
+/* This stuff should go into a platform-specific file */
+#define TCX0_CLK_RATE 19200
+#define TIMER_ENABLE 0
+#define TIMER_CLR_ON_MATCH 1
+
+/*
+ * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
+ * release 1.1, and then it's "adjustable" and probably not defaulted.
+ */
+#define RTOS_TIMER_INT 3
+#ifdef CONFIG_HEXAGON_COMET
+#define RTOS_TIMER_REGS_ADDR 0xAB000000UL
+#endif
+#define SLEEP_CLK_RATE 32000
+
+#endif
diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h
new file mode 100644
index 000000000..f63fe132f
--- /dev/null
+++ b/arch/hexagon/include/asm/timex.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_TIMEX_H
+#define _ASM_TIMEX_H
+
+#include <asm-generic/timex.h>
+#include <asm/timer-regs.h>
+
+/* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */
+#define CLOCK_TICK_RATE TCX0_CLK_RATE
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+
+static inline int read_current_timer(unsigned long *timer_val)
+{
+ *timer_val = (unsigned long) __vmgettime();
+ return 0;
+}
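+
+/*
+ * Usage sketch (illustrative): elapsed-time measurement in VM timer
+ * ticks, as the delay-loop calibration code can do via
+ * ARCH_HAS_READ_CURRENT_TIMER:
+ *
+ *	unsigned long t0, t1;
+ *	read_current_timer(&t0);
+ *	...work...
+ *	read_current_timer(&t1);	t1 - t0 is the elapsed count
+ */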
+
+#endif
diff --git a/arch/hexagon/include/asm/tlb.h b/arch/hexagon/include/asm/tlb.h
new file mode 100644
index 000000000..2f00772cc
--- /dev/null
+++ b/arch/hexagon/include/asm/tlb.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_TLB_H
+#define _ASM_TLB_H
+
+#include <linux/pagemap.h>
+#include <asm/tlbflush.h>
+
+/*
+ * We don't need any special per-pte or per-vma handling...
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+/*
+ * .. because we flush the whole mm when it fills up
+ */
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif
diff --git a/arch/hexagon/include/asm/tlbflush.h b/arch/hexagon/include/asm/tlbflush.h
new file mode 100644
index 000000000..62d95a970
--- /dev/null
+++ b/arch/hexagon/include/asm/tlbflush.h
@@ -0,0 +1,58 @@
+/*
+ * TLB flush support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_TLBFLUSH_H
+#define _ASM_TLBFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+
+/*
+ * TLB flushing -- in "SMP", these routines get defined to be the
+ * ones from smp.c, else they are some local flavors.
+ */
+
+/*
+ * These functions are commonly macros, but in the interests of
+ * VM vs. native implementation and code size, we simply declare
+ * the function prototypes here.
+ */
+extern void tlb_flush_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_one(unsigned long);
+
+/*
+ * "This is called in munmap when we have freed up some page-table pages.
+ * We don't need to do anything here..."
+ *
+ * The VM kernel doesn't walk page tables, and they are passed to the VMM
+ * by logical address. There doesn't seem to be any possibility that they
+ * could be referenced by the VM kernel based on a stale mapping, since
+ * they would only be located by consulting the mm structure, and they
+ * will have been purged from that structure by the munmap. Seems like
+ * a noop on HVM as well.
+ */
+#define flush_tlb_pgtables(mm, start, end)
+
+#endif
diff --git a/arch/hexagon/include/asm/traps.h b/arch/hexagon/include/asm/traps.h
new file mode 100644
index 000000000..ec1128501
--- /dev/null
+++ b/arch/hexagon/include/asm/traps.h
@@ -0,0 +1,29 @@
+/*
+ * Trap support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_HEXAGON_TRAPS_H
+#define _ASM_HEXAGON_TRAPS_H
+
+#include <asm/registers.h>
+
+extern int die(const char *str, struct pt_regs *regs, long err);
+extern int die_if_kernel(char *str, struct pt_regs *regs, long err);
+
+#endif /* _ASM_HEXAGON_TRAPS_H */
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
new file mode 100644
index 000000000..458b69886
--- /dev/null
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -0,0 +1,112 @@
+/*
+ * User memory access support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_UACCESS_H
+#define _ASM_UACCESS_H
+/*
+ * User space memory access functions
+ */
+#include <linux/mm.h>
+#include <asm/segment.h>
+#include <asm/sections.h>
+
+/*
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
+ * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ * to write to a block, it is always safe to read from it.
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block *may* be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * User address space in Hexagon, like x86, goes to 0xbfffffff, so the
+ * simple MSB-based tests used by MIPS won't work. Some further
+ * optimization is probably possible here, but for now, keep it
+ * reasonably simple and not *too* slow. After all, we've got the
+ * MMU for backup.
+ */
+
+#define __access_ok(addr, size) \
+ ((get_fs().seg == KERNEL_DS.seg) || \
+ (((unsigned long)addr < get_fs().seg) && \
+ (unsigned long)size < (get_fs().seg - (unsigned long)addr)))
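+
+/*
+ * Worked example (hypothetical seg of 0xc0000000): addr 0xbffffff0
+ * with size 0x20 passes the first test (addr < seg) but fails the
+ * second, since 0x20 is not less than 0xc0000000 - 0xbffffff0 = 0x10.
+ * Writing the bound as (size < seg - addr) also avoids the unsigned
+ * wraparound that a naive (addr + size <= seg) would hit near the top
+ * of the address space.
+ */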
+
+/*
+ * When a kernel-mode page fault is taken, the faulting instruction
+ * address is checked against a table of exception_table_entries.
+ * Each entry is a tuple of the address of an instruction that may
+ * be authorized to fault, and the address at which execution should
+ * be resumed instead of the faulting instruction, so as to effect
+ * a workaround.
+ */
+
+/* Somewhat-optimized copy routines, implemented in assembly */
+unsigned long raw_copy_from_user(void *to, const void __user *from,
+ unsigned long n);
+unsigned long raw_copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
+#define __clear_user(a, s) __clear_user_hexagon((a), (s))
+
+#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n)
+
+/* get around the ifndef in asm-generic/uaccess.h */
+#define __strnlen_user __strnlen_user
+
+extern long __strnlen_user(const char __user *src, long n);
+
+static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
+ long n);
+
+#include <asm-generic/uaccess.h>
+
+/* Todo: an actual accelerated version of this. */
+static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
+ long n)
+{
+ long res = __strnlen_user(src, n);
+
+ if (unlikely(!res))
+ return -EFAULT;
+
+ if (res > n) {
+ long left = raw_copy_from_user(dst, src, n);
+ if (unlikely(left))
+ memset(dst + (n - left), 0, left);
+ return n;
+ } else {
+ long left = raw_copy_from_user(dst, src, res);
+ if (unlikely(left))
+ memset(dst + (res - left), 0, left);
+		return res - 1;
+ }
+}
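+
+/*
+ * Return convention of the routine above, assuming __strnlen_user()
+ * counts the terminating NUL and returns 0 on a fault at the first
+ * byte: 0 -> -EFAULT; terminator found within n bytes -> the string
+ * length (res - 1), terminator copied; no terminator within n bytes
+ * -> exactly n bytes copied and n returned.
+ */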
+
+#endif
diff --git a/arch/hexagon/include/asm/vdso.h b/arch/hexagon/include/asm/vdso.h
new file mode 100644
index 000000000..ed08e6c68
--- /dev/null
+++ b/arch/hexagon/include/asm/vdso.h
@@ -0,0 +1,30 @@
+/*
+ * vDSO implementation for Hexagon
+ *
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __ASM_VDSO_H
+#define __ASM_VDSO_H
+
+#include <linux/types.h>
+
+struct hexagon_vdso {
+ u32 rt_signal_trampoline[2];
+};
+
+#endif /* __ASM_VDSO_H */
diff --git a/arch/hexagon/include/asm/vm_fault.h b/arch/hexagon/include/asm/vm_fault.h
new file mode 100644
index 000000000..9b0e9c50c
--- /dev/null
+++ b/arch/hexagon/include/asm/vm_fault.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_HEXAGON_VM_FAULT_H
+#define _ASM_HEXAGON_VM_FAULT_H
+
+extern void execute_protection_fault(struct pt_regs *);
+extern void write_protection_fault(struct pt_regs *);
+extern void read_protection_fault(struct pt_regs *);
+
+#endif
diff --git a/arch/hexagon/include/asm/vm_mmu.h b/arch/hexagon/include/asm/vm_mmu.h
new file mode 100644
index 000000000..6fc29d9d5
--- /dev/null
+++ b/arch/hexagon/include/asm/vm_mmu.h
@@ -0,0 +1,110 @@
+/*
+ * Hexagon VM page table entry definitions
+ *
+ * Copyright (c) 2010-2011,2013 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_VM_MMU_H
+#define _ASM_VM_MMU_H
+
+/*
+ * Shift, mask, and other constants for the Hexagon Virtual Machine
+ * page tables.
+ *
+ * Virtual machine MMU allows first-level entries to either be
+ * single-level lookup PTEs for very large pages, or PDEs pointing
+ * to second-level PTEs for smaller pages. If PTE is single-level,
+ * the least significant bits cannot be used as software bits to encode
+ * virtual memory subsystem information about the page, and that state
+ * must be maintained in some parallel data structure.
+ */
+
+/* S or Page Size field in PDE */
+#define __HVM_PDE_S (0x7 << 0)
+#define __HVM_PDE_S_4KB 0
+#define __HVM_PDE_S_16KB 1
+#define __HVM_PDE_S_64KB 2
+#define __HVM_PDE_S_256KB 3
+#define __HVM_PDE_S_1MB 4
+#define __HVM_PDE_S_4MB 5
+#define __HVM_PDE_S_16MB 6
+#define __HVM_PDE_S_INVALID 7
+
+/* Masks for L2 page table pointer, as function of page size */
+#define __HVM_PDE_PTMASK_4KB 0xfffff000
+#define __HVM_PDE_PTMASK_16KB 0xfffffc00
+#define __HVM_PDE_PTMASK_64KB 0xffffff00
+#define __HVM_PDE_PTMASK_256KB 0xffffffc0
+#define __HVM_PDE_PTMASK_1MB 0xfffffff0
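+
+/*
+ * Illustrative decode (hypothetical helper, not part of the VM API):
+ * check a first-level entry's size class and, for the 4KB case,
+ * extract its L2 page-table pointer.
+ */
+static inline unsigned long __sketch_pde_l2_table_4kb(unsigned long pde)
+{
+	if ((pde & __HVM_PDE_S) != __HVM_PDE_S_4KB)
+		return 0;	/* not a PDE over 4KB pages */
+	return pde & __HVM_PDE_PTMASK_4KB;
+}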
+
+/*
+ * Virtual Machine PTE Bits/Fields
+ */
+#define __HVM_PTE_T (1<<4)
+#define __HVM_PTE_U (1<<5)
+#define __HVM_PTE_C (0x7<<6)
+#define __HVM_PTE_CVAL(pte) (((pte) & __HVM_PTE_C) >> 6)
+#define __HVM_PTE_R (1<<9)
+#define __HVM_PTE_W (1<<10)
+#define __HVM_PTE_X (1<<11)
+
+/*
+ * Cache Attributes, to be shifted as necessary for virtual/physical PTEs
+ */
+
+#define __HEXAGON_C_WB 0x0 /* Write-back, no L2 */
+#define __HEXAGON_C_WT 0x1 /* Write-through, no L2 */
+#define __HEXAGON_C_UNC 0x6 /* Uncached memory */
+#if CONFIG_HEXAGON_ARCH_VERSION >= 2
+#define __HEXAGON_C_DEV 0x4 /* Device register space */
+#else
+#define __HEXAGON_C_DEV __HEXAGON_C_UNC
+#endif
+#define __HEXAGON_C_WT_L2 0x5 /* Write-through, with L2 */
+#define __HEXAGON_C_WB_L2 0x7 /* Write-back, with L2 */
+
+/*
+ * This can be overridden, but we're defaulting to the most aggressive
+ * cache policy, the better to find bugs sooner.
+ */
+
+#define CACHE_DEFAULT __HEXAGON_C_WB_L2
+
+/* Masks for physical page address, as a function of page size */
+
+#define __HVM_PTE_PGMASK_4KB 0xfffff000
+#define __HVM_PTE_PGMASK_16KB 0xffffc000
+#define __HVM_PTE_PGMASK_64KB 0xffff0000
+#define __HVM_PTE_PGMASK_256KB 0xfffc0000
+#define __HVM_PTE_PGMASK_1MB 0xfff00000
+
+/* Masks for single-level large page lookups */
+
+#define __HVM_PTE_PGMASK_4MB 0xffc00000
+#define __HVM_PTE_PGMASK_16MB 0xff000000
+
+/*
+ * "Big kernel page mappings" (see vm_init_segtable.S)
+ * are currently 16MB
+ */
+
+#define BIG_KERNEL_PAGE_SHIFT 24
+#define BIG_KERNEL_PAGE_SIZE (1 << BIG_KERNEL_PAGE_SHIFT)
+
+#endif /* _ASM_VM_MMU_H */
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..41a176dbb
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -0,0 +1,27 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += auxvec.h
+generic-y += bpf_perf_event.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
+generic-y += ucontext.h
diff --git a/arch/hexagon/include/uapi/asm/bitsperlong.h b/arch/hexagon/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000..5adca0d26
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __ASM_HEXAGON_BITSPERLONG_H
+#define __ASM_HEXAGON_BITSPERLONG_H
+
+#define __BITS_PER_LONG 32
+
+#include <asm-generic/bitsperlong.h>
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/byteorder.h b/arch/hexagon/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..1364fa4b2
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/byteorder.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_BYTEORDER_H
+#define _ASM_BYTEORDER_H
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+# define __BYTEORDER_HAS_U64__
+#endif
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_BYTEORDER_H */
diff --git a/arch/hexagon/include/uapi/asm/kvm_para.h b/arch/hexagon/include/uapi/asm/kvm_para.h
new file mode 100644
index 000000000..baacc4996
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/kvm_para.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#include <asm-generic/kvm_para.h>
diff --git a/arch/hexagon/include/uapi/asm/param.h b/arch/hexagon/include/uapi/asm/param.h
new file mode 100644
index 000000000..a1283866d
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/param.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_PARAM_H
+#define _ASM_PARAM_H
+
+#define EXEC_PAGESIZE 16384
+
+#include <asm-generic/param.h>
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/ptrace.h b/arch/hexagon/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000..f79de05b8
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/ptrace.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Ptrace definitions for the Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_PTRACE_H
+#define _ASM_PTRACE_H
+
+#include <asm/registers.h>
+
+#define instruction_pointer(regs) pt_elr(regs)
+#define user_stack_pointer(regs) ((regs)->r29)
+
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* kprobe-based event tracer support */
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+
+#define current_pt_regs() \
+ ((struct pt_regs *) \
+ ((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
+
+#if CONFIG_HEXAGON_ARCH_VERSION >= 4
+#define arch_has_single_step() (1)
+#endif
+
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/registers.h b/arch/hexagon/include/uapi/asm/registers.h
new file mode 100644
index 000000000..d51270f3b
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/registers.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Register definitions for the Hexagon architecture
+ */
+
+
+#ifndef _ASM_REGISTERS_H
+#define _ASM_REGISTERS_H
+
+#ifndef __ASSEMBLY__
+
+/* See kernel/entry.S for further documentation. */
+
+/*
+ * Entry code copies the event record out of guest registers into
+ * this structure (which is on the stack).
+ */
+
+struct hvm_event_record {
+ unsigned long vmel; /* Event Linkage (return address) */
+ unsigned long vmest; /* Event context - pre-event SSR values */
+ unsigned long vmpsp; /* Previous stack pointer */
+ unsigned long vmbadva; /* Bad virtual address for addressing events */
+};
+
+struct pt_regs {
+ long restart_r0; /* R0 checkpoint for syscall restart */
+ long syscall_nr; /* Only used in system calls */
+ union {
+ struct {
+ unsigned long usr;
+ unsigned long preds;
+ };
+ long long int predsusr;
+ };
+ union {
+ struct {
+ unsigned long m0;
+ unsigned long m1;
+ };
+ long long int m1m0;
+ };
+ union {
+ struct {
+ unsigned long sa1;
+ unsigned long lc1;
+ };
+ long long int lc1sa1;
+ };
+ union {
+ struct {
+ unsigned long sa0;
+ unsigned long lc0;
+ };
+ long long int lc0sa0;
+ };
+ union {
+ struct {
+ unsigned long ugp;
+ unsigned long gp;
+ };
+ long long int gpugp;
+ };
+ union {
+ struct {
+ unsigned long cs0;
+ unsigned long cs1;
+ };
+ long long int cs1cs0;
+ };
+ /*
+ * Be extremely careful with rearranging these, if at all. Some code
+ * assumes the 32 registers exist exactly like this in memory;
+ * e.g. kernel/ptrace.c
+ * e.g. kernel/signal.c (restore_sigcontext)
+ */
+ union {
+ struct {
+ unsigned long r00;
+ unsigned long r01;
+ };
+ long long int r0100;
+ };
+ union {
+ struct {
+ unsigned long r02;
+ unsigned long r03;
+ };
+ long long int r0302;
+ };
+ union {
+ struct {
+ unsigned long r04;
+ unsigned long r05;
+ };
+ long long int r0504;
+ };
+ union {
+ struct {
+ unsigned long r06;
+ unsigned long r07;
+ };
+ long long int r0706;
+ };
+ union {
+ struct {
+ unsigned long r08;
+ unsigned long r09;
+ };
+ long long int r0908;
+ };
+ union {
+ struct {
+ unsigned long r10;
+ unsigned long r11;
+ };
+ long long int r1110;
+ };
+ union {
+ struct {
+ unsigned long r12;
+ unsigned long r13;
+ };
+ long long int r1312;
+ };
+ union {
+ struct {
+ unsigned long r14;
+ unsigned long r15;
+ };
+ long long int r1514;
+ };
+ union {
+ struct {
+ unsigned long r16;
+ unsigned long r17;
+ };
+ long long int r1716;
+ };
+ union {
+ struct {
+ unsigned long r18;
+ unsigned long r19;
+ };
+ long long int r1918;
+ };
+ union {
+ struct {
+ unsigned long r20;
+ unsigned long r21;
+ };
+ long long int r2120;
+ };
+ union {
+ struct {
+ unsigned long r22;
+ unsigned long r23;
+ };
+ long long int r2322;
+ };
+ union {
+ struct {
+ unsigned long r24;
+ unsigned long r25;
+ };
+ long long int r2524;
+ };
+ union {
+ struct {
+ unsigned long r26;
+ unsigned long r27;
+ };
+ long long int r2726;
+ };
+ union {
+ struct {
+ unsigned long r28;
+ unsigned long r29;
+ };
+ long long int r2928;
+ };
+ union {
+ struct {
+ unsigned long r30;
+ unsigned long r31;
+ };
+ long long int r3130;
+ };
+ /* VM dispatch pushes event record onto stack - we can build on it */
+ struct hvm_event_record hvmer;
+};
+
+/* Defines to conveniently access the values */
+
+/*
+ * As of the VM spec 0.5, these registers are now set/retrieved via a
+ * VM call. On the in-bound side, we just fetch the values
+ * at the entry points and stuff them into the old record in pt_regs.
+ * However, on the outbound side, probably at VM rte, we set the
+ * registers back.
+ */
+
+#define pt_elr(regs) ((regs)->hvmer.vmel)
+#define pt_set_elr(regs, val) ((regs)->hvmer.vmel = (val))
+#define pt_cause(regs) ((regs)->hvmer.vmest & (HVM_VMEST_CAUSE_MSK))
+#define user_mode(regs) \
+ (((regs)->hvmer.vmest & (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT)) != 0)
+#define ints_enabled(regs) \
+ (((regs)->hvmer.vmest & (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)) != 0)
+#define pt_psp(regs) ((regs)->hvmer.vmpsp)
+#define pt_badva(regs) ((regs)->hvmer.vmbadva)
+
+#define pt_set_singlestep(regs) ((regs)->hvmer.vmest |= (1<<HVM_VMEST_SS_SFT))
+#define pt_clr_singlestep(regs) ((regs)->hvmer.vmest &= ~(1<<HVM_VMEST_SS_SFT))
+
+#define pt_set_rte_sp(regs, sp) do {\
+ pt_psp(regs) = (regs)->r29 = (sp);\
+ } while (0)
+
+#define pt_set_kmode(regs) \
+ (regs)->hvmer.vmest = (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
+
+#define pt_set_usermode(regs) \
+ (regs)->hvmer.vmest = (HVM_VMEST_UM_MSK << HVM_VMEST_UM_SFT) \
+ | (HVM_VMEST_IE_MSK << HVM_VMEST_IE_SFT)
+
+#endif /* ifndef __ASSEMBLY */
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/setup.h b/arch/hexagon/include/uapi/asm/setup.h
new file mode 100644
index 000000000..8ce9428b1
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/setup.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_SETUP_H
+#define _ASM_SETUP_H
+
+#ifdef __KERNEL__
+#include <linux/init.h>
+#else
+#define __init
+#endif
+
+#include <asm-generic/setup.h>
+
+extern char external_cmdline_buffer;
+
+void __init setup_arch_memory(void);
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/sigcontext.h b/arch/hexagon/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000..7171edb1b
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/sigcontext.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_SIGCONTEXT_H
+#define _ASM_SIGCONTEXT_H
+
+#include <asm/user.h>
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked. Note: only add new entries
+ * to the end of the structure.
+ */
+struct sigcontext {
+ struct user_regs_struct sc_regs;
+} __aligned(8);
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/signal.h b/arch/hexagon/include/uapi/asm/signal.h
new file mode 100644
index 000000000..a08fc4253
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/signal.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_SIGNAL_H
+#define _ASM_SIGNAL_H
+
+extern unsigned long __rt_sigtramp_template[2];
+
+void do_signal(struct pt_regs *regs);
+
+#include <asm-generic/signal.h>
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/swab.h b/arch/hexagon/include/uapi/asm/swab.h
new file mode 100644
index 000000000..b39f12104
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/swab.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_SWAB_H
+#define _ASM_SWAB_H
+
+#define __SWAB_64_THRU_32__
+
+#endif
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
new file mode 100644
index 000000000..ea181e791
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/unistd.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Syscall support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The kernel pulls this unistd.h in three different ways:
+ * 1. the "normal" way which gets all the __NR defines
+ * 2. with __SYSCALL defined to produce function declarations
+ * 3. with __SYSCALL defined to produce syscall table initialization
+ * See also: syscalltab.c
+ */
+
+#define sys_mmap2 sys_mmap_pgoff
+#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#include <asm-generic/unistd.h>
diff --git a/arch/hexagon/include/uapi/asm/user.h b/arch/hexagon/include/uapi/asm/user.h
new file mode 100644
index 000000000..7327ec59b
--- /dev/null
+++ b/arch/hexagon/include/uapi/asm/user.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef HEXAGON_ASM_USER_H
+#define HEXAGON_ASM_USER_H
+
+/*
+ * Layout for registers passed in elf core dumps to userspace.
+ *
+ * Basically a rearranged subset of "pt_regs".
+ *
+ * Interested parties: libc, gdb...
+ */
+
+struct user_regs_struct {
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r16;
+ unsigned long r17;
+ unsigned long r18;
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+ unsigned long r23;
+ unsigned long r24;
+ unsigned long r25;
+ unsigned long r26;
+ unsigned long r27;
+ unsigned long r28;
+ unsigned long r29;
+ unsigned long r30;
+ unsigned long r31;
+ unsigned long sa0;
+ unsigned long lc0;
+ unsigned long sa1;
+ unsigned long lc1;
+ unsigned long m0;
+ unsigned long m1;
+ unsigned long usr;
+ unsigned long p3_0;
+ unsigned long gp;
+ unsigned long ugp;
+ unsigned long pc;
+ unsigned long cause;
+ unsigned long badva;
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
+ unsigned long pad1; /* pad out to 48 words total */
+ unsigned long pad2; /* pad out to 48 words total */
+ unsigned long pad3; /* pad out to 48 words total */
+#else
+ unsigned long cs0;
+ unsigned long cs1;
+ unsigned long pad1; /* pad out to 48 words total */
+#endif
+};
+
+#endif
diff --git a/arch/hexagon/kernel/Makefile b/arch/hexagon/kernel/Makefile
new file mode 100644
index 000000000..fae3dce32
--- /dev/null
+++ b/arch/hexagon/kernel/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+extra-y := head.o vmlinux.lds
+
+obj-$(CONFIG_SMP) += smp.o
+
+obj-y += setup.o irq_cpu.o traps.o syscalltab.o signal.o time.o
+obj-y += process.o trampoline.o reset.o ptrace.o vdso.o
+
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_MODULES) += module.o hexagon_ksyms.o
+
+# Modules required to work with the Hexagon Virtual Machine
+obj-y += vm_entry.o vm_events.o vm_switch.o vm_ops.o vm_init_segtable.o
+obj-y += vm_vectors.o
+
+obj-$(CONFIG_HAS_DMA) += dma.o
+
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+
+obj-$(CONFIG_VGA_CONSOLE) += screen_info.o
diff --git a/arch/hexagon/kernel/asm-offsets.c b/arch/hexagon/kernel/asm-offsets.c
new file mode 100644
index 000000000..3980c0407
--- /dev/null
+++ b/arch/hexagon/kernel/asm-offsets.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 1996 David S. Miller
+ * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ *
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/compat.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kbuild.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+/* This file is used to produce assembly/linker-script constants from header
+   files typically used in C. Specifically, it generates asm-offsets.h. */
+
+int main(void)
+{
+ COMMENT("This is a comment.");
+ /* might get these from somewhere else. */
+ DEFINE(_PAGE_SIZE, PAGE_SIZE);
+ DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+ BLANK();
+
+ COMMENT("Hexagon pt_regs definitions");
+ OFFSET(_PT_SYSCALL_NR, pt_regs, syscall_nr);
+ OFFSET(_PT_GPUGP, pt_regs, gpugp);
+ OFFSET(_PT_CS1CS0, pt_regs, cs1cs0);
+ OFFSET(_PT_R3130, pt_regs, r3130);
+ OFFSET(_PT_R2928, pt_regs, r2928);
+ OFFSET(_PT_R2726, pt_regs, r2726);
+ OFFSET(_PT_R2524, pt_regs, r2524);
+ OFFSET(_PT_R2322, pt_regs, r2322);
+ OFFSET(_PT_R2120, pt_regs, r2120);
+ OFFSET(_PT_R1918, pt_regs, r1918);
+ OFFSET(_PT_R1716, pt_regs, r1716);
+ OFFSET(_PT_R1514, pt_regs, r1514);
+ OFFSET(_PT_R1312, pt_regs, r1312);
+ OFFSET(_PT_R1110, pt_regs, r1110);
+ OFFSET(_PT_R0908, pt_regs, r0908);
+ OFFSET(_PT_R0706, pt_regs, r0706);
+ OFFSET(_PT_R0504, pt_regs, r0504);
+ OFFSET(_PT_R0302, pt_regs, r0302);
+ OFFSET(_PT_R0100, pt_regs, r0100);
+ OFFSET(_PT_LC0SA0, pt_regs, lc0sa0);
+ OFFSET(_PT_LC1SA1, pt_regs, lc1sa1);
+ OFFSET(_PT_M1M0, pt_regs, m1m0);
+ OFFSET(_PT_PREDSUSR, pt_regs, predsusr);
+ OFFSET(_PT_EVREC, pt_regs, hvmer);
+ OFFSET(_PT_ER_VMEL, pt_regs, hvmer.vmel);
+ OFFSET(_PT_ER_VMEST, pt_regs, hvmer.vmest);
+ OFFSET(_PT_ER_VMPSP, pt_regs, hvmer.vmpsp);
+ OFFSET(_PT_ER_VMBADVA, pt_regs, hvmer.vmbadva);
+ DEFINE(_PT_REGS_SIZE, sizeof(struct pt_regs));
+ BLANK();
+
+ COMMENT("Hexagon thread_info definitions");
+ OFFSET(_THREAD_INFO_FLAGS, thread_info, flags);
+ OFFSET(_THREAD_INFO_PT_REGS, thread_info, regs);
+ OFFSET(_THREAD_INFO_SP, thread_info, sp);
+ DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ BLANK();
+
+ COMMENT("Hexagon hexagon_switch_stack definitions");
+ OFFSET(_SWITCH_R1716, hexagon_switch_stack, r1716);
+ OFFSET(_SWITCH_R1918, hexagon_switch_stack, r1918);
+ OFFSET(_SWITCH_R2120, hexagon_switch_stack, r2120);
+ OFFSET(_SWITCH_R2322, hexagon_switch_stack, r2322);
+
+ OFFSET(_SWITCH_R2524, hexagon_switch_stack, r2524);
+ OFFSET(_SWITCH_R2726, hexagon_switch_stack, r2726);
+ OFFSET(_SWITCH_FP, hexagon_switch_stack, fp);
+ OFFSET(_SWITCH_LR, hexagon_switch_stack, lr);
+ DEFINE(_SWITCH_STACK_SIZE, sizeof(struct hexagon_switch_stack));
+ BLANK();
+
+ COMMENT("Hexagon task_struct definitions");
+ OFFSET(_TASK_THREAD_INFO, task_struct, stack);
+ OFFSET(_TASK_STRUCT_THREAD, task_struct, thread);
+
+ COMMENT("Hexagon thread_struct definitions");
+ OFFSET(_THREAD_STRUCT_SWITCH_SP, thread_struct, switch_sp);
+
+ return 0;
+}
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
new file mode 100644
index 000000000..7ebe7ad19
--- /dev/null
+++ b/arch/hexagon/kernel/dma.c
@@ -0,0 +1,219 @@
+/*
+ * DMA implementation for Hexagon
+ *
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
+#include <linux/bootmem.h>
+#include <linux/genalloc.h>
+#include <asm/dma-mapping.h>
+#include <linux/module.h>
+#include <asm/page.h>
+
+#define HEXAGON_MAPPING_ERROR 0
+
+const struct dma_map_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
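+/* DMA addresses here are physical addresses, so mapping one back to a
+   kernel virtual address is a simple phys_to_virt(). */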
+static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
+{
+ return phys_to_virt((unsigned long) dma_addr);
+}
+
+static struct gen_pool *coherent_pool;
+
+
+/* Allocates from a pool of uncached memory that was reserved at boot time */
+
+static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag,
+ unsigned long attrs)
+{
+ void *ret;
+
+ /*
+ * Our max_low_pfn should have been backed off by 16MB in
+ * mm/init.c to create DMA coherent space. Use that as the VA
+ * for the pool.
+ */
+
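+	/* First use: create the pool with page-size granularity; -1 means any node. */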
+ if (coherent_pool == NULL) {
+ coherent_pool = gen_pool_create(PAGE_SHIFT, -1);
+
+ if (coherent_pool == NULL)
+ panic("Can't create %s() memory pool!", __func__);
+ else
+ gen_pool_add(coherent_pool,
+ (unsigned long)pfn_to_virt(max_low_pfn),
+ hexagon_coherent_pool_size, -1);
+ }
+
+ ret = (void *) gen_pool_alloc(coherent_pool, size);
+
+ if (ret) {
+ memset(ret, 0, size);
+ *dma_addr = (dma_addr_t) virt_to_phys(ret);
+ } else
+ *dma_addr = ~0;
+
+ return ret;
+}
+
+static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
+}
+
+static int check_addr(const char *name, struct device *hwdev,
+ dma_addr_t bus, size_t size)
+{
+ if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
+ if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
+ printk(KERN_ERR
+ "%s: overflow %Lx+%zu of device mask %Lx\n",
+ name, (long long)bus, size,
+ (long long)*hwdev->dma_mask);
+ return 0;
+ }
+ return 1;
+}
+
+static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = sg_phys(s);
+ if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
+ return 0;
+
+ s->dma_length = s->length;
+
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+ continue;
+
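+		/* Not cache coherent: flush the CPU cache over this segment. */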
+ flush_dcache_range(dma_addr_to_virt(s->dma_address),
+ dma_addr_to_virt(s->dma_address + s->length));
+ }
+
+ return nents;
+}
+
+/*
+ * address is virtual
+ */
+static inline void dma_sync(void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
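+		/* Write back dirty lines so the device reads up-to-date data. */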
+ hexagon_clean_dcache_range((unsigned long) addr,
+ (unsigned long) addr + size);
+ break;
+ case DMA_FROM_DEVICE:
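+		/* Invalidate so later CPU reads fetch what the device wrote. */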
+ hexagon_inv_dcache_range((unsigned long) addr,
+ (unsigned long) addr + size);
+ break;
+ case DMA_BIDIRECTIONAL:
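+		/* Both directions: write back and invalidate. */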
+ flush_dcache_range((unsigned long) addr,
+ (unsigned long) addr + size);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * hexagon_map_page() - maps an address for device DMA
+ * @dev: pointer to DMA device
+ * @page: pointer to page struct of DMA memory
+ * @offset: offset within page
+ * @size: size of memory to map
+ * @dir: transfer direction
+ * @attrs: DMA attributes (DMA_ATTR_SKIP_CPU_SYNC is honored)
+ *
+ * Called to map a memory address to a DMA address prior
+ * to accesses to/from device.
+ *
+ * We don't particularly have many hoops to jump through
+ * so far. Straight translation between phys and virtual.
+ *
+ * DMA is not cache coherent so sync is necessary; this
+ * seems to be a convenient place to do it.
+ *
+ */
+static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t bus = page_to_phys(page) + offset;
+ WARN_ON(size == 0);
+
+ if (!check_addr("map_single", dev, bus, size))
+ return HEXAGON_MAPPING_ERROR;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_sync(dma_addr_to_virt(bus), size, dir);
+
+ return bus;
+}
+
+static void hexagon_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync(dma_addr_to_virt(dma_handle), size, dir);
+}
+
+static void hexagon_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync(dma_addr_to_virt(dma_handle), size, dir);
+}
+
+static int hexagon_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == HEXAGON_MAPPING_ERROR;
+}
+
+const struct dma_map_ops hexagon_dma_ops = {
+ .alloc = hexagon_dma_alloc_coherent,
+ .free = hexagon_free_coherent,
+ .map_sg = hexagon_map_sg,
+ .map_page = hexagon_map_page,
+ .sync_single_for_cpu = hexagon_sync_single_for_cpu,
+ .sync_single_for_device = hexagon_sync_single_for_device,
+ .mapping_error = hexagon_mapping_error,
+};
+
+void __init hexagon_dma_init(void)
+{
+ if (dma_ops)
+ return;
+
+ dma_ops = &hexagon_dma_ops;
+}
diff --git a/arch/hexagon/kernel/head.S b/arch/hexagon/kernel/head.S
new file mode 100644
index 000000000..b9b63d085
--- /dev/null
+++ b/arch/hexagon/kernel/head.S
@@ -0,0 +1,237 @@
+/*
+ * Early kernel startup code for Hexagon
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/asm-offsets.h>
+#include <asm/mem-layout.h>
+#include <asm/vm_mmu.h>
+#include <asm/page.h>
+#include <asm/hexagon_vm.h>
+
+#define SEGTABLE_ENTRIES #0x0e0
+
+ __INIT
+ENTRY(stext)
+ /*
+ * VMM will already have set up true vector page, MMU, etc.
+ * To set up initial kernel identity map, we have to pass
+ * the VMM a pointer to some canonical page tables. In
+ * this implementation, we're assuming that we've got
+ * them precompiled. Generate value in R24, as we'll need
+ * it again shortly.
+ */
+ r24.L = #LO(swapper_pg_dir)
+ r24.H = #HI(swapper_pg_dir)
+
+ /*
+ * Symbol is kernel segment address, but we need
+ * the logical/physical address.
+ */
+ r25 = pc;
+ r2.h = #0xffc0;
+ r2.l = #0x0000;
+ r25 = and(r2,r25); /* R25 holds PHYS_OFFSET now */
+ r1.h = #HI(PAGE_OFFSET);
+ r1.l = #LO(PAGE_OFFSET);
+ r24 = sub(r24,r1); /* swapper_pg_dir - PAGE_OFFSET */
+ r24 = add(r24,r25); /* + PHYS_OFFSET */
+
+ r0 = r24; /* aka __pa(swapper_pg_dir) */
+
+ /*
+ * Initialize page dir to make the virtual and physical
+ * addresses where the kernel was loaded be identical.
+ * Done in 4MB chunks.
+ */
+#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
+ | __HEXAGON_C_WB_L2 << 6 \
+ | __HVM_PDE_S_4MB)
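+/* i.e., readable, writable, executable, L2 write-back cacheable, 4MB page size */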
+
+ /*
+ * Get number of VA=PA entries; only really needed for jump
+ * to hyperspace; gets blown away immediately after
+ */
+
+ {
+ r1.l = #LO(_end);
+ r2.l = #LO(stext);
+ r3 = #1;
+ }
+ {
+ r1.h = #HI(_end);
+ r2.h = #HI(stext);
+ r3 = asl(r3, #22);
+ }
+ {
+ r1 = sub(r1, r2);
+ r3 = add(r3, #-1);
+ } /* r1 = _end - stext */
+ r1 = add(r1, r3); /* + (4M-1) */
+ r26 = lsr(r1, #22); /* / 4M = # of entries */
+
+ r1 = r25;
+ r2.h = #0xffc0;
+ r2.l = #0x0000; /* round back down to 4MB boundary */
+ r1 = and(r1,r2);
+ r2 = lsr(r1, #22) /* 4MB page number */
+ r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
+ r0 = add(r0,r2) /* r0 = address of correct PTE */
+ r2 = #PTE_BITS
+ r1 = add(r1,r2) /* r1 = 4MB PTE for the first entry */
+ r2.h = #0x0040
+ r2.l = #0x0000 /* 4MB increments */
+ loop0(1f,r26);
+1:
+ memw(r0 ++ #4) = r1
+ { r1 = add(r1, r2); } :endloop0
+
+ /* Also need to overwrite the initial 0xc0000000 entries */
+ /* PAGE_OFFSET >> (4MB shift - 4 bytes per entry shift) */
+ R1.H = #HI(PAGE_OFFSET >> (22 - 2))
+ R1.L = #LO(PAGE_OFFSET >> (22 - 2))
+
+ r0 = add(r1, r24); /* advance to 0xc0000000 entry */
+ r1 = r25;
+ r2.h = #0xffc0;
+ r2.l = #0x0000; /* round back down to 4MB boundary */
+ r1 = and(r1,r2); /* for huge page */
+ r2 = #PTE_BITS
+ r1 = add(r1,r2);
+ r2.h = #0x0040
+ r2.l = #0x0000 /* 4MB increments */
+
+ loop0(1f,SEGTABLE_ENTRIES);
+1:
+ memw(r0 ++ #4) = r1;
+ { r1 = add(r1,r2); } :endloop0
+
+ r0 = r24;
+
+ /*
+ * The subroutine wrapper around the virtual instruction touches
+ * no memory, so we should be able to use it even here.
+ * Note that in this version, R1 and R2 get "clobbered"; see
+ * vm_ops.S
+ */
+ r1 = #VM_TRANS_TYPE_TABLE
+ call __vmnewmap;
+
+ /* Jump into virtual address range. */
+
+ r31.h = #hi(__head_s_vaddr_target)
+ r31.l = #lo(__head_s_vaddr_target)
+ jumpr r31
+
+ /* Insert trippy space effects. */
+
+__head_s_vaddr_target:
+ /*
+ * Tear down VA=PA translation now that we are running
+ * in kernel virtual space.
+ */
+ r0 = #__HVM_PDE_S_INVALID
+
+ r1.h = #0xffc0;
+ r1.l = #0x0000;
+ r2 = r25; /* phys_offset */
+ r2 = and(r1,r2);
+
+ r1.l = #lo(swapper_pg_dir)
+ r1.h = #hi(swapper_pg_dir)
+ r2 = lsr(r2, #22) /* 4MB page number */
+ r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
+ r1 = add(r1,r2);
+ loop0(1f,r26)
+
+1:
+ {
+ memw(R1 ++ #4) = R0
+ }:endloop0
+
+ r0 = r24
+ r1 = #VM_TRANS_TYPE_TABLE
+ call __vmnewmap
+
+ /* Go ahead and install the trap0 return so angel calls work */
+ r0.h = #hi(_K_provisional_vec)
+ r0.l = #lo(_K_provisional_vec)
+ call __vmsetvec
+
+ /*
+ * OK, at this point we should start to be much more careful,
+ * we're going to enter C code and start touching memory
+ * in all sorts of places.
+ * This means:
+ * SGP needs to be OK
+ * Need to lock shared resources
+ * A bunch of other things that will cause
+ * all kinds of painful bugs
+ */
+
+ /*
+ * Stack pointer should be pointed at the init task's
+ * thread stack, which should have been declared in arch/init_task.c.
+ * So uhhhhh...
+ * It's accessible via the init_thread_union, which is a union
+ * of a thread_info struct and a stack; of course, the top
+ * of the stack is not for you. The end of the stack
+ * is simply init_thread_union + THREAD_SIZE.
+ */
+
+ {r29.H = #HI(init_thread_union); r0.H = #HI(_THREAD_SIZE); }
+ {r29.L = #LO(init_thread_union); r0.L = #LO(_THREAD_SIZE); }
+
+ /* initialize the register used to point to current_thread_info */
+	/* Fixme: THREADINFO_REG can't be R2 because the memset call below clobbers r2. */
+ {r29 = add(r29,r0); THREADINFO_REG = r29; }
+
+ /* Hack: zero bss; */
+ { r0.L = #LO(__bss_start); r1 = #0; r2.l = #LO(__bss_stop); }
+ { r0.H = #HI(__bss_start); r2.h = #HI(__bss_stop); }
+
+ r2 = sub(r2,r0);
+ call memset;
+
+ /* Set PHYS_OFFSET; should be in R25 */
+#ifdef CONFIG_HEXAGON_PHYS_OFFSET
+ r0.l = #LO(__phys_offset);
+ r0.h = #HI(__phys_offset);
+ memw(r0) = r25;
+#endif
+
+ /* Time to make the doughnuts. */
+ call start_kernel
+
+ /*
+ * Should not reach here.
+ */
+1:
+ jump 1b
+
+.p2align PAGE_SHIFT
+ENTRY(external_cmdline_buffer)
+ .fill _PAGE_SIZE,1,0
+
+.data
+.p2align PAGE_SHIFT
+ENTRY(empty_zero_page)
+ .fill _PAGE_SIZE,1,0
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
new file mode 100644
index 000000000..d13d4a06e
--- /dev/null
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -0,0 +1,55 @@
+/*
+ * Export of symbols defined in assembly files and/or libgcc.
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <asm/hexagon_vm.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+
+/* Additional functions */
+EXPORT_SYMBOL(__clear_user_hexagon);
+EXPORT_SYMBOL(raw_copy_from_user);
+EXPORT_SYMBOL(raw_copy_to_user);
+EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__vmgetie);
+EXPORT_SYMBOL(__vmsetie);
+EXPORT_SYMBOL(__vmyield);
+EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+
+/* Additional variables */
+EXPORT_SYMBOL(__phys_offset);
+EXPORT_SYMBOL(_dflt_cache_att);
+
+#define DECLARE_EXPORT(name) \
+ extern void name(void); EXPORT_SYMBOL(name)
+
+/* Symbols found in libgcc that assorted kernel modules need */
+DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes);
+
+/* Additional functions */
+DECLARE_EXPORT(__divsi3);
+DECLARE_EXPORT(__modsi3);
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__umodsi3);
+DECLARE_EXPORT(csum_tcpudp_magic);
diff --git a/arch/hexagon/kernel/irq_cpu.c b/arch/hexagon/kernel/irq_cpu.c
new file mode 100644
index 000000000..85883e1fd
--- /dev/null
+++ b/arch/hexagon/kernel/irq_cpu.c
@@ -0,0 +1,90 @@
+/*
+ * First-level interrupt controller model for Hexagon.
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+#include <asm/hexagon_vm.h>
+
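+/* The mask/unmask/EOI operations below map straight onto VM interrupt-control hypercalls. */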
+static void mask_irq(struct irq_data *data)
+{
+ __vmintop_locdis((long) data->irq);
+}
+
+static void mask_irq_num(unsigned int irq)
+{
+ __vmintop_locdis((long) irq);
+}
+
+static void unmask_irq(struct irq_data *data)
+{
+ __vmintop_locen((long) data->irq);
+}
+
+/* This is actually all we need for handle_fasteoi_irq */
+static void eoi_irq(struct irq_data *data)
+{
+ __vmintop_globen((long) data->irq);
+}
+
+/* Power management wake call. We don't need this; however,
+ * if it is absent, an -ENXIO error is returned to the
+ * msm_serial driver, and it fails to initialize correctly.
+ * This is a bug in the msm_serial driver, but, for now, we
+ * work around it here by providing this bogus handler.
+ * XXX FIXME!!! remove this when msm_serial is fixed.
+ */
+static int set_wake(struct irq_data *data, unsigned int on)
+{
+ return 0;
+}
+
+static struct irq_chip hexagon_irq_chip = {
+ .name = "HEXAGON",
+ .irq_mask = mask_irq,
+ .irq_unmask = unmask_irq,
+ .irq_set_wake = set_wake,
+ .irq_eoi = eoi_irq
+};
+
+/**
+ * The hexagon core comes with a first-level interrupt controller
+ * with 32 total possible interrupts. When the core is embedded
+ * into different systems/platforms, it is typically wrapped by
+ * macro cells that provide one or more second-level interrupt
+ * controllers that are cascaded into one or more of the first-level
+ * interrupts handled here. The precise wiring of these other
+ * irqs varies from platform to platform, and is set up and configured
+ * in the platform-specific files.
+ *
+ * The first-level interrupt controller is wrapped by the VM, which
+ * virtualizes the interrupt controller for us. It provides a very
+ * simple, fast & efficient API, and so the fasteoi handler is
+ * appropriate for this case.
+ */
+void __init init_IRQ(void)
+{
+ int irq;
+
+ for (irq = 0; irq < HEXAGON_CPUINTS; irq++) {
+ mask_irq_num(irq);
+ irq_set_chip_and_handler(irq, &hexagon_irq_chip,
+ handle_fasteoi_irq);
+ }
+}
diff --git a/arch/hexagon/kernel/kgdb.c b/arch/hexagon/kernel/kgdb.c
new file mode 100644
index 000000000..16c24b22d
--- /dev/null
+++ b/arch/hexagon/kernel/kgdb.c
@@ -0,0 +1,259 @@
+/*
+ * arch/hexagon/kernel/kgdb.c - Hexagon KGDB Support
+ *
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/kdebug.h>
+#include <linux/kgdb.h>
+
+/* All registers are 4 bytes, for now */
+#define GDB_SIZEOF_REG 4
+
+/* The register names are used during printing of the regs;
+ * Keep these at three letters to pretty-print. */
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ { " r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, r00)},
+ { " r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, r01)},
+ { " r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, r02)},
+ { " r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, r03)},
+ { " r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, r04)},
+ { " r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, r05)},
+ { " r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, r06)},
+ { " r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, r07)},
+ { " r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, r08)},
+ { " r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, r09)},
+ { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, r10)},
+ { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, r11)},
+ { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, r12)},
+ { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, r13)},
+ { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, r14)},
+ { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, r15)},
+ { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, r16)},
+ { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, r17)},
+ { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, r18)},
+ { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, r19)},
+ { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, r20)},
+ { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, r21)},
+ { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, r22)},
+ { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, r23)},
+ { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, r24)},
+ { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, r25)},
+ { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, r26)},
+ { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, r27)},
+ { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, r28)},
+ { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, r29)},
+ { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, r30)},
+ { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, r31)},
+
+ { "usr", GDB_SIZEOF_REG, offsetof(struct pt_regs, usr)},
+ { "preds", GDB_SIZEOF_REG, offsetof(struct pt_regs, preds)},
+ { " m0", GDB_SIZEOF_REG, offsetof(struct pt_regs, m0)},
+ { " m1", GDB_SIZEOF_REG, offsetof(struct pt_regs, m1)},
+ { "sa0", GDB_SIZEOF_REG, offsetof(struct pt_regs, sa0)},
+ { "sa1", GDB_SIZEOF_REG, offsetof(struct pt_regs, sa1)},
+ { "lc0", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc0)},
+ { "lc1", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc1)},
+ { " gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
+ { "ugp", GDB_SIZEOF_REG, offsetof(struct pt_regs, ugp)},
+ { "cs0", GDB_SIZEOF_REG, offsetof(struct pt_regs, cs0)},
+ { "cs1", GDB_SIZEOF_REG, offsetof(struct pt_regs, cs1)},
+ { "psp", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmpsp)},
+ { "elr", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmel)},
+ { "est", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmest)},
+ { "badva", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmbadva)},
+ { "restart_r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, restart_r0)},
+ { "syscall_nr", GDB_SIZEOF_REG, offsetof(struct pt_regs, syscall_nr)},
+};
+
+struct kgdb_arch arch_kgdb_ops = {
+ /* trap0(#0xDB) 0x0cdb0054 */
+ .gdb_bpt_instr = {0x54, 0x00, 0xdb, 0x0c},
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ *((unsigned long *) mem) = *((unsigned long *) ((void *)regs +
+ dbg_reg_def[regno].offset));
+
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ *((unsigned long *) ((void *)regs + dbg_reg_def[regno].offset)) =
+ *((unsigned long *) mem);
+
+ return 0;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ instruction_pointer(regs) = pc;
+}
+
+#ifdef CONFIG_SMP
+
+/**
+ * kgdb_roundup_cpus - Get other CPUs into a holding pattern
+ * @flags: Current IRQ state
+ *
+ * On SMP systems, we need to get the attention of the other CPUs
+ * and get them into a known state. This should do what is needed
+ * to get the other CPUs to call kgdb_wait(). Note that on some arches,
+ * the NMI approach is not used for rounding up all the CPUs. For example,
+ * in case of MIPS, smp_call_function() is used to roundup CPUs. In
+ * this case, we have to make sure that interrupts are enabled before
+ * calling smp_call_function(). The argument to this function is
+ * the flags that will be used when restoring the interrupts. There is
+ * local_irq_save() call before kgdb_roundup_cpus().
+ *
+ * On non-SMP systems, this is not called.
+ */
+
+static void hexagon_kgdb_nmi_hook(void *ignored)
+{
+ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+ local_irq_enable();
+ smp_call_function(hexagon_kgdb_nmi_hook, NULL, 0);
+ local_irq_disable();
+}
+#endif
+
+
+/* Not yet working */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
+ struct task_struct *task)
+{
+ struct pt_regs *thread_regs;
+
+ if (task == NULL)
+ return;
+
+ /* Initialize to zero */
+ memset(gdb_regs, 0, NUMREGBYTES);
+
+ /* Otherwise, we have only some registers from switch_to() */
+ thread_regs = task_pt_regs(task);
+ gdb_regs[0] = thread_regs->r00;
+}
+
+/**
+ * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
+ * @vector: The error vector of the exception that happened.
+ * @signo: The signal number of the exception that happened.
+ * @err_code: The error code of the exception that happened.
+ * @remcom_in_buffer: The buffer of the packet we have read.
+ * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ * @regs: The &struct pt_regs of the current process.
+ *
+ * This function MUST handle the 'c' and 's' command packets,
+ * as well as packets to set / remove a hardware breakpoint, if used.
+ * If there are additional packets which the hardware needs to handle,
+ * they are handled here. The code should return -1 if it wants to
+ * process more packets, and a %0 or %1 if it wants to exit from the
+ * kgdb callback.
+ *
+ * Not yet working.
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *linux_regs)
+{
+ switch (remcom_in_buffer[0]) {
+ case 's':
+ case 'c':
+ return 0;
+ }
+ /* Stay in the debugger. */
+ return -1;
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+ /* cpu roundup */
+ if (atomic_read(&kgdb_active) != -1) {
+ kgdb_nmicallback(smp_processor_id(), args->regs);
+ return NOTIFY_STOP;
+ }
+
+ if (user_mode(args->regs))
+ return NOTIFY_DONE;
+
+ if (kgdb_handle_exception(args->trapnr & 0xff, args->signr, args->err,
+ args->regs))
+ return NOTIFY_DONE;
+
+ return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __kgdb_notify(ptr, cmd);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_notify,
+
+ /*
+ * Lowest-prio notifier priority, we want to be notified last:
+ */
+ .priority = -INT_MAX,
+};
+
+/**
+ * kgdb_arch_init - Perform any architecture specific initialization.
+ *
+ * This function will handle the initialization of any architecture
+ * specific callbacks.
+ */
+int kgdb_arch_init(void)
+{
+ return register_die_notifier(&kgdb_notifier);
+}
+
+/**
+ * kgdb_arch_exit - Perform any architecture specific uninitialization.
+ *
+ * This function will handle the uninitialization of any architecture
+ * specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
diff --git a/arch/hexagon/kernel/module.c b/arch/hexagon/kernel/module.c
new file mode 100644
index 000000000..477d07a56
--- /dev/null
+++ b/arch/hexagon/kernel/module.c
@@ -0,0 +1,162 @@
+/*
+ * Kernel module loader for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <asm/module.h>
+#include <linux/elf.h>
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/vmalloc.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt , ...)
+#endif
+
+/*
+ * module_frob_arch_sections - tweak got/plt sections.
+ * @hdr - pointer to elf header
+ * @sechdrs - pointer to elf load section headers
+ * @secstrings - symbol names
+ * @mod - pointer to module
+ */
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod)
+{
+ unsigned int i;
+ int found = 0;
+
+	/* Look for .plt and/or .got.plt and/or .rela.plt sections */
+ for (i = 0; i < hdr->e_shnum; i++) {
+ DEBUGP("Section %d is %s\n", i,
+ secstrings + sechdrs[i].sh_name);
+ if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
+ found = i+1;
+ if (strcmp(secstrings + sechdrs[i].sh_name, ".got.plt") == 0)
+ found = i+1;
+ if (strcmp(secstrings + sechdrs[i].sh_name, ".rela.plt") == 0)
+ found = i+1;
+ }
+
+	/* At this time, we don't support modules compiled with -shared */
+ if (found) {
+ printk(KERN_WARNING
+ "Module '%s' contains unexpected .plt/.got sections.\n",
+ mod->name);
+ /* return -ENOEXEC; */
+ }
+
+ return 0;
+}
+
+/*
+ * apply_relocate_add - perform rela relocations.
+ * @sechdrs - pointer to section headers
+ * @strtab - string table
+ * @symindex - section index of the symbol table
+ * @relsec - section index of the relocation entries to apply
+ * @module - pointer to module
+ *
+ * Perform rela relocations.
+ */
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *module)
+{
+ unsigned int i;
+ Elf32_Sym *sym;
+ uint32_t *location;
+ uint32_t value;
+ unsigned int nrelocs = sechdrs[relsec].sh_size / sizeof(Elf32_Rela);
+ Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Word sym_info = sechdrs[relsec].sh_info;
+ Elf32_Sym *sym_base = (Elf32_Sym *) sechdrs[symindex].sh_addr;
+ void *loc_base = (void *) sechdrs[sym_info].sh_addr;
+
+ DEBUGP("Applying relocations in section %u to section %u base=%p\n",
+ relsec, sym_info, loc_base);
+
+ for (i = 0; i < nrelocs; i++) {
+
+ /* Symbol to relocate */
+ sym = sym_base + ELF32_R_SYM(rela[i].r_info);
+
+ /* Where to make the change */
+ location = loc_base + rela[i].r_offset;
+
+ /* `Everything is relative'. */
+ value = sym->st_value + rela[i].r_addend;
+
+ DEBUGP("%d: value=%08x loc=%p reloc=%d symbol=%s\n",
+ i, value, location, ELF32_R_TYPE(rela[i].r_info),
+ sym->st_name ?
+ &strtab[sym->st_name] : "(anonymous)");
+
+ switch (ELF32_R_TYPE(rela[i].r_info)) {
+ case R_HEXAGON_B22_PCREL: {
+ int dist = (int)(value - (uint32_t)location);
+ if ((dist < -0x00800000) ||
+ (dist >= 0x00800000)) {
+ printk(KERN_ERR
+ "%s: %s: %08x=%08x-%08x %s\n",
+ module->name,
+ "R_HEXAGON_B22_PCREL reloc out of range",
+ dist, value, (uint32_t)location,
+ sym->st_name ?
+ &strtab[sym->st_name] : "(anonymous)");
+ return -ENOEXEC;
+ }
+ DEBUGP("B22_PCREL contents: %08X.\n", *location);
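+			/* dist bits [13:0] go to insn bits [13:0];
+			   dist bits [22:14] go to insn bits [24:16]. */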
+ *location &= ~0x01ff3fff;
+ *location |= 0x00003fff & dist;
+ *location |= 0x01ff0000 & (dist<<2);
+ DEBUGP("Contents after reloc: %08x\n", *location);
+ break;
+ }
+ case R_HEXAGON_HI16:
+ value = (value>>16) & 0xffff;
+ /* fallthrough */
+ case R_HEXAGON_LO16:
+ *location &= ~0x00c03fff;
+ *location |= value & 0x3fff;
+ *location |= (value & 0xc000) << 8;
+ break;
+ case R_HEXAGON_32:
+ *location = value;
+ break;
+ case R_HEXAGON_32_PCREL:
+ *location = value - (uint32_t)location;
+ break;
+ case R_HEXAGON_PLT_B22_PCREL:
+ case R_HEXAGON_GOTOFF_LO16:
+ case R_HEXAGON_GOTOFF_HI16:
+ printk(KERN_ERR "%s: GOT/PLT relocations unsupported\n",
+ module->name);
+ return -ENOEXEC;
+ default:
+ printk(KERN_ERR "%s: unknown relocation: %u\n",
+ module->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
new file mode 100644
index 000000000..656050c2e
--- /dev/null
+++ b/arch/hexagon/kernel/process.c
@@ -0,0 +1,213 @@
+/*
+ * Process creation support for Hexagon
+ *
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/tick.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/tracehook.h>
+
+/*
+ * Program thread launch. Often defined as a macro in processor.h,
+ * but we're shooting for a small footprint and it's not an inner-loop
+ * performance-critical operation.
+ *
+ * The Hexagon ABI specifies that R28 is zeroed before program launch,
+ * so that gets automatically done here. If we ever stop doing that here,
+ * we'll probably want to define the ELF_PLAT_INIT macro.
+ */
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+ /* We want to zero all data-containing registers. Is this overkill? */
+ memset(regs, 0, sizeof(*regs));
+ /* We might want to also zero all Processor registers here */
+ pt_set_usermode(regs);
+ pt_set_elr(regs, pc);
+ pt_set_rte_sp(regs, sp);
+}
+
+/*
+ * Spin, or better still, execute a hardware or VM wait instruction,
+ * provided the hardware or VM offers wait termination even though
+ * interrupts are disabled.
+ */
+void arch_cpu_idle(void)
+{
+ __vmwait();
+ /* interrupts wake us up, but irqs are still disabled */
+ local_irq_enable();
+}
+
+/*
+ * Copy architecture-specific thread state
+ */
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long arg, struct task_struct *p)
+{
+ struct thread_info *ti = task_thread_info(p);
+ struct hexagon_switch_stack *ss;
+ struct pt_regs *childregs;
+ asmlinkage void ret_from_fork(void);
+
+ childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
+ sizeof(*childregs));
+
+ ti->regs = childregs;
+
+ /*
+ * Establish kernel stack pointer and initial PC for new thread
+ * Note that unlike the usual situation, we do not copy the
+ * parent's callee-saved here; those are in pt_regs and whatever
+ * we leave here will be overridden on return to userland.
+ */
+ ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
+ sizeof(*ss));
+ ss->lr = (unsigned long)ret_from_fork;
+ p->thread.switch_sp = ss;
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(childregs, 0, sizeof(struct pt_regs));
+ /* r24 <- fn, r25 <- arg */
+ ss->r24 = usp;
+ ss->r25 = arg;
+ pt_set_kmode(childregs);
+ return 0;
+ }
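+	/* User thread: start from a copy of the parent's register state. */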
+ memcpy(childregs, current_pt_regs(), sizeof(*childregs));
+ ss->r2524 = 0;
+
+ if (usp)
+ pt_set_rte_sp(childregs, usp);
+
+ /* Child sees zero return value */
+ childregs->r00 = 0;
+
+ /*
+ * The clone syscall has the C signature:
+ * int [r0] clone(int flags [r0],
+ * void *child_frame [r1],
+ * void *parent_tid [r2],
+ * void *child_tid [r3],
+ * void *thread_control_block [r4]);
+ * ugp is used to provide TLS support.
+ */
+ if (clone_flags & CLONE_SETTLS)
+ childregs->ugp = childregs->r04;
+
+ /*
+	 * The parent sees the new pid -- not necessary, and not even
+	 * possible at this point in the fork process.
+	 * Might also want to set things like ti->addr_limit.
+ */
+
+ return 0;
+}
+
+/*
+ * Release any architecture-specific resources locked by thread
+ */
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+/*
+ * Some archs flush debug and FPU info here
+ */
+void flush_thread(void)
+{
+}
+
+/*
+ * The "wait channel" terminology is archaic, but what we want
+ * is an identification of the point at which the scheduler
+ * was invoked by a blocked thread.
+ */
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long fp, pc;
+ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ stack_page = (unsigned long)task_stack_page(p);
+ fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp;
+ do {
+ if (fp < (stack_page + sizeof(struct thread_info)) ||
+ fp >= (THREAD_SIZE - 8 + stack_page))
+ return 0;
+ pc = ((unsigned long *)fp)[1];
+ if (!in_sched_functions(pc))
+ return pc;
+ fp = *(unsigned long *) fp;
+ } while (count++ < 16);
+
+ return 0;
+}
+
+/*
+ * Required placeholder.
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+ return 0;
+}
+
+
+/*
+ * Called on the exit path of event entry; see vm_entry.S
+ *
+ * Interrupts will already be disabled.
+ *
+ * Returns 0 if there's no need to re-check for more work.
+ */
+
+int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
+{
+ if (!(thread_info_flags & _TIF_WORK_MASK)) {
+ return 0;
+ } /* shortcut -- no work to be done */
+
+ local_irq_enable();
+
+ if (thread_info_flags & _TIF_NEED_RESCHED) {
+ schedule();
+ return 1;
+ }
+
+ if (thread_info_flags & _TIF_SIGPENDING) {
+ do_signal(regs);
+ return 1;
+ }
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ return 1;
+ }
+
+ /* Should not even reach here */
+ panic("%s: bad thread_info flags 0x%08x\n", __func__,
+ thread_info_flags);
+}
diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c
new file mode 100644
index 000000000..fa76493c1
--- /dev/null
+++ b/arch/hexagon/kernel/ptrace.c
@@ -0,0 +1,204 @@
+/*
+ * Ptrace support for Hexagon
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/user.h>
+#include <linux/elf.h>
+
+#include <asm/user.h>
+
+#if arch_has_single_step()
+/* Both called from ptrace_resume */
+void user_enable_single_step(struct task_struct *child)
+{
+ pt_set_singlestep(task_pt_regs(child));
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ pt_clr_singlestep(task_pt_regs(child));
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+#endif
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+ unsigned int dummy;
+ struct pt_regs *regs = task_pt_regs(target);
+
+
+ if (!regs)
+ return -EIO;
+
+ /* The general idea here is that the copyout must happen in
+	 * exactly the same order in which userspace expects these
+ * regs. Now, the sequence in userspace does not match the
+ * sequence in the kernel, so everything past the 32 gprs
+ * happens one at a time.
+ */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &regs->r00, 0, 32*sizeof(unsigned long));
+
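+/* Copy the next kernel register into its fixed slot in user_regs_struct. */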
+#define ONEXT(KPT_REG, USR_REG) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, \
+ KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
+ offsetof(struct user_regs_struct, USR_REG) + \
+ sizeof(unsigned long));
+
+ /* Must be exactly same sequence as struct user_regs_struct */
+ ONEXT(&regs->sa0, sa0);
+ ONEXT(&regs->lc0, lc0);
+ ONEXT(&regs->sa1, sa1);
+ ONEXT(&regs->lc1, lc1);
+ ONEXT(&regs->m0, m0);
+ ONEXT(&regs->m1, m1);
+ ONEXT(&regs->usr, usr);
+ ONEXT(&regs->preds, p3_0);
+ ONEXT(&regs->gp, gp);
+ ONEXT(&regs->ugp, ugp);
+ ONEXT(&pt_elr(regs), pc);
+ dummy = pt_cause(regs);
+ ONEXT(&dummy, cause);
+ ONEXT(&pt_badva(regs), badva);
+#if CONFIG_HEXAGON_ARCH_VERSION >= 4
+ ONEXT(&regs->cs0, cs0);
+ ONEXT(&regs->cs1, cs1);
+#endif
+
+ /* Pad the rest with zeros, if needed */
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ offsetof(struct user_regs_struct, pad1), -1);
+ return ret;
+}
+
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ unsigned long bucket;
+ struct pt_regs *regs = task_pt_regs(target);
+
+ if (!regs)
+ return -EIO;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->r00, 0, 32*sizeof(unsigned long));
+
+#define INEXT(KPT_REG, USR_REG) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
+ offsetof(struct user_regs_struct, USR_REG) + \
+ sizeof(unsigned long));
+
+ /* Must be exactly same sequence as struct user_regs_struct */
+ INEXT(&regs->sa0, sa0);
+ INEXT(&regs->lc0, lc0);
+ INEXT(&regs->sa1, sa1);
+ INEXT(&regs->lc1, lc1);
+ INEXT(&regs->m0, m0);
+ INEXT(&regs->m1, m1);
+ INEXT(&regs->usr, usr);
+ INEXT(&regs->preds, p3_0);
+ INEXT(&regs->gp, gp);
+ INEXT(&regs->ugp, ugp);
+ INEXT(&pt_elr(regs), pc);
+
+ /* CAUSE and BADVA aren't writeable. */
+ INEXT(&bucket, cause);
+ INEXT(&bucket, badva);
+
+#if CONFIG_HEXAGON_ARCH_VERSION >= 4
+ INEXT(&regs->cs0, cs0);
+ INEXT(&regs->cs1, cs1);
+#endif
+
+ /* Ignore the rest, if needed */
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ offsetof(struct user_regs_struct, pad1), -1);
+
+ if (ret)
+ return ret;
+
+ /*
+ * This is special; SP is actually restored by the VM via the
+ * special event record which is set by the special trap.
+ */
+ regs->hvmer.vmpsp = regs->r29;
+ return 0;
+}
+
+enum hexagon_regset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset hexagon_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = genregs_get,
+ .set = genregs_set,
+ },
+};
+
+static const struct user_regset_view hexagon_user_view = {
+ .name = "hexagon",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = hexagon_regsets,
+ .e_flags = ELF_CORE_EFLAGS,
+ .n = ARRAY_SIZE(hexagon_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &hexagon_user_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+ /* Boilerplate - resolves to null inline if no HW single-step */
+ user_disable_single_step(child);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ return ptrace_request(child, request, addr, data);
+}
diff --git a/arch/hexagon/kernel/reset.c b/arch/hexagon/kernel/reset.c
new file mode 100644
index 000000000..76483c101
--- /dev/null
+++ b/arch/hexagon/kernel/reset.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/smp.h>
+#include <asm/hexagon_vm.h>
+
+void machine_power_off(void)
+{
+ smp_send_stop();
+ __vmstop();
+}
+
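+/* Halt and restart are left as no-ops; only power-off has a VM backend here. */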
+void machine_halt(void)
+{
+}
+
+void machine_restart(char *cmd)
+{
+}
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/hexagon/kernel/screen_info.c b/arch/hexagon/kernel/screen_info.c
new file mode 100644
index 000000000..1e1ceb18b
--- /dev/null
+++ b/arch/hexagon/kernel/screen_info.c
@@ -0,0 +1,3 @@
+#include <linux/screen_info.h>
+
+struct screen_info screen_info;
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
new file mode 100644
index 000000000..dc8c7e75b
--- /dev/null
+++ b/arch/hexagon/kernel/setup.c
@@ -0,0 +1,150 @@
+/*
+ * Arch related setup for Hexagon
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/mmzone.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/console.h>
+#include <linux/of_fdt.h>
+#include <asm/io.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/processor.h>
+#include <asm/hexagon_vm.h>
+#include <asm/vm_mmu.h>
+#include <asm/time.h>
+
+char cmd_line[COMMAND_LINE_SIZE];
+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+
+int on_simulator;
+
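+/* No calibration loop needed; loops_per_jiffy follows directly from the known thread clock. */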
+void calibrate_delay(void)
+{
+ loops_per_jiffy = thread_freq_mhz * 1000000 / HZ;
+}
+
+/*
+ * setup_arch - high level architectural setup routine
+ * @cmdline_p: pointer to pointer to command-line arguments
+ */
+
+void __init setup_arch(char **cmdline_p)
+{
+ char *p = &external_cmdline_buffer;
+
+ /*
+ * These will eventually be pulled in via either some hypervisor
+ * or devicetree description. Hardwiring for now.
+ */
+ pcycle_freq_mhz = 600;
+ thread_freq_mhz = 100;
+ sleep_clk_freq = 32000;
+
+ /*
+ * Set up event bindings to handle exceptions and interrupts.
+ */
+ __vmsetvec(_K_VM_event_vector);
+
+ printk(KERN_INFO "PHYS_OFFSET=0x%08lx\n", PHYS_OFFSET);
+
+ /*
+ * Simulator has a few differences from the hardware.
+ * For now, check uninitialized-but-mapped memory
+ * prior to invoking setup_arch_memory().
+ */
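+	/* The simulator leaves the pattern 0x1f1f1f1f in mapped-but-uninitialized memory. */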
+ if (*(int *)((unsigned long)_end + 8) == 0x1f1f1f1f)
+ on_simulator = 1;
+ else
+ on_simulator = 0;
+
+ if (p[0] != '\0')
+ strlcpy(boot_command_line, p, COMMAND_LINE_SIZE);
+ else
+ strlcpy(boot_command_line, default_command_line,
+ COMMAND_LINE_SIZE);
+
+ /*
+ * boot_command_line and the value set up by setup_arch
+ * are both picked up by the init code. If no reason to
+ * make them different, pass the same pointer back.
+ */
+ strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = cmd_line;
+
+ parse_early_param();
+
+ setup_arch_memory();
+
+#ifdef CONFIG_SMP
+ smp_start_cpus();
+#endif
+}
+
+/*
+ * Functions for dumping CPU info via /proc
+ * Probably should move to kernel/proc.c or something.
+ */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+/*
+ * Eventually this will dump information about
+ * CPU properties like ISA level, TLB size, etc.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ int cpu = (unsigned long) v - 1;
+
+#ifdef CONFIG_SMP
+ if (!cpu_online(cpu))
+ return 0;
+#endif
+
+ seq_printf(m, "processor\t: %d\n", cpu);
+ seq_printf(m, "model name\t: Hexagon Virtual Machine\n");
+ seq_printf(m, "BogoMips\t: %lu.%02lu\n",
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100);
+ seq_printf(m, "\n");
+ return 0;
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = &c_start,
+ .next = &c_next,
+ .stop = &c_stop,
+ .show = &show_cpuinfo,
+};
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c
new file mode 100644
index 000000000..78aa7304a
--- /dev/null
+++ b/arch/hexagon/kernel/signal.c
@@ -0,0 +1,270 @@
+/*
+ * Signal support for Hexagon processor
+ *
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/registers.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <linux/uaccess.h>
+#include <asm/ucontext.h>
+#include <asm/cacheflush.h>
+#include <asm/signal.h>
+#include <asm/vdso.h>
+
+struct rt_sigframe {
+ unsigned long tramp[2];
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ size_t frame_size)
+{
+ unsigned long sp = sigsp(regs->r29, ksig);
+
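+	/* Carve the frame out below sp, aligned down to an 8-byte boundary. */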
+ return (void __user *)((sp - frame_size) & ~(sizeof(long long) - 1));
+}
+
+static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ unsigned long tmp;
+ int err = 0;
+
+ err |= copy_to_user(&sc->sc_regs.r0, &regs->r00,
+ 32*sizeof(unsigned long));
+
+ err |= __put_user(regs->sa0, &sc->sc_regs.sa0);
+ err |= __put_user(regs->lc0, &sc->sc_regs.lc0);
+ err |= __put_user(regs->sa1, &sc->sc_regs.sa1);
+ err |= __put_user(regs->lc1, &sc->sc_regs.lc1);
+ err |= __put_user(regs->m0, &sc->sc_regs.m0);
+ err |= __put_user(regs->m1, &sc->sc_regs.m1);
+ err |= __put_user(regs->usr, &sc->sc_regs.usr);
+ err |= __put_user(regs->preds, &sc->sc_regs.p3_0);
+ err |= __put_user(regs->gp, &sc->sc_regs.gp);
+ err |= __put_user(regs->ugp, &sc->sc_regs.ugp);
+#if CONFIG_HEXAGON_ARCH_VERSION >= 4
+ err |= __put_user(regs->cs0, &sc->sc_regs.cs0);
+ err |= __put_user(regs->cs1, &sc->sc_regs.cs1);
+#endif
+ tmp = pt_elr(regs); err |= __put_user(tmp, &sc->sc_regs.pc);
+ tmp = pt_cause(regs); err |= __put_user(tmp, &sc->sc_regs.cause);
+ tmp = pt_badva(regs); err |= __put_user(tmp, &sc->sc_regs.badva);
+
+ return err;
+}
+
+static int restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext __user *sc)
+{
+ unsigned long tmp;
+ int err = 0;
+
+ err |= copy_from_user(&regs->r00, &sc->sc_regs.r0,
+ 32 * sizeof(unsigned long));
+
+ err |= __get_user(regs->sa0, &sc->sc_regs.sa0);
+ err |= __get_user(regs->lc0, &sc->sc_regs.lc0);
+ err |= __get_user(regs->sa1, &sc->sc_regs.sa1);
+ err |= __get_user(regs->lc1, &sc->sc_regs.lc1);
+ err |= __get_user(regs->m0, &sc->sc_regs.m0);
+ err |= __get_user(regs->m1, &sc->sc_regs.m1);
+ err |= __get_user(regs->usr, &sc->sc_regs.usr);
+ err |= __get_user(regs->preds, &sc->sc_regs.p3_0);
+ err |= __get_user(regs->gp, &sc->sc_regs.gp);
+ err |= __get_user(regs->ugp, &sc->sc_regs.ugp);
+#if CONFIG_HEXAGON_ARCH_VERSION >= 4
+ err |= __get_user(regs->cs0, &sc->sc_regs.cs0);
+ err |= __get_user(regs->cs1, &sc->sc_regs.cs1);
+#endif
+ err |= __get_user(tmp, &sc->sc_regs.pc); pt_set_elr(regs, tmp);
+
+ return err;
+}
+
+/*
+ * Setup signal stack frame with siginfo structure
+ */
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ int err = 0;
+ struct rt_sigframe __user *frame;
+ struct hexagon_vdso *vdso = current->mm->context.vdso;
+
+ frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(struct rt_sigframe)))
+ return -EFAULT;
+
+ if (copy_siginfo_to_user(&frame->info, &ksig->info))
+ return -EFAULT;
+
+ /* The on-stack signal trampoline is no longer executed;
+ * however, the libgcc signal frame unwinding code checks for
+ * the presence of these two numeric magic values.
+ */
+ err |= __put_user(0x7800d166, &frame->tramp[0]);
+ err |= __put_user(0x5400c004, &frame->tramp[1]);
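+	/* These words appear to match the encoded trampoline instructions
+	 * (r6 = #__NR_rt_sigreturn; trap0(#1)); see trampoline.S.
+	 */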
+ err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ err |= __save_altstack(&frame->uc.uc_stack, user_stack_pointer(regs));
+ if (err)
+ return -EFAULT;
+
+	/* Load the r0/r1 pair with the signal number and siginfo pointer... */
+ regs->r0100 = ((unsigned long long)((unsigned long)&frame->info) << 32)
+ | (unsigned long long)ksig->sig;
+ regs->r02 = (unsigned long) &frame->uc;
+ regs->r31 = (unsigned long) vdso->rt_signal_trampoline;
+ pt_psp(regs) = (unsigned long) frame;
+ pt_set_elr(regs, (unsigned long)ksig->ka.sa.sa_handler);
+
+ return 0;
+}
+
+/*
+ * Set up the invocation of the signal handler
+ */
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ int ret;
+
+ /*
+ * If we're handling a signal that aborted a system call,
+ * set up the error return value before adding the signal
+ * frame to the stack.
+ */
+
+ if (regs->syscall_nr >= 0) {
+ switch (regs->r00) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->r00 = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->r00 = -EINTR;
+ break;
+ }
+ /* Fall through */
+ case -ERESTARTNOINTR:
+ regs->r06 = regs->syscall_nr;
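+			/* Rewind the PC by one 4-byte instruction word so
+			 * the trap0 is re-executed.
+			 */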
+ pt_set_elr(regs, pt_elr(regs) - 4);
+ regs->r00 = regs->restart_r0;
+ break;
+ default:
+ break;
+ }
+ }
+
+	/*
+	 * Set up the stack frame. We make no distinction based on
+	 * SA_SIGINFO; only the rt_frame flavor is ever built.
+	 */
+ /* If there was an error on setup, no signal was delivered. */
+ ret = setup_rt_frame(ksig, sigmask_to_save(), regs);
+
+ signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
+}
+
+/*
+ * Called from return-from-event code.
+ */
+void do_signal(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ if (!user_mode(regs))
+ return;
+
+ if (get_signal(&ksig)) {
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /*
+ * No (more) signals; if we came from a system call, handle the restart.
+ */
+
+ if (regs->syscall_nr >= 0) {
+ switch (regs->r00) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->r06 = regs->syscall_nr;
+ break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->r06 = __NR_restart_syscall;
+ break;
+ default:
+ goto no_restart;
+ }
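+		/* Rewind the PC one instruction word to re-issue the trap0. */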
+ pt_set_elr(regs, pt_elr(regs) - 4);
+ regs->r00 = regs->restart_r0;
+ }
+
+no_restart:
+ /* If there's no signal to deliver, put the saved sigmask back */
+ restore_saved_sigmask();
+}
+
+/*
+ * Architecture-specific wrappers for signal-related system calls
+ */
+
+asmlinkage int sys_rt_sigreturn(void)
+{
+ struct pt_regs *regs = current_pt_regs();
+ struct rt_sigframe __user *frame;
+ sigset_t blocked;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ frame = (struct rt_sigframe __user *)pt_psp(regs);
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked)))
+ goto badframe;
+
+ set_current_blocked(&blocked);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ goto badframe;
+
+ /* Restore the user's stack as well */
+ pt_psp(regs) = regs->r29;
+
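+	/* Prevent restart handling from treating this as a syscall return. */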
+ regs->syscall_nr = -1;
+
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return regs->r00;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
new file mode 100644
index 000000000..5dbc15549
--- /dev/null
+++ b/arch/hexagon/kernel/smp.c
@@ -0,0 +1,267 @@
+/*
+ * SMP support for Hexagon
+ *
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/sched/mm.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/mm_types.h>
+
+#include <asm/time.h> /* timer_interrupt */
+#include <asm/hexagon_vm.h>
+
+#define BASE_IPI_IRQ 26
+
+/*
+ * cpu_possible_mask needs to be filled out before setup_per_cpu_areas()
+ * runs (which is well before any of our smp_prepare_cpus() work), so
+ * that the per-cpu areas can be allocated.
+ */
+
+struct ipi_data {
+ unsigned long bits;
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
+static inline void __handle_ipi(unsigned long *ops, struct ipi_data *ipi,
+ int cpu)
+{
+ unsigned long msg = 0;
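+	/* Walk the set message bits; find_next_bit() returns BITS_PER_LONG
+	 * when none remain.
+	 */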
+ do {
+ msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
+
+ switch (msg) {
+
+ case IPI_TIMER:
+ ipi_timer();
+ break;
+
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CPU_STOP:
+ /*
+ * call vmstop()
+ */
+ __vmstop();
+ break;
+
+ case IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+ }
+ } while (msg < BITS_PER_LONG);
+}
+
+/* Called via IPI from other CPUs to unmask an interrupt on this CPU */
+void smp_vm_unmask_irq(void *info)
+{
+ __vmintop_locen((long) info);
+}
+
+
+/*
+ * This is based on Alpha's IPI handling.
+ * The handler takes (int, void *) arguments: the first is the irq,
+ * the second is the cookie (here, the irq_desc).
+ */
+
+irqreturn_t handle_ipi(int irq, void *desc)
+{
+ int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ unsigned long ops;
+
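+	/* Atomically claim all pending message bits; loop in case more
+	 * arrive while we process.
+	 */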
+ while ((ops = xchg(&ipi->bits, 0)) != 0)
+ __handle_ipi(&ops, ipi, cpu);
+ return IRQ_HANDLED;
+}
+
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
+{
+ unsigned long flags;
+ unsigned long cpu;
+ unsigned long retval;
+
+ local_irq_save(flags);
+
+ for_each_cpu(cpu, cpumask) {
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+
+ set_bit(msg, &ipi->bits);
+ /* Possible barrier here */
+ retval = __vmintop_post(BASE_IPI_IRQ+cpu);
+
+ if (retval != 0) {
+ printk(KERN_ERR "interrupt %ld not configured?\n",
+ BASE_IPI_IRQ+cpu);
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+static struct irqaction ipi_intdesc = {
+ .handler = handle_ipi,
+ .flags = IRQF_TRIGGER_RISING,
+ .name = "ipi_handler"
+};
+
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+/*
+ * Interrupts should already be disabled by the VM and SP should
+ * already be correct; we still need to point THREADINFO_REG at the
+ * current thread_info.
+ */
+
+void start_secondary(void)
+{
+ unsigned int cpu;
+ unsigned long thread_ptr;
+
+ /* Calculate thread_info pointer from stack pointer */
+ __asm__ __volatile__(
+ "%0 = SP;\n"
+ : "=r" (thread_ptr)
+ );
+
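+	/* thread_info lives at the base of the THREAD_SIZE-aligned kernel
+	 * stack, so masking SP recovers it.
+	 */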
+ thread_ptr = thread_ptr & ~(THREAD_SIZE-1);
+
+ __asm__ __volatile__(
+ QUOTED_THREADINFO_REG " = %0;\n"
+ :
+ : "r" (thread_ptr)
+ );
+
+ /* Set the memory struct */
+ mmgrab(&init_mm);
+ current->active_mm = &init_mm;
+
+ cpu = smp_processor_id();
+
+ setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc);
+
+ /* Register the clock_event dummy */
+ setup_percpu_clockdev();
+
+ printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);
+
+ notify_cpu_starting(cpu);
+
+ set_cpu_online(cpu, true);
+
+ local_irq_enable();
+
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+
+/*
+ * Called once for each present CPU: starts the CPU and then
+ * spins until the new CPU sets cpu_online(cpu).
+ */
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ struct thread_info *thread = (struct thread_info *)idle->stack;
+ void *stack_start;
+
+ thread->cpu = cpu;
+
+	/* Start the new CPU at the top of the idle thread's stack. */
+ stack_start = ((void *) thread) + THREAD_SIZE;
+ __vmstart(start_secondary, stack_start);
+
+ while (!cpu_online(cpu))
+ barrier();
+
+ return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
+
+ /*
+ * should eventually have some sort of machine
+ * descriptor that has this stuff
+ */
+
+ /* Right now, let's just fake it. */
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+
+ /* Also need to register the interrupts for IPI */
+ if (max_cpus > 1)
+ setup_irq(BASE_IPI_IRQ, &ipi_intdesc);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ send_ipi(&targets, IPI_CPU_STOP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_ipi(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ send_ipi(mask, IPI_CALL_FUNC);
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+void smp_start_cpus(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ set_cpu_possible(i, true);
+}
diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c
new file mode 100644
index 000000000..ec4ef6829
--- /dev/null
+++ b/arch/hexagon/kernel/stacktrace.c
@@ -0,0 +1,65 @@
+/*
+ * Stacktrace support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <linux/module.h>
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long rets;
+};
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+ unsigned long low, high;
+ unsigned long fp;
+ struct stackframe *frame;
+ int skip = trace->skip;
+
+ low = (unsigned long)task_stack_page(current);
+ high = low + THREAD_SIZE;
+ fp = (unsigned long)__builtin_frame_address(0);
+
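+	/* fp must stay within the task's stack, with room for a full frame record. */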
+ while (fp >= low && fp <= (high - sizeof(*frame))) {
+ frame = (struct stackframe *)fp;
+
+ if (skip) {
+ skip--;
+ } else {
+ trace->entries[trace->nr_entries++] = frame->rets;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+
+ /*
+ * The next frame must be at a higher address than the
+ * current frame.
+ */
+ low = fp + sizeof(*frame);
+ fp = frame->fp;
+ }
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
diff --git a/arch/hexagon/kernel/syscalltab.c b/arch/hexagon/kernel/syscalltab.c
new file mode 100644
index 000000000..7024b1ddc
--- /dev/null
+++ b/arch/hexagon/kernel/syscalltab.c
@@ -0,0 +1,32 @@
+/*
+ * System call table for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscall.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
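+
+/*
+ * Each __SYSCALL(nr, call) entry in <asm/unistd.h> now expands to a
+ * designated initializer, so including the header fills in the table
+ * by syscall number.
+ */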
+
+void *sys_call_table[__NR_syscalls] = {
+#include <asm/unistd.h>
+};
diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
new file mode 100644
index 000000000..29b1f5711
--- /dev/null
+++ b/arch/hexagon/kernel/time.c
@@ -0,0 +1,242 @@
+/*
+ * Time related functions for Hexagon architecture
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+
+#include <asm/timer-regs.h>
+#include <asm/hexagon_vm.h>
+
+/*
+ * For the clocksource we need:
+ * pcycle frequency (600MHz)
+ * For the loops_per_jiffy we need:
+ * thread/cpu frequency (100MHz)
+ * And for the timer, we need:
+ * sleep clock rate
+ */
+
+cycles_t pcycle_freq_mhz;
+cycles_t thread_freq_mhz;
+cycles_t sleep_clk_freq;
+
+static struct resource rtos_timer_resources[] = {
+ {
+ .start = RTOS_TIMER_REGS_ADDR,
+ .end = RTOS_TIMER_REGS_ADDR+PAGE_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device rtos_timer_device = {
+ .name = "rtos_timer",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtos_timer_resources),
+ .resource = rtos_timer_resources,
+};
+
+/* A lot of this stuff should move into a platform-specific section. */
+struct adsp_hw_timer_struct {
+ u32 match; /* Match value */
+ u32 count;
+ u32 enable; /* [1] - CLR_ON_MATCH_EN, [0] - EN */
+ u32 clear; /* one-shot register that clears the count */
+};
+
+/* Look for "TCX0" for related constants. */
+static __iomem struct adsp_hw_timer_struct *rtos_timer;
+
+static u64 timer_get_cycles(struct clocksource *cs)
+{
+ return (u64) __vmgettime();
+}
+
+static struct clocksource hexagon_clocksource = {
+ .name = "pcycles",
+ .rating = 250,
+ .read = timer_get_cycles,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int set_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ /* Assuming the timer will be disabled when we enter here. */
+
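+	/* Pulse the one-shot clear register to zero the running count. */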
+ iowrite32(1, &rtos_timer->clear);
+ iowrite32(0, &rtos_timer->clear);
+
+ iowrite32(delta, &rtos_timer->match);
+ iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+/* Broadcast mechanism */
+static void broadcast(const struct cpumask *mask)
+{
+ send_ipi(mask, IPI_TIMER);
+}
+#endif
+
+/* XXX Implement set_state_shutdown() */
+static struct clock_event_device hexagon_clockevent_dev = {
+ .name = "clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 400,
+ .irq = RTOS_TIMER_INT,
+ .set_next_event = set_next_event,
+#ifdef CONFIG_SMP
+ .broadcast = broadcast,
+#endif
+};
+
+#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct clock_event_device, clock_events);
+
+void setup_percpu_clockdev(void)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
+ struct clock_event_device *dummy_clock_dev =
+ &per_cpu(clock_events, cpu);
+
+ memcpy(dummy_clock_dev, ce_dev, sizeof(*dummy_clock_dev));
+ INIT_LIST_HEAD(&dummy_clock_dev->list);
+
+ dummy_clock_dev->features = CLOCK_EVT_FEAT_DUMMY;
+ dummy_clock_dev->cpumask = cpumask_of(cpu);
+
+ clockevents_register_device(dummy_clock_dev);
+}
+
+/* Called from smp.c for each CPU's timer ipi call */
+void ipi_timer(void)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);
+
+ ce_dev->event_handler(ce_dev);
+}
+#endif /* CONFIG_SMP */
+
+static irqreturn_t timer_interrupt(int irq, void *devid)
+{
+ struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
+
+ iowrite32(0, &rtos_timer->enable);
+ ce_dev->event_handler(ce_dev);
+
+ return IRQ_HANDLED;
+}
+
+/* This should also be pulled from devtree */
+static struct irqaction rtos_timer_intdesc = {
+ .handler = timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_TRIGGER_RISING,
+ .name = "rtos_timer"
+};
+
+/*
+ * time_init_deferred - called by start_kernel to set up timer/clock source
+ *
+ * Install the IRQ handler for the clock and set up the timers.
+ * This is done late so that we can use ioremap().
+ *
+ * This runs just before the delay loop is calibrated and is
+ * used by that calibration.
+ */
+void __init time_init_deferred(void)
+{
+ struct resource *resource = NULL;
+ struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
+
+ ce_dev->cpumask = cpu_all_mask;
+
+ if (!resource)
+ resource = rtos_timer_device.resource;
+
+ /* ioremap here means this has to run later, after paging init */
+ rtos_timer = ioremap(resource->start, resource_size(resource));
+
+ if (!rtos_timer) {
+ release_mem_region(resource->start, resource_size(resource));
+ }
+ clocksource_register_khz(&hexagon_clocksource, pcycle_freq_mhz * 1000);
+
+ /* Note: the sim generic RTOS clock is apparently really 18750Hz */
+
+	/*
+	 * The last argument is the minimum number of seconds for which the
+	 * mult/shift conversion is guaranteed to work without overflow.
+	 */
+ clockevents_calc_mult_shift(ce_dev, sleep_clk_freq, 4);
+
+ ce_dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, ce_dev);
+ ce_dev->max_delta_ticks = 0x7fffffff;
+ ce_dev->min_delta_ns = clockevent_delta2ns(0xf, ce_dev);
+ ce_dev->min_delta_ticks = 0xf;
+
+#ifdef CONFIG_SMP
+ setup_percpu_clockdev();
+#endif
+
+ clockevents_register_device(ce_dev);
+ setup_irq(ce_dev->irq, &rtos_timer_intdesc);
+}
+
+void __init time_init(void)
+{
+ late_time_init = time_init_deferred;
+}
+
+void __delay(unsigned long cycles)
+{
+ unsigned long long start = __vmgettime();
+
+ while ((__vmgettime() - start) < cycles)
+ cpu_relax();
+}
+EXPORT_SYMBOL(__delay);
+
+/*
+ * This could become parametric or perhaps even computed at run-time,
+ * but for now we take the observed simulator jitter.
+ */
+static long long fudgefactor = 350; /* Maybe lower if kernel optimized. */
+
+void __udelay(unsigned long usecs)
+{
+ unsigned long long start = __vmgettime();
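+	/* pcycle_freq_mhz is in MHz, so MHz * usecs yields pcycles directly. */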
+ unsigned long long finish = (pcycle_freq_mhz * usecs) - fudgefactor;
+
+ while ((__vmgettime() - start) < finish)
+		cpu_relax();	/* yield while busy-waiting */
+}
+EXPORT_SYMBOL(__udelay);
diff --git a/arch/hexagon/kernel/trampoline.S b/arch/hexagon/kernel/trampoline.S
new file mode 100644
index 000000000..18110a905
--- /dev/null
+++ b/arch/hexagon/kernel/trampoline.S
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * Trampoline sequences to be copied onto user stack.
+ * This consumes a little more space than hand-assembling
+ * immediate constants for use in C, but is more portable
+ * to future tweaks to the Hexagon instruction set.
+ */
+
+#include <asm/unistd.h>
+
+/* Sig trampolines - call sys_sigreturn or sys_rt_sigreturn as appropriate */
+
+/* plain sigreturn is gone. */
+
+ .globl __rt_sigtramp_template
+__rt_sigtramp_template:
+ r6 = #__NR_rt_sigreturn;
+ trap0(#1);
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
new file mode 100644
index 000000000..91ee04842
--- /dev/null
+++ b/arch/hexagon/kernel/traps.c
@@ -0,0 +1,453 @@
+/*
+ * Kernel traps/events for Hexagon processor
+ *
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/kdebug.h>
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/tracehook.h>
+#include <asm/traps.h>
+#include <asm/vm_fault.h>
+#include <asm/syscall.h>
+#include <asm/registers.h>
+#include <asm/unistd.h>
+#include <asm/sections.h>
+#ifdef CONFIG_KGDB
+# include <linux/kgdb.h>
+#endif
+
+#define TRAP_SYSCALL 1
+#define TRAP_DEBUG 0xdb
+
+void __init trap_init(void)
+{
+}
+
+#ifdef CONFIG_GENERIC_BUG
+/* Maybe should resemble arch/sh/kernel/traps.c ?? */
+int is_valid_bugaddr(unsigned long addr)
+{
+ return 1;
+}
+#endif /* CONFIG_GENERIC_BUG */
+
+static const char *ex_name(int ex)
+{
+ switch (ex) {
+ case HVM_GE_C_XPROT:
+ case HVM_GE_C_XUSER:
+ return "Execute protection fault";
+ case HVM_GE_C_RPROT:
+ case HVM_GE_C_RUSER:
+ return "Read protection fault";
+ case HVM_GE_C_WPROT:
+ case HVM_GE_C_WUSER:
+ return "Write protection fault";
+ case HVM_GE_C_XMAL:
+ return "Misaligned instruction";
+ case HVM_GE_C_WREG:
+ return "Multiple writes to same register in packet";
+ case HVM_GE_C_PCAL:
+ return "Program counter values that are not properly aligned";
+ case HVM_GE_C_RMAL:
+ return "Misaligned data load";
+ case HVM_GE_C_WMAL:
+ return "Misaligned data store";
+ case HVM_GE_C_INVI:
+ case HVM_GE_C_PRIVI:
+ return "Illegal instruction";
+ case HVM_GE_C_BUS:
+ return "Precise bus error";
+ case HVM_GE_C_CACHE:
+ return "Cache error";
+
+ case 0xdb:
+ return "Debugger trap";
+
+ default:
+ return "Unrecognized exception";
+ }
+}
+
+static void do_show_stack(struct task_struct *task, unsigned long *fp,
+ unsigned long ip)
+{
+ int kstack_depth_to_print = 24;
+ unsigned long offset, size;
+ const char *name = NULL;
+ unsigned long *newfp;
+ unsigned long low, high;
+ char tmpstr[128];
+ char *modname;
+ int i;
+
+ if (task == NULL)
+ task = current;
+
+ printk(KERN_INFO "CPU#%d, %s/%d, Call Trace:\n",
+ raw_smp_processor_id(), task->comm,
+ task_pid_nr(task));
+
+ if (fp == NULL) {
+ if (task == current) {
+ asm("%0 = r30" : "=r" (fp));
+ } else {
+ fp = (unsigned long *)
+ ((struct hexagon_switch_stack *)
+ task->thread.switch_sp)->fp;
+ }
+ }
+
+ if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
+ printk(KERN_INFO "-- Corrupt frame pointer %p\n", fp);
+ return;
+ }
+
+ /* Saved link reg is one word above FP */
+ if (!ip)
+ ip = *(fp+1);
+
+ /* Expect kernel stack to be in-bounds */
+ low = (unsigned long)task_stack_page(task);
+ high = low + THREAD_SIZE - 8;
+ low += sizeof(struct thread_info);
+
+ for (i = 0; i < kstack_depth_to_print; i++) {
+
+ name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);
+
+ printk(KERN_INFO "[%p] 0x%lx: %s + 0x%lx", fp, ip, name,
+ offset);
+ if (((unsigned long) fp < low) || (high < (unsigned long) fp))
+ printk(KERN_CONT " (FP out of bounds!)");
+ if (modname)
+ printk(KERN_CONT " [%s] ", modname);
+ printk(KERN_CONT "\n");
+
+ newfp = (unsigned long *) *fp;
+
+ if (((unsigned long) newfp) & 0x3) {
+ printk(KERN_INFO "-- Corrupt frame pointer %p\n",
+ newfp);
+ break;
+ }
+
+ /* Attempt to continue past exception. */
+		if (newfp == 0) {
+ struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
+ + 8);
+
+ if (regs->syscall_nr != -1) {
+ printk(KERN_INFO "-- trap0 -- syscall_nr: %ld",
+ regs->syscall_nr);
+ printk(KERN_CONT " psp: %lx elr: %lx\n",
+ pt_psp(regs), pt_elr(regs));
+ break;
+ } else {
+ /* really want to see more ... */
+ kstack_depth_to_print += 6;
+ printk(KERN_INFO "-- %s (0x%lx) badva: %lx\n",
+ ex_name(pt_cause(regs)), pt_cause(regs),
+ pt_badva(regs));
+ }
+
+ newfp = (unsigned long *) regs->r30;
+ ip = pt_elr(regs);
+ } else {
+ ip = *(newfp + 1);
+ }
+
+ /* If link reg is null, we are done. */
+ if (ip == 0x0)
+ break;
+
+ /* If newfp isn't larger, we're tracing garbage. */
+ if (newfp > fp)
+ fp = newfp;
+ else
+ break;
+ }
+}
+
+void show_stack(struct task_struct *task, unsigned long *fp)
+{
+ /* Saved link reg is one word above FP */
+ do_show_stack(task, fp, 0);
+}
+
+int die(const char *str, struct pt_regs *regs, long err)
+{
+ static struct {
+ spinlock_t lock;
+ int counter;
+ } die = {
+ .lock = __SPIN_LOCK_UNLOCKED(die.lock),
+ .counter = 0
+ };
+
+ console_verbose();
+ oops_enter();
+
+ spin_lock_irq(&die.lock);
+ bust_spinlocks(1);
+ printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);
+
+ if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
+ NOTIFY_STOP)
+ return 1;
+
+ print_modules();
+ show_regs(regs);
+ do_show_stack(current, &regs->r30, pt_elr(regs));
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+
+ spin_unlock_irq(&die.lock);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ oops_exit();
+ do_exit(err);
+ return 0;
+}
+
+int die_if_kernel(char *str, struct pt_regs *regs, long err)
+{
+ if (!user_mode(regs))
+ return die(str, regs, err);
+ else
+ return 0;
+}
+
+/*
+ * It's not clear that misaligned fetches are ever recoverable.
+ */
+static void misaligned_instruction(struct pt_regs *regs)
+{
+ die_if_kernel("Misaligned Instruction", regs, 0);
+ force_sig(SIGBUS, current);
+}
+
+/*
+ * Misaligned loads and stores, on the other hand, can be emulated,
+ * and probably should be some day. For now they are treated as fatal.
+ */
+static void misaligned_data_load(struct pt_regs *regs)
+{
+ die_if_kernel("Misaligned Data Load", regs, 0);
+ force_sig(SIGBUS, current);
+}
+
+static void misaligned_data_store(struct pt_regs *regs)
+{
+ die_if_kernel("Misaligned Data Store", regs, 0);
+ force_sig(SIGBUS, current);
+}
+
+static void illegal_instruction(struct pt_regs *regs)
+{
+ die_if_kernel("Illegal Instruction", regs, 0);
+ force_sig(SIGILL, current);
+}
+
+/*
+ * Precise bus errors may be recoverable with a retry,
+ * but for now, treat them as irrecoverable.
+ */
+static void precise_bus_error(struct pt_regs *regs)
+{
+ die_if_kernel("Precise Bus Error", regs, 0);
+ force_sig(SIGBUS, current);
+}
+
+/*
+ * If anything is to be done here other than panic,
+ * it will probably be complex and migrate to another
+ * source module. For now, just die.
+ */
+static void cache_error(struct pt_regs *regs)
+{
+ die("Cache Error", regs, 0);
+}
+
+/*
+ * General exception handler
+ */
+void do_genex(struct pt_regs *regs)
+{
+ /*
+ * Decode Cause and Dispatch
+ */
+ switch (pt_cause(regs)) {
+ case HVM_GE_C_XPROT:
+ case HVM_GE_C_XUSER:
+ execute_protection_fault(regs);
+ break;
+ case HVM_GE_C_RPROT:
+ case HVM_GE_C_RUSER:
+ read_protection_fault(regs);
+ break;
+ case HVM_GE_C_WPROT:
+ case HVM_GE_C_WUSER:
+ write_protection_fault(regs);
+ break;
+ case HVM_GE_C_XMAL:
+ misaligned_instruction(regs);
+ break;
+ case HVM_GE_C_WREG:
+ illegal_instruction(regs);
+ break;
+ case HVM_GE_C_PCAL:
+ misaligned_instruction(regs);
+ break;
+ case HVM_GE_C_RMAL:
+ misaligned_data_load(regs);
+ break;
+ case HVM_GE_C_WMAL:
+ misaligned_data_store(regs);
+ break;
+ case HVM_GE_C_INVI:
+ case HVM_GE_C_PRIVI:
+ illegal_instruction(regs);
+ break;
+ case HVM_GE_C_BUS:
+ precise_bus_error(regs);
+ break;
+ case HVM_GE_C_CACHE:
+ cache_error(regs);
+ break;
+ default:
+ /* Halt and catch fire */
+ panic("Unrecognized exception 0x%lx\n", pt_cause(regs));
+ break;
+ }
+}
+
+/* Indirect system call dispatch */
+long sys_syscall(void)
+{
+ printk(KERN_ERR "sys_syscall invoked!\n");
+ return -ENOSYS;
+}
+
+void do_trap0(struct pt_regs *regs)
+{
+ syscall_fn syscall;
+
+ switch (pt_cause(regs)) {
+ case TRAP_SYSCALL:
+ /* System call is trap0 #1 */
+
+ /* allow strace to catch syscall args */
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs)))
+ return; /* return -ENOSYS somewhere? */
+
+ /* Interrupts should be re-enabled for syscall processing */
+ __vmsetie(VM_INT_ENABLE);
+
+ /*
+ * System call number is in r6, arguments in r0..r5.
+ * Fortunately, no Linux syscall has more than 6 arguments,
+		 * and the Hexagon ABI passes the first 6 arguments in registers.
+		 * 64-bit arguments are passed in odd/even register pairs.
+		 * As it happens, we have no system calls that take more
+ * than three arguments with more than one 64-bit value.
+ * Should that change, we'd need to redesign to copy
+ * between user and kernel stacks.
+ */
+ regs->syscall_nr = regs->r06;
+
+ /*
+ * GPR R0 carries the first parameter, and is also used
+ * to report the return value. We need a backup of
+ * the user's value in case we need to do a late restart
+ * of the system call.
+ */
+ regs->restart_r0 = regs->r00;
+
+ if ((unsigned long) regs->syscall_nr >= __NR_syscalls) {
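+			/* Out-of-range syscall number; historically this
+			 * returns -1 rather than -ENOSYS.
+			 */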
+ regs->r00 = -1;
+ } else {
+ syscall = (syscall_fn)
+ (sys_call_table[regs->syscall_nr]);
+ regs->r00 = syscall(regs->r00, regs->r01,
+ regs->r02, regs->r03,
+ regs->r04, regs->r05);
+ }
+
+ /* allow strace to get the syscall return state */
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
+ tracehook_report_syscall_exit(regs, 0);
+
+ break;
+ case TRAP_DEBUG:
+ /* Trap0 0xdb is debug breakpoint */
+ if (user_mode(regs)) {
+ /*
+			 * Some architectures add some per-thread state
+ * to distinguish between breakpoint traps and
+ * trace traps. We may want to do that, and
+ * set the si_code value appropriately, or we
+ * may want to use a different trap0 flavor.
+ */
+ force_sig_fault(SIGTRAP, TRAP_BRKPT,
+ (void __user *) pt_elr(regs), current);
+ } else {
+#ifdef CONFIG_KGDB
+ kgdb_handle_exception(pt_cause(regs), SIGTRAP,
+ TRAP_BRKPT, regs);
+#endif
+ }
+ break;
+ }
+ /* Ignore other trap0 codes for now, especially 0 (Angel calls) */
+}
+
+/*
+ * Machine check exception handler
+ */
+void do_machcheck(struct pt_regs *regs)
+{
+ /* Halt and catch fire */
+ __vmstop();
+}
+
+/*
+ * Treat this like the old 0xdb trap.
+ */
+
+void do_debug_exception(struct pt_regs *regs)
+{
+ regs->hvmer.vmest &= ~HVM_VMEST_CAUSE_MSK;
+ regs->hvmer.vmest |= (TRAP_DEBUG << HVM_VMEST_CAUSE_SFT);
+ do_trap0(regs);
+}
diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
new file mode 100644
index 000000000..3ea968415
--- /dev/null
+++ b/arch/hexagon/kernel/vdso.c
@@ -0,0 +1,101 @@
+/*
+ * vDSO implementation for Hexagon
+ *
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/binfmts.h>
+
+#include <asm/vdso.h>
+
+static struct page *vdso_page;
+
+/*
+ * Create a vDSO page holding the signal trampoline.
+ * We want this so that the user stack can remain non-executable.
+ */
+static int __init vdso_init(void)
+{
+ struct hexagon_vdso *vdso;
+
+ vdso_page = alloc_page(GFP_KERNEL);
+ if (!vdso_page)
+ panic("Cannot allocate vdso");
+
+ vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
+ if (!vdso)
+ panic("Cannot map vdso");
+ clear_page(vdso);
+
+ /* Install the signal trampoline; currently looks like this:
+ * r6 = #__NR_rt_sigreturn;
+ * trap0(#1);
+ */
+ vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
+ vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];
+
+ vunmap(vdso);
+
+ return 0;
+}
+arch_initcall(vdso_init);
+
+/*
+ * Called from binfmt_elf. Create a VMA for the vDSO page.
+ */
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ int ret;
+ unsigned long vdso_base;
+ struct mm_struct *mm = current->mm;
+
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+
+ /* Try to get it loaded right near ld.so/glibc. */
+ vdso_base = STACK_TOP;
+
+ vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(vdso_base)) {
+ ret = vdso_base;
+ goto up_fail;
+ }
+
+ /* MAYWRITE to allow gdb to COW and set breakpoints. */
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ &vdso_page);
+
+ if (ret)
+ goto up_fail;
+
+ mm->context.vdso = (void *)vdso_base;
+
+up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+ return "[vdso]";
+ return NULL;
+}
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
new file mode 100644
index 000000000..9f4a73ff7
--- /dev/null
+++ b/arch/hexagon/kernel/vm_entry.S
@@ -0,0 +1,393 @@
+/*
+ * Event entry/exit for Hexagon
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <asm/asm-offsets.h> /* assembly-safer versions of C defines */
+#include <asm/mem-layout.h> /* sigh, except for page_offset */
+#include <asm/hexagon_vm.h>
+#include <asm/thread_info.h>
+
+/*
+ * Entry into guest-mode Linux under Hexagon Virtual Machine.
+ * Stack pointer points to event record - build pt_regs on top of it,
+ * set up a plausible C stack frame, and dispatch to the C handler.
+ * On return, do vmrte virtual instruction with SP where we started.
+ *
+ * VM Spec 0.5 uses a trap to fetch HVM record now.
+ */
+
+/*
+ * Save full register state, while setting up thread_info struct
+ * pointer derived from kernel stack pointer in THREADINFO_REG
+ * register, putting prior thread_info.regs pointer in a callee-save
+ * register (R24, which had better not ever be assigned to THREADINFO_REG),
+ * and updating thread_info.regs to point to current stack frame,
+ * so as to support nested events in kernel mode.
+ *
+ * As this is common code, we set the pt_regs system call number
+ * to -1 for all events. It will be replaced with the system call
+ * number in the case where we decode a system call (trap0(#1)).
+ */
+
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
+#define save_pt_regs()\
+ memd(R0 + #_PT_R3130) = R31:30; \
+ { memw(R0 + #_PT_R2928) = R28; \
+ R31 = memw(R0 + #_PT_ER_VMPSP); }\
+ { memw(R0 + #(_PT_R2928 + 4)) = R31; \
+ R31 = ugp; } \
+ { memd(R0 + #_PT_R2726) = R27:26; \
+ R30 = gp ; } \
+ memd(R0 + #_PT_R2524) = R25:24; \
+ memd(R0 + #_PT_R2322) = R23:22; \
+ memd(R0 + #_PT_R2120) = R21:20; \
+ memd(R0 + #_PT_R1918) = R19:18; \
+ memd(R0 + #_PT_R1716) = R17:16; \
+ memd(R0 + #_PT_R1514) = R15:14; \
+ memd(R0 + #_PT_R1312) = R13:12; \
+ { memd(R0 + #_PT_R1110) = R11:10; \
+ R15 = lc0; } \
+ { memd(R0 + #_PT_R0908) = R9:8; \
+ R14 = sa0; } \
+ { memd(R0 + #_PT_R0706) = R7:6; \
+ R13 = lc1; } \
+ { memd(R0 + #_PT_R0504) = R5:4; \
+ R12 = sa1; } \
+ { memd(R0 + #_PT_GPUGP) = R31:30; \
+ R11 = m1; \
+ R2.H = #HI(_THREAD_SIZE); } \
+ { memd(R0 + #_PT_LC0SA0) = R15:14; \
+ R10 = m0; \
+ R2.L = #LO(_THREAD_SIZE); } \
+ { memd(R0 + #_PT_LC1SA1) = R13:12; \
+ R15 = p3:0; \
+ R2 = neg(R2); } \
+ { memd(R0 + #_PT_M1M0) = R11:10; \
+ R14 = usr; \
+ R2 = and(R0,R2); } \
+ { memd(R0 + #_PT_PREDSUSR) = R15:14; \
+ THREADINFO_REG = R2; } \
+ { r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
+ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
+ R2 = #-1; } \
+ { memw(R0 + #_PT_SYSCALL_NR) = R2; \
+ R30 = #0; }
+#else
+/* V4+ */
+/* the # ## # syntax inserts a literal ## */
+#define save_pt_regs()\
+ { memd(R0 + #_PT_R3130) = R31:30; \
+ R30 = memw(R0 + #_PT_ER_VMPSP); }\
+ { memw(R0 + #_PT_R2928) = R28; \
+ memw(R0 + #(_PT_R2928 + 4)) = R30; }\
+ { R31:30 = C11:10; \
+ memd(R0 + #_PT_R2726) = R27:26; \
+ memd(R0 + #_PT_R2524) = R25:24; }\
+ { memd(R0 + #_PT_R2322) = R23:22; \
+ memd(R0 + #_PT_R2120) = R21:20; }\
+ { memd(R0 + #_PT_R1918) = R19:18; \
+ memd(R0 + #_PT_R1716) = R17:16; }\
+ { memd(R0 + #_PT_R1514) = R15:14; \
+ memd(R0 + #_PT_R1312) = R13:12; \
+ R17:16 = C13:12; }\
+ { memd(R0 + #_PT_R1110) = R11:10; \
+ memd(R0 + #_PT_R0908) = R9:8; \
+ R15:14 = C1:0; } \
+ { memd(R0 + #_PT_R0706) = R7:6; \
+ memd(R0 + #_PT_R0504) = R5:4; \
+ R13:12 = C3:2; } \
+ { memd(R0 + #_PT_GPUGP) = R31:30; \
+ memd(R0 + #_PT_LC0SA0) = R15:14; \
+ R11:10 = C7:6; }\
+ { THREADINFO_REG = and(R0, # ## #-_THREAD_SIZE); \
+ memd(R0 + #_PT_LC1SA1) = R13:12; \
+ R15 = p3:0; }\
+ { memd(R0 + #_PT_M1M0) = R11:10; \
+ memw(R0 + #_PT_PREDSUSR + 4) = R15; }\
+ { r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
+ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
+ R2 = #-1; } \
+ { memw(R0 + #_PT_SYSCALL_NR) = R2; \
+ memd(R0 + #_PT_CS1CS0) = R17:16; \
+ R30 = #0; }
+#endif
+
+/*
+ * Restore registers and thread_info.regs state. THREADINFO_REG
+ * is assumed to still be sane, and R24 to have been correctly
+ * preserved. Don't restore R29 (SP) until later.
+ */
+
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
+#define restore_pt_regs() \
+ { memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
+ R15:14 = memd(R0 + #_PT_PREDSUSR); } \
+ { R11:10 = memd(R0 + #_PT_M1M0); \
+ p3:0 = R15; } \
+ { R13:12 = memd(R0 + #_PT_LC1SA1); \
+ usr = R14; } \
+ { R15:14 = memd(R0 + #_PT_LC0SA0); \
+ m1 = R11; } \
+ { R3:2 = memd(R0 + #_PT_R0302); \
+ m0 = R10; } \
+ { R5:4 = memd(R0 + #_PT_R0504); \
+ lc1 = R13; } \
+ { R7:6 = memd(R0 + #_PT_R0706); \
+ sa1 = R12; } \
+ { R9:8 = memd(R0 + #_PT_R0908); \
+ lc0 = R15; } \
+ { R11:10 = memd(R0 + #_PT_R1110); \
+ sa0 = R14; } \
+ { R13:12 = memd(R0 + #_PT_R1312); \
+ R15:14 = memd(R0 + #_PT_R1514); } \
+ { R17:16 = memd(R0 + #_PT_R1716); \
+ R19:18 = memd(R0 + #_PT_R1918); } \
+ { R21:20 = memd(R0 + #_PT_R2120); \
+ R23:22 = memd(R0 + #_PT_R2322); } \
+ { R25:24 = memd(R0 + #_PT_R2524); \
+ R27:26 = memd(R0 + #_PT_R2726); } \
+ R31:30 = memd(R0 + #_PT_GPUGP); \
+ { R28 = memw(R0 + #_PT_R2928); \
+ ugp = R31; } \
+ { R31:30 = memd(R0 + #_PT_R3130); \
+ gp = R30; }
+#else
+/* V4+ */
+#define restore_pt_regs() \
+ { memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
+ R15:14 = memd(R0 + #_PT_PREDSUSR); } \
+ { R11:10 = memd(R0 + #_PT_M1M0); \
+ R13:12 = memd(R0 + #_PT_LC1SA1); \
+ p3:0 = R15; } \
+ { R15:14 = memd(R0 + #_PT_LC0SA0); \
+ R3:2 = memd(R0 + #_PT_R0302); \
+ usr = R14; } \
+ { R5:4 = memd(R0 + #_PT_R0504); \
+ R7:6 = memd(R0 + #_PT_R0706); \
+ C7:6 = R11:10; }\
+ { R9:8 = memd(R0 + #_PT_R0908); \
+ R11:10 = memd(R0 + #_PT_R1110); \
+ C3:2 = R13:12; }\
+ { R13:12 = memd(R0 + #_PT_R1312); \
+ R15:14 = memd(R0 + #_PT_R1514); \
+ C1:0 = R15:14; }\
+ { R17:16 = memd(R0 + #_PT_R1716); \
+ R19:18 = memd(R0 + #_PT_R1918); } \
+ { R21:20 = memd(R0 + #_PT_R2120); \
+ R23:22 = memd(R0 + #_PT_R2322); } \
+ { R25:24 = memd(R0 + #_PT_R2524); \
+ R27:26 = memd(R0 + #_PT_R2726); } \
+ R31:30 = memd(R0 + #_PT_CS1CS0); \
+ { C13:12 = R31:30; \
+ R31:30 = memd(R0 + #_PT_GPUGP) ; \
+ R28 = memw(R0 + #_PT_R2928); }\
+ { C11:10 = R31:30; \
+ R31:30 = memd(R0 + #_PT_R3130); }
+#endif
+
+ /*
+ * Clears off enough space for the rest of pt_regs; evrec is a part
+ * of pt_regs in HVM mode. Save R0/R1, set handler's address in R1.
+ * R0 is the address of pt_regs and is the parameter to save_pt_regs.
+ */
+
+/*
+ * Since the HVM isn't automagically pushing the EVREC onto the stack anymore,
+ * we'll subtract the entire size out and then fill it in ourselves.
+ * Need to save off R0, R1, R2, R3 immediately.
+ */
+
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
+#define vm_event_entry(CHandler) \
+ { \
+ R29 = add(R29, #-(_PT_REGS_SIZE)); \
+ memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
+ } \
+ { \
+ memd(R29 +#_PT_R0302) = R3:2; \
+ } \
+ trap1(#HVM_TRAP1_VMGETREGS); \
+ { \
+ memd(R29 + #_PT_ER_VMEL) = R1:0; \
+ R0 = R29; \
+ R1.L = #LO(CHandler); \
+ } \
+ { \
+ memd(R29 + #_PT_ER_VMPSP) = R3:2; \
+ R1.H = #HI(CHandler); \
+ jump event_dispatch; \
+ }
+#else
+/* V4+ */
+/* turn on I$ prefetch early */
+/* the # ## # syntax inserts a literal ## */
+#define vm_event_entry(CHandler) \
+ { \
+ R29 = add(R29, #-(_PT_REGS_SIZE)); \
+ memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
+ memd(R29 + #(_PT_R0302 + -_PT_REGS_SIZE)) = R3:2; \
+ R0 = usr; \
+ } \
+ { \
+ memw(R29 + #_PT_PREDSUSR) = R0; \
+ R0 = setbit(R0, #16); \
+ } \
+ usr = R0; \
+ R1:0 = G1:0; \
+ { \
+ memd(R29 + #_PT_ER_VMEL) = R1:0; \
+ R1 = # ## #(CHandler); \
+ R3:2 = G3:2; \
+ } \
+ { \
+ R0 = R29; \
+ memd(R29 + #_PT_ER_VMPSP) = R3:2; \
+ jump event_dispatch; \
+ }
+#endif
+
+.text
+ /*
+ * Do bulk save/restore in one place.
+ * Adds a jump to dispatch latency, but
+ * saves hundreds of bytes.
+ */
+
+event_dispatch:
+ save_pt_regs()
+ callr r1
+
+ /*
+ * Coming back from the C-world, our thread info pointer
+ * should be in the designated register (usually R19)
+ *
+ * If we were in kernel mode, we don't need to check scheduler
+ * or signals if CONFIG_PREEMPT is not set. If set, then it has
+ * to jump to a need_resched kind of block.
+ * BTW, CONFIG_PREEMPT is not supported yet.
+ */
+
+#ifdef CONFIG_PREEMPT
+ R0 = #VM_INT_DISABLE
+ trap1(#HVM_TRAP1_VMSETIE)
+#endif
+
+ /* "Nested control path" -- if the previous mode was kernel */
+ {
+ R0 = memw(R29 + #_PT_ER_VMEST);
+ R26.L = #LO(do_work_pending);
+ }
+ {
+ P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
+ if (!P0.new) jump:nt restore_all;
+ R26.H = #HI(do_work_pending);
+ R0 = #VM_INT_DISABLE;
+ }
+
+ /*
+	 * This is also the return path from fork and from system calls,
+	 * normally coming back from user mode.
+	 *
+	 * R26 must hold do_work_pending, and R0 should hold VM_INT_DISABLE.
+ */
+
+check_work_pending:
+ /* Disable interrupts while checking TIF */
+ trap1(#HVM_TRAP1_VMSETIE)
+ {
+ R0 = R29; /* regs should still be at top of stack */
+ R1 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
+ callr R26;
+ }
+
+ {
+ P0 = cmp.eq(R0, #0); if (!P0.new) jump:nt check_work_pending;
+ R0 = #VM_INT_DISABLE;
+ }
+
+restore_all:
+ /*
+ * Disable interrupts, if they weren't already, before reg restore.
+ * R0 gets preloaded with #VM_INT_DISABLE before we get here.
+ */
+ trap1(#HVM_TRAP1_VMSETIE)
+
+ /* do the setregs here for VM 0.5 */
+ /* R29 here should already be pointing at pt_regs */
+ {
+ R1:0 = memd(R29 + #_PT_ER_VMEL);
+ R3:2 = memd(R29 + #_PT_ER_VMPSP);
+ }
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
+ trap1(#HVM_TRAP1_VMSETREGS);
+#else
+ G1:0 = R1:0;
+ G3:2 = R3:2;
+#endif
+
+ R0 = R29
+ restore_pt_regs()
+ {
+ R1:0 = memd(R29 + #_PT_R0100);
+ R29 = add(R29, #_PT_REGS_SIZE);
+ }
+ trap1(#HVM_TRAP1_VMRTE)
+ /* Notreached */
+
+
+ .globl _K_enter_genex
+_K_enter_genex:
+ vm_event_entry(do_genex)
+
+ .globl _K_enter_interrupt
+_K_enter_interrupt:
+ vm_event_entry(arch_do_IRQ)
+
+ .globl _K_enter_trap0
+_K_enter_trap0:
+ vm_event_entry(do_trap0)
+
+ .globl _K_enter_machcheck
+_K_enter_machcheck:
+ vm_event_entry(do_machcheck)
+
+ .globl _K_enter_debug
+_K_enter_debug:
+ vm_event_entry(do_debug_exception)
+
+ .globl ret_from_fork
+ret_from_fork:
+ {
+ call schedule_tail
+ R26.H = #HI(do_work_pending);
+ }
+ {
+ P0 = cmp.eq(R24, #0);
+ R26.L = #LO(do_work_pending);
+ R0 = #VM_INT_DISABLE;
+ }
+ if (P0) jump check_work_pending
+ {
+ R0 = R25;
+ callr R24
+ }
+ {
+ jump check_work_pending
+ R0 = #VM_INT_DISABLE;
+ }
diff --git a/arch/hexagon/kernel/vm_events.c b/arch/hexagon/kernel/vm_events.c
new file mode 100644
index 000000000..04f57ef22
--- /dev/null
+++ b/arch/hexagon/kernel/vm_events.c
@@ -0,0 +1,105 @@
+/*
+ * Mostly IRQ support for Hexagon
+ *
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched/debug.h>
+#include <asm/registers.h>
+#include <linux/irq.h>
+#include <linux/hardirq.h>
+
+/*
+ * show_regs - print pt_regs structure
+ * @regs: pointer to pt_regs
+ *
+ * To-do: add all the accessor definitions to registers.h
+ *
+ * Will make this routine a lot easier to write.
+ */
+void show_regs(struct pt_regs *regs)
+{
+ show_regs_print_info(KERN_EMERG);
+
+ printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n",
+ regs->restart_r0, regs->syscall_nr);
+ printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds);
+ printk(KERN_EMERG "lc0: \t0x%08lx sa0: 0x%08lx m0: 0x%08lx\n",
+ regs->lc0, regs->sa0, regs->m0);
+ printk(KERN_EMERG "lc1: \t0x%08lx sa1: 0x%08lx m1: 0x%08lx\n",
+ regs->lc1, regs->sa1, regs->m1);
+ printk(KERN_EMERG "gp: \t0x%08lx ugp: 0x%08lx usr: 0x%08lx\n",
+ regs->gp, regs->ugp, regs->usr);
+ printk(KERN_EMERG "cs0: \t0x%08lx cs1: 0x%08lx\n",
+ regs->cs0, regs->cs1);
+ printk(KERN_EMERG "r0: \t0x%08lx %08lx %08lx %08lx\n", regs->r00,
+ regs->r01,
+ regs->r02,
+ regs->r03);
+ printk(KERN_EMERG "r4: \t0x%08lx %08lx %08lx %08lx\n", regs->r04,
+ regs->r05,
+ regs->r06,
+ regs->r07);
+ printk(KERN_EMERG "r8: \t0x%08lx %08lx %08lx %08lx\n", regs->r08,
+ regs->r09,
+ regs->r10,
+ regs->r11);
+ printk(KERN_EMERG "r12: \t0x%08lx %08lx %08lx %08lx\n", regs->r12,
+ regs->r13,
+ regs->r14,
+ regs->r15);
+ printk(KERN_EMERG "r16: \t0x%08lx %08lx %08lx %08lx\n", regs->r16,
+ regs->r17,
+ regs->r18,
+ regs->r19);
+ printk(KERN_EMERG "r20: \t0x%08lx %08lx %08lx %08lx\n", regs->r20,
+ regs->r21,
+ regs->r22,
+ regs->r23);
+ printk(KERN_EMERG "r24: \t0x%08lx %08lx %08lx %08lx\n", regs->r24,
+ regs->r25,
+ regs->r26,
+ regs->r27);
+ printk(KERN_EMERG "r28: \t0x%08lx %08lx %08lx %08lx\n", regs->r28,
+ regs->r29,
+ regs->r30,
+ regs->r31);
+
+ printk(KERN_EMERG "elr: \t0x%08lx cause: 0x%08lx user_mode: %d\n",
+ pt_elr(regs), pt_cause(regs), user_mode(regs));
+ printk(KERN_EMERG "psp: \t0x%08lx badva: 0x%08lx int_enabled: %d\n",
+ pt_psp(regs), pt_badva(regs), ints_enabled(regs));
+}
+
+void dummy_handler(struct pt_regs *regs)
+{
+ unsigned int elr = pt_elr(regs);
+ printk(KERN_ERR "Unimplemented handler; ELR=0x%08x\n", elr);
+}
+
+
+void arch_do_IRQ(struct pt_regs *regs)
+{
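+	/* Under the Hexagon VM, the event cause field carries the IRQ number. */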
+ int irq = pt_cause(regs);
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ generic_handle_irq(irq);
+ irq_exit();
+ set_irq_regs(old_regs);
+}
diff --git a/arch/hexagon/kernel/vm_init_segtable.S b/arch/hexagon/kernel/vm_init_segtable.S
new file mode 100644
index 000000000..80967f219
--- /dev/null
+++ b/arch/hexagon/kernel/vm_init_segtable.S
@@ -0,0 +1,442 @@
+/*
+ * Initial page table for Linux kernel under Hexagon VM,
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * These tables are pre-computed and linked into kernel.
+ */
+
+#include <asm/vm_mmu.h>
+/* #include <asm/iomap.h> */
+
+/*
+ * Start with mapping PA=0 to both VA=0x0 and VA=0xc0000000 as 16MB large pages.
+ * No user mode access, RWX, write-back cache. The entry needs
+ * to be replicated for all 4 virtual segments mapping to the page.
+ */
+
+/* "Big Kernel Page" */
+#define BKP(pa) (((pa) & __HVM_PTE_PGMASK_4MB) \
+ | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
+ | __HEXAGON_C_WB_L2 << 6 \
+ | __HVM_PDE_S_16MB)
+
+/* No cache version */
+
+#define BKPG_IO(pa) (((pa) & __HVM_PTE_PGMASK_16MB) \
+ | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
+ | __HVM_PDE_S_16MB | __HEXAGON_C_DEV << 6 )
+
+#define FOURK_IO(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \
+ | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
+ | __HEXAGON_C_DEV << 6 )
+
+#define L2_PTR(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \
+ | __HVM_PDE_S_4KB )
+
+#define X __HVM_PDE_S_INVALID
+
+ .p2align 12
+ .globl swapper_pg_dir
+ .globl _K_init_segtable
+swapper_pg_dir:
+/* VA 0x00000000 */
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+/* VA 0x40000000 */
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+/* VA 0x80000000 */
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+/*0xa8*/.word X,X,X,X
+#ifdef CONFIG_COMET_EARLY_UART_DEBUG
+UART_PTE_ENTRY:
+/*0xa9*/.word BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000)
+#else
+/*0xa9*/.word X,X,X,X
+#endif
+/*0xaa*/.word X,X,X,X
+/*0xab*/.word X,X,X,X
+/*0xac*/.word X,X,X,X
+/*0xad*/.word X,X,X,X
+/*0xae*/.word X,X,X,X
+/*0xaf*/.word X,X,X,X
+/*0xb0*/.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+ .word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
+_K_init_segtable:
+/* VA 0xC0000000 */
+ .word BKP(0x00000000), BKP(0x00400000), BKP(0x00800000), BKP(0x00c00000)
+ .word BKP(0x01000000), BKP(0x01400000), BKP(0x01800000), BKP(0x01c00000)
+ .word BKP(0x02000000), BKP(0x02400000), BKP(0x02800000), BKP(0x02c00000)
+ .word BKP(0x03000000), BKP(0x03400000), BKP(0x03800000), BKP(0x03c00000)
+ .word BKP(0x04000000), BKP(0x04400000), BKP(0x04800000), BKP(0x04c00000)
+ .word BKP(0x05000000), BKP(0x05400000), BKP(0x05800000), BKP(0x05c00000)
+ .word BKP(0x06000000), BKP(0x06400000), BKP(0x06800000), BKP(0x06c00000)
+ .word BKP(0x07000000), BKP(0x07400000), BKP(0x07800000), BKP(0x07c00000)
+
+ .word BKP(0x08000000), BKP(0x08400000), BKP(0x08800000), BKP(0x08c00000)
+ .word BKP(0x09000000), BKP(0x09400000), BKP(0x09800000), BKP(0x09c00000)
+ .word BKP(0x0a000000), BKP(0x0a400000), BKP(0x0a800000), BKP(0x0ac00000)
+ .word BKP(0x0b000000), BKP(0x0b400000), BKP(0x0b800000), BKP(0x0bc00000)
+ .word BKP(0x0c000000), BKP(0x0c400000), BKP(0x0c800000), BKP(0x0cc00000)
+ .word BKP(0x0d000000), BKP(0x0d400000), BKP(0x0d800000), BKP(0x0dc00000)
+ .word BKP(0x0e000000), BKP(0x0e400000), BKP(0x0e800000), BKP(0x0ec00000)
+ .word BKP(0x0f000000), BKP(0x0f400000), BKP(0x0f800000), BKP(0x0fc00000)
+
+ .word BKP(0x10000000), BKP(0x10400000), BKP(0x10800000), BKP(0x10c00000)
+ .word BKP(0x11000000), BKP(0x11400000), BKP(0x11800000), BKP(0x11c00000)
+ .word BKP(0x12000000), BKP(0x12400000), BKP(0x12800000), BKP(0x12c00000)
+ .word BKP(0x13000000), BKP(0x13400000), BKP(0x13800000), BKP(0x13c00000)
+ .word BKP(0x14000000), BKP(0x14400000), BKP(0x14800000), BKP(0x14c00000)
+ .word BKP(0x15000000), BKP(0x15400000), BKP(0x15800000), BKP(0x15c00000)
+ .word BKP(0x16000000), BKP(0x16400000), BKP(0x16800000), BKP(0x16c00000)
+ .word BKP(0x17000000), BKP(0x17400000), BKP(0x17800000), BKP(0x17c00000)
+
+ .word BKP(0x18000000), BKP(0x18400000), BKP(0x18800000), BKP(0x18c00000)
+ .word BKP(0x19000000), BKP(0x19400000), BKP(0x19800000), BKP(0x19c00000)
+ .word BKP(0x1a000000), BKP(0x1a400000), BKP(0x1a800000), BKP(0x1ac00000)
+ .word BKP(0x1b000000), BKP(0x1b400000), BKP(0x1b800000), BKP(0x1bc00000)
+ .word BKP(0x1c000000), BKP(0x1c400000), BKP(0x1c800000), BKP(0x1cc00000)
+ .word BKP(0x1d000000), BKP(0x1d400000), BKP(0x1d800000), BKP(0x1dc00000)
+ .word BKP(0x1e000000), BKP(0x1e400000), BKP(0x1e800000), BKP(0x1ec00000)
+ .word BKP(0x1f000000), BKP(0x1f400000), BKP(0x1f800000), BKP(0x1fc00000)
+
+ .word BKP(0x20000000), BKP(0x20400000), BKP(0x20800000), BKP(0x20c00000)
+ .word BKP(0x21000000), BKP(0x21400000), BKP(0x21800000), BKP(0x21c00000)
+ .word BKP(0x22000000), BKP(0x22400000), BKP(0x22800000), BKP(0x22c00000)
+ .word BKP(0x23000000), BKP(0x23400000), BKP(0x23800000), BKP(0x23c00000)
+ .word BKP(0x24000000), BKP(0x24400000), BKP(0x24800000), BKP(0x24c00000)
+ .word BKP(0x25000000), BKP(0x25400000), BKP(0x25800000), BKP(0x25c00000)
+ .word BKP(0x26000000), BKP(0x26400000), BKP(0x26800000), BKP(0x26c00000)
+ .word BKP(0x27000000), BKP(0x27400000), BKP(0x27800000), BKP(0x27c00000)
+
+ .word BKP(0x28000000), BKP(0x28400000), BKP(0x28800000), BKP(0x28c00000)
+ .word BKP(0x29000000), BKP(0x29400000), BKP(0x29800000), BKP(0x29c00000)
+ .word BKP(0x2a000000), BKP(0x2a400000), BKP(0x2a800000), BKP(0x2ac00000)
+ .word BKP(0x2b000000), BKP(0x2b400000), BKP(0x2b800000), BKP(0x2bc00000)
+ .word BKP(0x2c000000), BKP(0x2c400000), BKP(0x2c800000), BKP(0x2cc00000)
+ .word BKP(0x2d000000), BKP(0x2d400000), BKP(0x2d800000), BKP(0x2dc00000)
+ .word BKP(0x2e000000), BKP(0x2e400000), BKP(0x2e800000), BKP(0x2ec00000)
+ .word BKP(0x2f000000), BKP(0x2f400000), BKP(0x2f800000), BKP(0x2fc00000)
+
+ .word BKP(0x30000000), BKP(0x30400000), BKP(0x30800000), BKP(0x30c00000)
+ .word BKP(0x31000000), BKP(0x31400000), BKP(0x31800000), BKP(0x31c00000)
+ .word BKP(0x32000000), BKP(0x32400000), BKP(0x32800000), BKP(0x32c00000)
+ .word BKP(0x33000000), BKP(0x33400000), BKP(0x33800000), BKP(0x33c00000)
+ .word BKP(0x34000000), BKP(0x34400000), BKP(0x34800000), BKP(0x34c00000)
+ .word BKP(0x35000000), BKP(0x35400000), BKP(0x35800000), BKP(0x35c00000)
+ .word BKP(0x36000000), BKP(0x36400000), BKP(0x36800000), BKP(0x36c00000)
+ .word BKP(0x37000000), BKP(0x37400000), BKP(0x37800000), BKP(0x37c00000)
+
+ .word BKP(0x38000000), BKP(0x38400000), BKP(0x38800000), BKP(0x38c00000)
+ .word BKP(0x39000000), BKP(0x39400000), BKP(0x39800000), BKP(0x39c00000)
+ .word BKP(0x3a000000), BKP(0x3a400000), BKP(0x3a800000), BKP(0x3ac00000)
+ .word BKP(0x3b000000), BKP(0x3b400000), BKP(0x3b800000), BKP(0x3bc00000)
+ .word BKP(0x3c000000), BKP(0x3c400000), BKP(0x3c800000), BKP(0x3cc00000)
+ .word BKP(0x3d000000), BKP(0x3d400000), BKP(0x3d800000), BKP(0x3dc00000)
+_K_io_map:
+ .word X,X,X,X /* 0x3e000000 - device IO early remap */
+	.word X,X,X,X /* 0x3f000000 - hypervisor space */
+
+#if 0
+/*
+ * This is in here as an example for devices which need to be mapped really
+ * early.
+ */
+ .p2align 12
+ .globl _K_io_kmap
+ .globl _K_init_devicetable
+_K_init_devicetable: /* Should be 4MB worth of entries */
+ .word FOURK_IO(MSM_GPIO1_PHYS),FOURK_IO(MSM_GPIO2_PHYS),FOURK_IO(MSM_SIRC_PHYS),X
+ .word FOURK_IO(TLMM_GPIO1_PHYS),X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+ .word X,X,X,X
+#endif
diff --git a/arch/hexagon/kernel/vm_ops.S b/arch/hexagon/kernel/vm_ops.S
new file mode 100644
index 000000000..58f2b92a3
--- /dev/null
+++ b/arch/hexagon/kernel/vm_ops.S
@@ -0,0 +1,102 @@
+/*
+ * Hexagon VM instruction support
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <asm/hexagon_vm.h>
+
+/*
+ * C wrappers for virtual machine "instructions". These
+ * could be, and perhaps some day will be, handled as in-line
+ * macros, but for tracing/debugging it's handy to have
+ * a single point of invocation for each of them.
+ * Conveniently, they take parameters and return values
+ * consistent with the ABI calling convention.
+ */
+
+ENTRY(__vmrte)
+ trap1(#HVM_TRAP1_VMRTE);
+ jumpr R31;
+
+ENTRY(__vmsetvec)
+ trap1(#HVM_TRAP1_VMSETVEC);
+ jumpr R31;
+
+ENTRY(__vmsetie)
+ trap1(#HVM_TRAP1_VMSETIE);
+ jumpr R31;
+
+ENTRY(__vmgetie)
+ trap1(#HVM_TRAP1_VMGETIE);
+ jumpr R31;
+
+ENTRY(__vmintop)
+ trap1(#HVM_TRAP1_VMINTOP);
+ jumpr R31;
+
+ENTRY(__vmclrmap)
+ trap1(#HVM_TRAP1_VMCLRMAP);
+ jumpr R31;
+
+ENTRY(__vmnewmap)
+ r1 = #VM_NEWMAP_TYPE_PGTABLES;
+ trap1(#HVM_TRAP1_VMNEWMAP);
+ jumpr R31;
+
+ENTRY(__vmcache)
+ trap1(#HVM_TRAP1_VMCACHE);
+ jumpr R31;
+
+ENTRY(__vmgettime)
+ trap1(#HVM_TRAP1_VMGETTIME);
+ jumpr R31;
+
+ENTRY(__vmsettime)
+ trap1(#HVM_TRAP1_VMSETTIME);
+ jumpr R31;
+
+ENTRY(__vmwait)
+ trap1(#HVM_TRAP1_VMWAIT);
+ jumpr R31;
+
+ENTRY(__vmyield)
+ trap1(#HVM_TRAP1_VMYIELD);
+ jumpr R31;
+
+ENTRY(__vmstart)
+ trap1(#HVM_TRAP1_VMSTART);
+ jumpr R31;
+
+ENTRY(__vmstop)
+ trap1(#HVM_TRAP1_VMSTOP);
+ jumpr R31;
+
+ENTRY(__vmvpid)
+ trap1(#HVM_TRAP1_VMVPID);
+ jumpr R31;
+
+/* Probably not actually going to use these; see vm_entry.S */
+
+ENTRY(__vmsetregs)
+ trap1(#HVM_TRAP1_VMSETREGS);
+ jumpr R31;
+
+ENTRY(__vmgetregs)
+ trap1(#HVM_TRAP1_VMGETREGS);
+ jumpr R31;
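
Since each VM instruction above is exposed as an ordinary function, kernel C code can invoke the hypervisor directly through them. A minimal sketch of such a caller follows; the prototypes are assumed to match the declarations in asm/hexagon_vm.h, and the 0 = disabled encoding for __vmsetie() is an illustrative assumption rather than something stated in this patch.

/* Sketch only, not part of this patch. */
extern long __vmsetie(long enable);
extern long __vmgetie(void);

static long example_irq_save(void)
{
	return __vmsetie(0);	/* assumed: 0 disables; old IE state returned */
}

static void example_irq_restore(long old)
{
	__vmsetie(old);		/* put the interrupt-enable state back */
}

The single-point-of-invocation design mentioned in the header comment pays off exactly here: a tracing breakpoint on __vmsetie catches every interrupt-state change in the kernel.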
diff --git a/arch/hexagon/kernel/vm_switch.S b/arch/hexagon/kernel/vm_switch.S
new file mode 100644
index 000000000..62c6df91b
--- /dev/null
+++ b/arch/hexagon/kernel/vm_switch.S
@@ -0,0 +1,95 @@
+/*
+ * Context switch support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <asm/asm-offsets.h>
+
+.text
+
+/*
+ * The register used as a fast-path thread information pointer
+ * is determined as a kernel configuration option. If it happens
+ * to be a callee-save register, we're going to be saving and
+ * restoring it twice here.
+ *
+ * This code anticipates a revised ABI where R20-23 are added
+ * to the set of callee-save registers, but this should be
+ * backward compatible to legacy tools.
+ */
+
+
+/*
+ * void switch_to(struct task_struct *prev,
+ * struct task_struct *next, struct task_struct *last);
+ */
+ .p2align 2
+ .globl __switch_to
+ .type __switch_to, @function
+
+/*
+ * When we exit the wormhole, we need to store the previous task
+ * in the new R0's pointer. Technically it should be R2, but they should
+ * be the same; seems like a legacy thing. In short, don't butcher
+ * R0, let it go back out unmolested.
+ */
+
+__switch_to:
+ /*
+ * Push callee-saves onto "prev" stack.
+ * Here, we're sneaky because the LR and FP
+ * storage of the thread_stack structure
+ * is automagically allocated by allocframe,
+ * so we pass struct size less 8.
+ */
+ allocframe(#(_SWITCH_STACK_SIZE - 8));
+ memd(R29+#(_SWITCH_R2726))=R27:26;
+ memd(R29+#(_SWITCH_R2524))=R25:24;
+ memd(R29+#(_SWITCH_R2322))=R23:22;
+ memd(R29+#(_SWITCH_R2120))=R21:20;
+ memd(R29+#(_SWITCH_R1918))=R19:18;
+ memd(R29+#(_SWITCH_R1716))=R17:16;
+ /* Stash thread_info pointer in task_struct */
+ memw(R0+#_TASK_THREAD_INFO) = THREADINFO_REG;
+ memw(R0 +#(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP)) = R29;
+ /* Switch to "next" stack and restore callee saves from there */
+ R29 = memw(R1 + #(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP));
+ {
+ R27:26 = memd(R29+#(_SWITCH_R2726));
+ R25:24 = memd(R29+#(_SWITCH_R2524));
+ }
+ {
+ R23:22 = memd(R29+#(_SWITCH_R2322));
+ R21:20 = memd(R29+#(_SWITCH_R2120));
+ }
+ {
+ R19:18 = memd(R29+#(_SWITCH_R1918));
+ R17:16 = memd(R29+#(_SWITCH_R1716));
+ }
+ {
+ /* THREADINFO_REG is currently one of the callee-saved regs
+ * above, and so be sure to re-load it last.
+ */
+ THREADINFO_REG = memw(R1 + #_TASK_THREAD_INFO);
+ R31:30 = memd(R29+#_SWITCH_FP);
+ }
+ {
+ R29 = add(R29,#_SWITCH_STACK_SIZE);
+ jumpr R31;
+ }
+ .size __switch_to, .-__switch_to
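
In the spirit of the "natural C model" comments used elsewhere in this patch, the stack handoff performed by __switch_to can be sketched in C. The struct and field names below are illustrative stand-ins for the asm-offsets symbols used above (_TASK_THREAD_INFO, _THREAD_STRUCT_SWITCH_SP), not real kernel declarations.

/* Illustrative model only; the real register traffic happens in asm. */
struct sketch_thread {
	unsigned long switch_sp;	/* maps to _THREAD_STRUCT_SWITCH_SP */
};

struct sketch_task {
	struct sketch_thread thread;
	void *thread_info;		/* maps to _TASK_THREAD_INFO */
};

static struct sketch_task *model_switch(struct sketch_task *prev,
					struct sketch_task *next,
					unsigned long *sp)
{
	/* callee-saves were already pushed on prev's stack (allocframe) */
	prev->thread.switch_sp = (unsigned long)sp;	/* remember prev SP */
	sp = (unsigned long *)next->thread.switch_sp;	/* adopt next's SP */
	/* callee-saves and THREADINFO_REG are popped from the new sp,
	 * then jumpr R31 returns into next's saved link register.
	 */
	(void)sp;
	return prev;	/* R0 "goes back out unmolested", per the comment */
}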
diff --git a/arch/hexagon/kernel/vm_vectors.S b/arch/hexagon/kernel/vm_vectors.S
new file mode 100644
index 000000000..791a7422d
--- /dev/null
+++ b/arch/hexagon/kernel/vm_vectors.S
@@ -0,0 +1,48 @@
+/*
+ * Event jump tables
+ *
+ * Copyright (c) 2010-2012,2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <asm/hexagon_vm.h>
+
+.text
+
+/* This is registered early on to allow angel */
+.global _K_provisional_vec
+_K_provisional_vec:
+ jump 1f;
+ jump 1f;
+ jump 1f;
+ jump 1f;
+ jump 1f;
+ trap1(#HVM_TRAP1_VMRTE)
+ jump 1f;
+ jump 1f;
+
+
+.global _K_VM_event_vector
+_K_VM_event_vector:
+1:
+ jump 1b; /* Reset */
+ jump _K_enter_machcheck;
+ jump _K_enter_genex;
+ jump _K_enter_debug;
+ jump 1b; /* 4 Rsvd */
+ jump _K_enter_trap0;
+ jump 1b; /* 6 Rsvd */
+ jump _K_enter_interrupt;
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..757f95541
--- /dev/null
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -0,0 +1,80 @@
+/*
+ * Linker script for Hexagon kernel
+ *
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/asm-offsets.h> /* Most of the kernel defines are here */
+#include <asm/mem-layout.h> /* except for page_offset */
+#include <asm/cache.h> /* and now we're pulling cache line size */
+#include <asm/thread_info.h> /* and we need THREAD_SIZE too */
+
+OUTPUT_ARCH(hexagon)
+ENTRY(stext)
+
+jiffies = jiffies_64;
+
+/*
+See asm-generic/vmlinux.lds.h for expansion of some of these macros.
+See asm-generic/sections.h for seemingly required labels.
+*/
+
+#define PAGE_SIZE _PAGE_SIZE
+
+SECTIONS
+{
+ . = PAGE_OFFSET;
+
+ __init_begin = .;
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ __init_end = .;
+
+ . = ALIGN(_PAGE_SIZE);
+ _stext = .;
+ .text : AT(ADDR(.text)) {
+ _text = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.fixup)
+ }
+ _etext = .;
+
+ INIT_DATA_SECTION(PAGE_SIZE)
+
+ _sdata = .;
+ RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE)
+ RO_DATA_SECTION(PAGE_SIZE)
+ _edata = .;
+
+ EXCEPTION_TABLE(16)
+ NOTES
+
+ BSS_SECTION(_PAGE_SIZE, _PAGE_SIZE, _PAGE_SIZE)
+
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ DISCARDS
+}
diff --git a/arch/hexagon/lib/Makefile b/arch/hexagon/lib/Makefile
new file mode 100644
index 000000000..874655e85
--- /dev/null
+++ b/arch/hexagon/lib/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for hexagon-specific library files.
+#
+obj-y = checksum.o io.o memcpy.o memset.o
diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c
new file mode 100644
index 000000000..7cd0a2259
--- /dev/null
+++ b/arch/hexagon/lib/checksum.c
@@ -0,0 +1,202 @@
+/*
+ * Checksum functions for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/* This was derived from arch/alpha/lib/checksum.c */
+
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/byteorder.h>
+#include <net/checksum.h>
+#include <linux/uaccess.h>
+#include <asm/intrinsics.h>
+
+
+/* Vector value operations */
+#define SIGN(x, y) ((0x8000ULL*x)<<y)
+#define CARRY(x, y) ((0x0002ULL*x)<<y)
+#define SELECT(x, y) ((0x0001ULL*x)<<y)
+
+#define VR_NEGATE(a, b, c, d) (SIGN(a, 48) + SIGN(b, 32) + SIGN(c, 16) \
+ + SIGN(d, 0))
+#define VR_CARRY(a, b, c, d) (CARRY(a, 48) + CARRY(b, 32) + CARRY(c, 16) \
+ + CARRY(d, 0))
+#define VR_SELECT(a, b, c, d) (SELECT(a, 48) + SELECT(b, 32) + SELECT(c, 16) \
+ + SELECT(d, 0))
+
+
+/* optimized HEXAGON V3 intrinsic version */
+static inline unsigned short from64to16(u64 x)
+{
+ u64 sum;
+
+ sum = HEXAGON_P_vrmpyh_PP(x^VR_NEGATE(1, 1, 1, 1),
+ VR_SELECT(1, 1, 1, 1));
+ sum += VR_CARRY(0, 0, 1, 0);
+ sum = HEXAGON_P_vrmpyh_PP(sum, VR_SELECT(0, 0, 1, 1));
+
+ return 0xFFFF & sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented.
+ */
+__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum)
+{
+ return (__force __sum16)~from64to16(
+ (__force u64)saddr + (__force u64)daddr +
+ (__force u64)sum + ((len + proto) << 8));
+}
+
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum)
+{
+ u64 result;
+
+ result = (__force u64)saddr + (__force u64)daddr +
+ (__force u64)sum + ((len + proto) << 8);
+
+ /* Fold down to 32-bits so we don't lose in the typedef-less
+ network stack. */
+ /* 64 to 33 */
+ result = (result & 0xffffffffUL) + (result >> 32);
+ /* 33 to 32 */
+ result = (result & 0xffffffffUL) + (result >> 32);
+ return (__force __wsum)result;
+}
+EXPORT_SYMBOL(csum_tcpudp_nofold);
+
+/*
+ * Do a 64-bit checksum on an arbitrary memory area.
+ *
+ * This isn't a great routine, but it's not _horrible_ either. The
+ * inner loop could be unrolled a bit further, and there are better
+ * ways to do the carry, but this is reasonable.
+ */
+
+/* optimized HEXAGON intrinsic version, with over-read fixed */
+unsigned int do_csum(const void *voidptr, int len)
+{
+ u64 sum0, sum1, x0, x1, *ptr8_o, *ptr8_e, *ptr8;
+ int i, start, mid, end, mask;
+ const char *ptr = voidptr;
+ unsigned short *ptr2;
+ unsigned int *ptr4;
+
+ if (len <= 0)
+ return 0;
+
+	start = 0xF & (16-(((int) ptr) & 0xF));
+	mask = 0x7fffffffUL >> HEXAGON_R_cl0_R(len);
+	start = start & mask;
+
+ mid = len - start;
+ end = mid & 0xF;
+ mid = mid>>4;
+ sum0 = mid << 18;
+ sum1 = 0;
+
+ if (start & 1)
+ sum0 += (u64) (ptr[0] << 8);
+ ptr2 = (unsigned short *) &ptr[start & 1];
+ if (start & 2)
+ sum1 += (u64) ptr2[0];
+ ptr4 = (unsigned int *) &ptr[start & 3];
+ if (start & 4) {
+ sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
+ VR_NEGATE(0, 0, 1, 1)^((u64)ptr4[0]),
+ VR_SELECT(0, 0, 1, 1));
+ sum0 += VR_SELECT(0, 0, 1, 0);
+ }
+ ptr8 = (u64 *) &ptr[start & 7];
+ if (start & 8) {
+ sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
+ VR_NEGATE(1, 1, 1, 1)^(ptr8[0]),
+ VR_SELECT(1, 1, 1, 1));
+ sum1 += VR_CARRY(0, 0, 1, 0);
+ }
+ ptr8_o = (u64 *) (ptr + start);
+ ptr8_e = (u64 *) (ptr + start + 8);
+
+ if (mid) {
+ x0 = *ptr8_e; ptr8_e += 2;
+ x1 = *ptr8_o; ptr8_o += 2;
+ if (mid > 1)
+ for (i = 0; i < mid-1; i++) {
+ sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
+ x0^VR_NEGATE(1, 1, 1, 1),
+ VR_SELECT(1, 1, 1, 1));
+ sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
+ x1^VR_NEGATE(1, 1, 1, 1),
+ VR_SELECT(1, 1, 1, 1));
+ x0 = *ptr8_e; ptr8_e += 2;
+ x1 = *ptr8_o; ptr8_o += 2;
+ }
+ sum0 = HEXAGON_P_vrmpyhacc_PP(sum0, x0^VR_NEGATE(1, 1, 1, 1),
+ VR_SELECT(1, 1, 1, 1));
+ sum1 = HEXAGON_P_vrmpyhacc_PP(sum1, x1^VR_NEGATE(1, 1, 1, 1),
+ VR_SELECT(1, 1, 1, 1));
+ }
+
+ ptr4 = (unsigned int *) &ptr[start + (mid * 16) + (end & 8)];
+ if (end & 4) {
+ sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
+ VR_NEGATE(0, 0, 1, 1)^((u64)ptr4[0]),
+ VR_SELECT(0, 0, 1, 1));
+ sum1 += VR_SELECT(0, 0, 1, 0);
+ }
+ ptr2 = (unsigned short *) &ptr[start + (mid * 16) + (end & 12)];
+ if (end & 2)
+ sum0 += (u64) ptr2[0];
+
+ if (end & 1)
+ sum1 += (u64) ptr[start + (mid * 16) + (end & 14)];
+
+ ptr8 = (u64 *) &ptr[start + (mid * 16)];
+ if (end & 8) {
+ sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
+ VR_NEGATE(1, 1, 1, 1)^(ptr8[0]),
+ VR_SELECT(1, 1, 1, 1));
+ sum0 += VR_CARRY(0, 0, 1, 0);
+ }
+ sum0 = HEXAGON_P_vrmpyh_PP((sum0+sum1)^VR_NEGATE(0, 0, 0, 1),
+ VR_SELECT(0, 0, 1, 1));
+ sum0 += VR_NEGATE(0, 0, 0, 1);
+ sum0 = HEXAGON_P_vrmpyh_PP(sum0, VR_SELECT(0, 0, 1, 1));
+
+ if (start & 1)
+ sum0 = (sum0 << 8) | (0xFF & (sum0 >> 8));
+
+ return 0xFFFF & sum0;
+}
+
+/*
+ * copy from ds while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+{
+ memcpy(dst, src, len);
+ return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
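
The double fold in csum_tcpudp_nofold() above is the standard ones-complement reduction from 64 bits to 32: the first addition can carry into bit 32, the second cannot. A host-side sketch in plain C (not part of the patch) shows why two folds always suffice, even in the all-ones worst case:

#include <stdint.h>
#include <stdio.h>

static uint32_t fold64(uint64_t sum)
{
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 64 -> at most 33 bits */
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 33 -> 32 bits */
	return (uint32_t)sum;
}

int main(void)
{
	printf("%08x\n", (unsigned)fold64(0x0000000300000002ULL)); /* 00000005 */
	printf("%08x\n", (unsigned)fold64(0xffffffffffffffffULL)); /* ffffffff */
	return 0;
}

from64to16() performs the analogous 32-to-16 reduction, only using the vrmpyh multiply-accumulate intrinsic to sum all four 16-bit lanes in one step.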
diff --git a/arch/hexagon/lib/io.c b/arch/hexagon/lib/io.c
new file mode 100644
index 000000000..e5dfed1cf
--- /dev/null
+++ b/arch/hexagon/lib/io.c
@@ -0,0 +1,95 @@
+/*
+ * I/O access functions for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <asm/io.h>
+
+/* These are all FIFO routines! */
+
+/*
+ * __raw_readsw - read words a short at a time
+ * @addr: source address
+ * @data: data address
+ * @len: number of shorts to read
+ */
+void __raw_readsw(const void __iomem *addr, void *data, int len)
+{
+ const volatile short int *src = (short int *) addr;
+ short int *dst = (short int *) data;
+
+ if ((u32)data & 0x1)
+ panic("unaligned pointer to readsw");
+
+ while (len-- > 0)
+ *dst++ = *src;
+
+}
+EXPORT_SYMBOL(__raw_readsw);
+
+/*
+ * __raw_writesw - write words a short at a time
+ * @addr: destination address
+ * @data: source data address
+ * @len: number of shorts to write
+ */
+void __raw_writesw(void __iomem *addr, const void *data, int len)
+{
+ const short int *src = (short int *)data;
+ volatile short int *dst = (short int *)addr;
+
+ if ((u32)data & 0x1)
+ panic("unaligned pointer to writesw");
+
+ while (len-- > 0)
+ *dst = *src++;
+
+
+}
+EXPORT_SYMBOL(__raw_writesw);
+
+/* Pretty sure len is pre-adjusted for the length of the access already */
+void __raw_readsl(const void __iomem *addr, void *data, int len)
+{
+ const volatile long *src = (long *) addr;
+ long *dst = (long *) data;
+
+ if ((u32)data & 0x3)
+ panic("unaligned pointer to readsl");
+
+ while (len-- > 0)
+ *dst++ = *src;
+
+
+}
+EXPORT_SYMBOL(__raw_readsl);
+
+void __raw_writesl(void __iomem *addr, const void *data, int len)
+{
+ const long *src = (long *)data;
+ volatile long *dst = (long *)addr;
+
+ if ((u32)data & 0x3)
+ panic("unaligned pointer to writesl");
+
+ while (len-- > 0)
+ *dst = *src++;
+
+
+}
+EXPORT_SYMBOL(__raw_writesl);
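
Because these routines deliberately never advance addr, they suit FIFO-style device registers, where repeated accesses to one address pop or push successive entries. A hypothetical caller (the register pointer and count are made up for illustration, not taken from this patch):

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Drain 64 entries from a 16-bit FIFO register.  Every read targets
 * the same register; only the destination buffer pointer advances.
 */
static void example_drain_fifo(void __iomem *fifo_reg)
{
	u16 buf[64];

	__raw_readsw(fifo_reg, buf, ARRAY_SIZE(buf));
}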
diff --git a/arch/hexagon/lib/memcpy.S b/arch/hexagon/lib/memcpy.S
new file mode 100644
index 000000000..a46093a88
--- /dev/null
+++ b/arch/hexagon/lib/memcpy.S
@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * Description
+ *
+ * library function for memcpy where length bytes are copied from
+ * ptr_in to ptr_out. ptr_out is returned unchanged.
+ * Allows any combination of alignment on input and output pointers
+ * and length from 0 to 2^32-1
+ *
+ * Restrictions
+ * The arrays should not overlap; the program will produce undefined output
+ * if they do.
+ * For blocks less than 16 bytes, a byte-by-byte copy is performed. For
+ * 8-byte alignments and length multiples, a dword copy is performed up to
+ * 96 bytes.
+ * History
+ *
+ * DJH 5/15/09 Initial version 1.0
+ * DJH 6/ 1/09 Version 1.1 modified ABI to include R16-R19
+ * DJH 7/12/09 Version 1.2 optimized codesize down to 760 was 840
+ * DJH 10/14/09 Version 1.3 added special loop for aligned case, was
+ * overreading bloated codesize back up to 892
+ * DJH 4/20/10 Version 1.4 fixed Ldword_loop_epilog loop to prevent loads
+ * occurring if only 1 left outstanding, fixes bug
+ * # 3888, corrected for all alignments. Peeled off
+ * 1 32byte chunk from kernel loop and extended 8byte
+ * loop at end to solve all combinations and prevent
+ * over read. Fixed Ldword_loop_prolog to prevent
+ * overread for blocks less than 48bytes. Reduced
+ * codesize to 752 bytes
+ * DJH 4/21/10 version 1.5 1.4 fix broke code for input block ends not
+ * aligned to dword boundaries, underwriting by 1
+ * byte, added detection for this and fixed. A
+ * little bloat.
+ * DJH 4/23/10 version 1.6 corrected stack error, R20 was not being restored
+ * always, fixed the error of R20 being modified
+ * before it was being saved
+ * Natural C model
+ * ===============
+ * void * memcpy(char * ptr_out, char * ptr_in, int length) {
+ * int i;
+ * if(length) for(i=0; i < length; i++) { ptr_out[i] = ptr_in[i]; }
+ * return(ptr_out);
+ * }
+ *
+ * Optimized memcpy function
+ * =========================
+ * void * memcpy(char * ptr_out, char * ptr_in, int len) {
+ * int i, prolog, kernel, epilog, mask;
+ * u8 offset;
+ * s64 data0, dataF8, data70;
+ *
+ * s64 * ptr8_in;
+ * s64 * ptr8_out;
+ * s32 * ptr4;
+ * s16 * ptr2;
+ *
+ * offset = ((int) ptr_in) & 7;
+ * ptr8_in = (s64 *) &ptr_in[-offset]; //read in the aligned pointers
+ *
+ * data70 = *ptr8_in++;
+ * dataF8 = *ptr8_in++;
+ *
+ * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
+ *
+ * prolog = 32 - ((int) ptr_out);
+ * mask = 0x7fffffff >> HEXAGON_R_cl0_R(len);
+ * prolog = prolog & mask;
+ * kernel = len - prolog;
+ * epilog = kernel & 0x1F;
+ * kernel = kernel>>5;
+ *
+ * if (prolog & 1) { ptr_out[0] = (u8) data0; data0 >>= 8; ptr_out += 1;}
+ * ptr2 = (s16 *) &ptr_out[0];
+ * if (prolog & 2) { ptr2[0] = (u16) data0; data0 >>= 16; ptr_out += 2;}
+ * ptr4 = (s32 *) &ptr_out[0];
+ * if (prolog & 4) { ptr4[0] = (u32) data0; data0 >>= 32; ptr_out += 4;}
+ *
+ * offset = offset + (prolog & 7);
+ * if (offset >= 8) {
+ * data70 = dataF8;
+ * dataF8 = *ptr8_in++;
+ * }
+ * offset = offset & 0x7;
+ *
+ * prolog = prolog >> 3;
+ * if (prolog) for (i=0; i < prolog; i++) {
+ * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
+ * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
+ * data70 = dataF8;
+ * dataF8 = *ptr8_in++;
+ * }
+ * if(kernel) { kernel -= 1; epilog += 32; }
+ * if(kernel) for(i=0; i < kernel; i++) {
+ * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
+ * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
+ * data70 = *ptr8_in++;
+ *
+ * data0 = HEXAGON_P_valignb_PPp(data70, dataF8, offset);
+ * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
+ * dataF8 = *ptr8_in++;
+ *
+ * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
+ * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
+ * data70 = *ptr8_in++;
+ *
+ * data0 = HEXAGON_P_valignb_PPp(data70, dataF8, offset);
+ * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
+ * dataF8 = *ptr8_in++;
+ * }
+ * epilogdws = epilog >> 3;
+ * if (epilogdws) for (i=0; i < epilogdws; i++) {
+ * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
+ * ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
+ * data70 = dataF8;
+ * dataF8 = *ptr8_in++;
+ * }
+ * data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
+ *
+ * ptr4 = (s32 *) &ptr_out[0];
+ * if (epilog & 4) { ptr4[0] = (u32) data0; data0 >>= 32; ptr_out += 4;}
+ * ptr2 = (s16 *) &ptr_out[0];
+ * if (epilog & 2) { ptr2[0] = (u16) data0; data0 >>= 16; ptr_out += 2;}
+ * if (epilog & 1) { *ptr_out++ = (u8) data0; }
+ *
+ * return(ptr_out - length);
+ * }
+ *
+ * Codesize : 784 bytes
+ */
+
+
+#define ptr_out R0	/*  destination pointer  */
+#define ptr_in R1 /* source pointer */
+#define len R2 /* length of copy in bytes */
+
+#define data70 R13:12 /* lo 8 bytes of non-aligned transfer */
+#define dataF8 R11:10 /* hi 8 bytes of non-aligned transfer */
+#define ldata0 R7:6 /* even 8 bytes chunks */
+#define ldata1 R25:24 /* odd 8 bytes chunks */
+#define data1 R7 /* lower 8 bytes of ldata1 */
+#define data0 R6 /* lower 8 bytes of ldata0 */
+
+#define ifbyte p0 /* if transfer has bytes in epilog/prolog */
+#define ifhword p0 /* if transfer has shorts in epilog/prolog */
+#define ifword p0 /* if transfer has words in epilog/prolog */
+#define noprolog p0 /* no prolog, xfer starts at 32byte */
+#define nokernel p1 /* no 32byte multiple block in the transfer */
+#define noepilog p0 /* no epilog, xfer ends on 32byte boundary */
+#define align p2 /* alignment of input rel to 8byte boundary */
+#define kernel1 p0 /* kernel count == 1 */
+
+#define dalign R25 /* rel alignment of input to output data */
+#define star3 R16 /* number bytes in prolog - dwords */
+#define rest R8 /* length - prolog bytes */
+#define back R7 /* nr bytes > dword boundary in src block */
+#define epilog R3 /* bytes in epilog */
+#define inc R15:14	/*  inc kernel by -1 and prefetch ptr by 32  */
+#define kernel R4 /* number of 32byte chunks in kernel */
+#define ptr_in_p_128 R5 /* pointer for prefetch of input data */
+#define mask R8 /* mask used to determine prolog size */
+#define shift R8 /* used to work a shifter to extract bytes */
+#define shift2 R5	/*  in epilog to work shifter to extract bytes  */
+#define prolog R15 /* bytes in prolog */
+#define epilogdws R15 /* number dwords in epilog */
+#define shiftb R14 /* used to extract bytes */
+#define offset R9 /* same as align in reg */
+#define ptr_out_p_32 R17 /* pointer to output dczero */
+#define align888 R14 /* if simple dword loop can be used */
+#define len8 R9 /* number of dwords in length */
+#define over R20 /* nr of bytes > last inp buf dword boundary */
+
+#define ptr_in_p_128kernel R5:4 /* packed fetch pointer & kernel cnt */
+
+ .section .text
+ .p2align 4
+ .global memcpy
+ .type memcpy, @function
+memcpy:
+{
+ p2 = cmp.eq(len, #0); /* =0 */
+ align888 = or(ptr_in, ptr_out); /* %8 < 97 */
+ p0 = cmp.gtu(len, #23); /* %1, <24 */
+ p1 = cmp.eq(ptr_in, ptr_out); /* attempt to overwrite self */
+}
+{
+ p1 = or(p2, p1);
+ p3 = cmp.gtu(len, #95); /* %8 < 97 */
+ align888 = or(align888, len); /* %8 < 97 */
+ len8 = lsr(len, #3); /* %8 < 97 */
+}
+{
+ dcfetch(ptr_in); /* zero/ptrin=ptrout causes fetch */
+ p2 = bitsclr(align888, #7); /* %8 < 97 */
+ if(p1) jumpr r31; /* =0 */
+}
+{
+ p2 = and(p2,!p3); /* %8 < 97 */
+ if (p2.new) len = add(len, #-8); /* %8 < 97 */
+ if (p2.new) jump:NT .Ldwordaligned; /* %8 < 97 */
+}
+{
+ if(!p0) jump .Lbytes23orless; /* %1, <24 */
+ mask.l = #LO(0x7fffffff);
+ /* all bytes before line multiples of data */
+ prolog = sub(#0, ptr_out);
+}
+{
+ /* save r31 on stack, decrement sp by 16 */
+ allocframe(#24);
+ mask.h = #HI(0x7fffffff);
+ ptr_in_p_128 = add(ptr_in, #32);
+ back = cl0(len);
+}
+{
+	memd(sp+#0) = R17:16;	/* save r16,r17 on stack */
+ r31.l = #LO(.Lmemcpy_return); /* set up final return pointer */
+ prolog &= lsr(mask, back);
+ offset = and(ptr_in, #7);
+}
+{
+ memd(sp+#8) = R25:24; /* save r25,r24 on stack */
+ dalign = sub(ptr_out, ptr_in);
+ r31.h = #HI(.Lmemcpy_return); /* set up final return pointer */
+}
+{
+	/* see if the input buffer end is aligned */
+ over = add(len, ptr_in);
+ back = add(len, offset);
+ memd(sp+#16) = R21:20; /* save r20,r21 on stack */
+}
+{
+ noprolog = bitsclr(prolog, #7);
+ prolog = and(prolog, #31);
+ dcfetch(ptr_in_p_128);
+ ptr_in_p_128 = add(ptr_in_p_128, #32);
+}
+{
+ kernel = sub(len, prolog);
+ shift = asl(prolog, #3);
+ star3 = and(prolog, #7);
+ ptr_in = and(ptr_in, #-8);
+}
+{
+ prolog = lsr(prolog, #3);
+ epilog = and(kernel, #31);
+ ptr_out_p_32 = add(ptr_out, prolog);
+ over = and(over, #7);
+}
+{
+ p3 = cmp.gtu(back, #8);
+ kernel = lsr(kernel, #5);
+ dcfetch(ptr_in_p_128);
+ ptr_in_p_128 = add(ptr_in_p_128, #32);
+}
+{
+ p1 = cmp.eq(prolog, #0);
+ if(!p1.new) prolog = add(prolog, #1);
+ dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
+ ptr_in_p_128 = add(ptr_in_p_128, #32);
+}
+{
+ nokernel = cmp.eq(kernel,#0);
+ dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
+ ptr_in_p_128 = add(ptr_in_p_128, #32);
+ shiftb = and(shift, #8);
+}
+{
+ dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
+ ptr_in_p_128 = add(ptr_in_p_128, #32);
+ if(nokernel) jump .Lskip64;
+	p2 = cmp.eq(kernel, #1);	/* skip over if kernel == 0 */
+}
+{
+ dczeroa(ptr_out_p_32);
+ /* don't advance pointer */
+ if(!p2) ptr_out_p_32 = add(ptr_out_p_32, #32);
+}
+{
+ dalign = and(dalign, #31);
+ dczeroa(ptr_out_p_32);
+}
+.Lskip64:
+{
+ data70 = memd(ptr_in++#16);
+ if(p3) dataF8 = memd(ptr_in+#8);
+ if(noprolog) jump .Lnoprolog32;
+ align = offset;
+}
+/* up to initial 7 bytes */
+{
+ ldata0 = valignb(dataF8, data70, align);
+ ifbyte = tstbit(shift,#3);
+ offset = add(offset, star3);
+}
+{
+ if(ifbyte) memb(ptr_out++#1) = data0;
+ ldata0 = lsr(ldata0, shiftb);
+ shiftb = and(shift, #16);
+ ifhword = tstbit(shift,#4);
+}
+{
+ if(ifhword) memh(ptr_out++#2) = data0;
+ ldata0 = lsr(ldata0, shiftb);
+ ifword = tstbit(shift,#5);
+ p2 = cmp.gtu(offset, #7);
+}
+{
+ if(ifword) memw(ptr_out++#4) = data0;
+ if(p2) data70 = dataF8;
+ if(p2) dataF8 = memd(ptr_in++#8); /* another 8 bytes */
+ align = offset;
+}
+.Lnoprolog32:
+{
+ p3 = sp1loop0(.Ldword_loop_prolog, prolog)
+	rest = sub(len, star3);	/* what's left after the loop */
+ p0 = cmp.gt(over, #0);
+}
+ if(p0) rest = add(rest, #16);
+.Ldword_loop_prolog:
+{
+ if(p3) memd(ptr_out++#8) = ldata0;
+ ldata0 = valignb(dataF8, data70, align);
+ p0 = cmp.gt(rest, #16);
+}
+{
+ data70 = dataF8;
+ if(p0) dataF8 = memd(ptr_in++#8);
+ rest = add(rest, #-8);
+}:endloop0
+.Lkernel:
+{
+ /* kernel is at least 32bytes */
+ p3 = cmp.gtu(kernel, #0);
+ /* last itn. remove edge effects */
+ if(p3.new) kernel = add(kernel, #-1);
+ /* dealt with in last dword loop */
+ if(p3.new) epilog = add(epilog, #32);
+}
+{
+ nokernel = cmp.eq(kernel, #0); /* after adjustment, recheck */
+ if(nokernel.new) jump:NT .Lepilog; /* likely not taken */
+ inc = combine(#32, #-1);
+ p3 = cmp.gtu(dalign, #24);
+}
+{
+ if(p3) jump .Lodd_alignment;
+}
+{
+ loop0(.Loword_loop_25to31, kernel);
+ kernel1 = cmp.gtu(kernel, #1);
+ rest = kernel;
+}
+ .falign
+.Loword_loop_25to31:
+{
+ dcfetch(ptr_in_p_128); /* prefetch 4 lines ahead */
+ if(kernel1) ptr_out_p_32 = add(ptr_out_p_32, #32);
+}
+{
+ dczeroa(ptr_out_p_32); /* reserve the next 32bytes in cache */
+ p3 = cmp.eq(kernel, rest);
+}
+{
+ /* kernel -= 1 */
+ ptr_in_p_128kernel = vaddw(ptr_in_p_128kernel, inc);
+ /* kill write on first iteration */
+ if(!p3) memd(ptr_out++#8) = ldata1;
+ ldata1 = valignb(dataF8, data70, align);
+ data70 = memd(ptr_in++#8);
+}
+{
+ memd(ptr_out++#8) = ldata0;
+ ldata0 = valignb(data70, dataF8, align);
+ dataF8 = memd(ptr_in++#8);
+}
+{
+ memd(ptr_out++#8) = ldata1;
+ ldata1 = valignb(dataF8, data70, align);
+ data70 = memd(ptr_in++#8);
+}
+{
+ memd(ptr_out++#8) = ldata0;
+ ldata0 = valignb(data70, dataF8, align);
+ dataF8 = memd(ptr_in++#8);
+ kernel1 = cmp.gtu(kernel, #1);
+}:endloop0
+{
+ memd(ptr_out++#8) = ldata1;
+ jump .Lepilog;
+}
+.Lodd_alignment:
+{
+ loop0(.Loword_loop_00to24, kernel);
+ kernel1 = cmp.gtu(kernel, #1);
+ rest = add(kernel, #-1);
+}
+ .falign
+.Loword_loop_00to24:
+{
+ dcfetch(ptr_in_p_128); /* prefetch 4 lines ahead */
+ ptr_in_p_128kernel = vaddw(ptr_in_p_128kernel, inc);
+ if(kernel1) ptr_out_p_32 = add(ptr_out_p_32, #32);
+}
+{
+ dczeroa(ptr_out_p_32); /* reserve the next 32bytes in cache */
+}
+{
+ memd(ptr_out++#8) = ldata0;
+ ldata0 = valignb(dataF8, data70, align);
+ data70 = memd(ptr_in++#8);
+}
+{
+ memd(ptr_out++#8) = ldata0;
+ ldata0 = valignb(data70, dataF8, align);
+ dataF8 = memd(ptr_in++#8);
+}
+{
+ memd(ptr_out++#8) = ldata0;
+ ldata0 = valignb(dataF8, data70, align);
+ data70 = memd(ptr_in++#8);
+}
+{
+ memd(ptr_out++#8) = ldata0;
+ ldata0 = valignb(data70, dataF8, align);
+ dataF8 = memd(ptr_in++#8);
+ kernel1 = cmp.gtu(kernel, #1);
+}:endloop0
+.Lepilog:
+{
+ noepilog = cmp.eq(epilog,#0);
+ epilogdws = lsr(epilog, #3);
+ kernel = and(epilog, #7);
+}
+{
+ if(noepilog) jumpr r31;
+ if(noepilog) ptr_out = sub(ptr_out, len);
+ p3 = cmp.eq(epilogdws, #0);
+ shift2 = asl(epilog, #3);
+}
+{
+ shiftb = and(shift2, #32);
+ ifword = tstbit(epilog,#2);
+ if(p3) jump .Lepilog60;
+ if(!p3) epilog = add(epilog, #-16);
+}
+{
+ loop0(.Ldword_loop_epilog, epilogdws);
+	/* stop criterion is lsbs; unless 0, then it's 8 */
+ p3 = cmp.eq(kernel, #0);
+ if(p3.new) kernel= #8;
+ p1 = cmp.gt(over, #0);
+}
+ /* if not aligned to end of buffer execute 1 more iteration */
+ if(p1) kernel= #0;
+.Ldword_loop_epilog:
+{
+ memd(ptr_out++#8) = ldata0;
+ ldata0 = valignb(dataF8, data70, align);
+ p3 = cmp.gt(epilog, kernel);
+}
+{
+ data70 = dataF8;
+ if(p3) dataF8 = memd(ptr_in++#8);
+ epilog = add(epilog, #-8);
+}:endloop0
+/* copy last 7 bytes */
+.Lepilog60:
+{
+ if(ifword) memw(ptr_out++#4) = data0;
+ ldata0 = lsr(ldata0, shiftb);
+ ifhword = tstbit(epilog,#1);
+ shiftb = and(shift2, #16);
+}
+{
+ if(ifhword) memh(ptr_out++#2) = data0;
+ ldata0 = lsr(ldata0, shiftb);
+ ifbyte = tstbit(epilog,#0);
+ if(ifbyte.new) len = add(len, #-1);
+}
+{
+ if(ifbyte) memb(ptr_out) = data0;
+ ptr_out = sub(ptr_out, len); /* return dest pointer */
+ jumpr r31;
+}
+/* do byte copy for small n */
+.Lbytes23orless:
+{
+ p3 = sp1loop0(.Lbyte_copy, len);
+ len = add(len, #-1);
+}
+.Lbyte_copy:
+{
+ data0 = memb(ptr_in++#1);
+ if(p3) memb(ptr_out++#1) = data0;
+}:endloop0
+{
+ memb(ptr_out) = data0;
+ ptr_out = sub(ptr_out, len);
+ jumpr r31;
+}
+/* do dword copies for aligned in, out and length */
+.Ldwordaligned:
+{
+ p3 = sp1loop0(.Ldword_copy, len8);
+}
+.Ldword_copy:
+{
+ if(p3) memd(ptr_out++#8) = ldata0;
+ ldata0 = memd(ptr_in++#8);
+}:endloop0
+{
+ memd(ptr_out) = ldata0;
+ ptr_out = sub(ptr_out, len);
+ jumpr r31; /* return to function caller */
+}
+.Lmemcpy_return:
+ r21:20 = memd(sp+#16); /* restore r20+r21 */
+{
+ r25:24 = memd(sp+#8); /* restore r24+r25 */
+ r17:16 = memd(sp+#0); /* restore r16+r17 */
+}
+	deallocframe;	/* restore r31 and increment stack by 16 */
+ jumpr r31
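
The prolog/kernel/epilog arithmetic described in the header comment can be checked numerically. A host-side sketch with ptr_out = 0x1003 and len = 100 (the cl0()-based mask that clamps the prolog for very short copies is omitted here, since len exceeds 32):

#include <stdio.h>

int main(void)
{
	unsigned ptr_out = 0x1003, len = 100;
	unsigned prolog = (32 - ptr_out) & 31;	/* bytes up to a 32B boundary */
	unsigned rest   = len - prolog;
	unsigned kernel = rest >> 5;		/* full 32-byte chunks */
	unsigned epilog = rest & 0x1F;		/* trailing bytes */

	/* prints: prolog=29 kernel=2 epilog=7  (29 + 64 + 7 == 100) */
	printf("prolog=%u kernel=%u epilog=%u\n", prolog, kernel, epilog);
	return 0;
}

The 29 prolog bytes are themselves retired as the 1/2/4-byte stores plus the dword loop seen above, which is why the code further splits prolog into star3 (the sub-dword remainder) and a dword count.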
diff --git a/arch/hexagon/lib/memset.S b/arch/hexagon/lib/memset.S
new file mode 100644
index 000000000..9341889ea
--- /dev/null
+++ b/arch/hexagon/lib/memset.S
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+
+/* HEXAGON assembly optimized memset */
+/* Replaces the standard library function memset */
+
+
+ .macro HEXAGON_OPT_FUNC_BEGIN name
+ .text
+ .p2align 4
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro HEXAGON_OPT_FUNC_FINISH name
+ .size \name, . - \name
+ .endm
+
+/* FUNCTION: memset (v2 version) */
+#if __HEXAGON_ARCH__ < 3
+HEXAGON_OPT_FUNC_BEGIN memset
+ {
+ r6 = #8
+ r7 = extractu(r0, #3 , #0)
+ p0 = cmp.eq(r2, #0)
+ p1 = cmp.gtu(r2, #7)
+ }
+ {
+ r4 = vsplatb(r1)
+ r8 = r0 /* leave r0 intact for return val */
+ r9 = sub(r6, r7) /* bytes until double alignment */
+ if p0 jumpr r31 /* count == 0, so return */
+ }
+ {
+ r3 = #0
+ r7 = #0
+ p0 = tstbit(r9, #0)
+ if p1 jump 2f /* skip byte loop */
+ }
+
+/* less than 8 bytes to set, so just set a byte at a time and return */
+
+ loop0(1f, r2) /* byte loop */
+ .falign
+1: /* byte loop */
+ {
+ memb(r8++#1) = r4
+ }:endloop0
+ jumpr r31
+ .falign
+2: /* skip byte loop */
+ {
+ r6 = #1
+ p0 = tstbit(r9, #1)
+ p1 = cmp.eq(r2, #1)
+ if !p0 jump 3f /* skip initial byte store */
+ }
+ {
+ memb(r8++#1) = r4
+ r3:2 = sub(r3:2, r7:6)
+ if p1 jumpr r31
+ }
+ .falign
+3: /* skip initial byte store */
+ {
+ r6 = #2
+ p0 = tstbit(r9, #2)
+ p1 = cmp.eq(r2, #2)
+ if !p0 jump 4f /* skip initial half store */
+ }
+ {
+ memh(r8++#2) = r4
+ r3:2 = sub(r3:2, r7:6)
+ if p1 jumpr r31
+ }
+ .falign
+4: /* skip initial half store */
+ {
+ r6 = #4
+ p0 = cmp.gtu(r2, #7)
+ p1 = cmp.eq(r2, #4)
+ if !p0 jump 5f /* skip initial word store */
+ }
+ {
+ memw(r8++#4) = r4
+ r3:2 = sub(r3:2, r7:6)
+ p0 = cmp.gtu(r2, #11)
+ if p1 jumpr r31
+ }
+ .falign
+5: /* skip initial word store */
+ {
+ r10 = lsr(r2, #3)
+ p1 = cmp.eq(r3, #1)
+ if !p0 jump 7f /* skip double loop */
+ }
+ {
+ r5 = r4
+ r6 = #8
+ loop0(6f, r10) /* double loop */
+ }
+
+/* set bytes a double word at a time */
+
+ .falign
+6: /* double loop */
+ {
+ memd(r8++#8) = r5:4
+ r3:2 = sub(r3:2, r7:6)
+ p1 = cmp.eq(r2, #8)
+ }:endloop0
+ .falign
+7: /* skip double loop */
+ {
+ p0 = tstbit(r2, #2)
+ if p1 jumpr r31
+ }
+ {
+ r6 = #4
+ p0 = tstbit(r2, #1)
+ p1 = cmp.eq(r2, #4)
+ if !p0 jump 8f /* skip final word store */
+ }
+ {
+ memw(r8++#4) = r4
+ r3:2 = sub(r3:2, r7:6)
+ if p1 jumpr r31
+ }
+ .falign
+8: /* skip final word store */
+ {
+ p1 = cmp.eq(r2, #2)
+ if !p0 jump 9f /* skip final half store */
+ }
+ {
+ memh(r8++#2) = r4
+ if p1 jumpr r31
+ }
+ .falign
+9: /* skip final half store */
+ {
+ memb(r8++#1) = r4
+ jumpr r31
+ }
+HEXAGON_OPT_FUNC_FINISH memset
+#endif
+
+
+/* FUNCTION: memset (v3 and higher version) */
+#if __HEXAGON_ARCH__ >= 3
+HEXAGON_OPT_FUNC_BEGIN memset
+ {
+ r7=vsplatb(r1)
+ r6 = r0
+ if (r2==#0) jump:nt .L1
+ }
+ {
+ r5:4=combine(r7,r7)
+ p0 = cmp.gtu(r2,#8)
+ if (p0.new) jump:nt .L3
+ }
+ {
+ r3 = r0
+ loop0(.L47,r2)
+ }
+ .falign
+.L47:
+ {
+ memb(r3++#1) = r1
+ }:endloop0 /* start=.L47 */
+ jumpr r31
+.L3:
+ {
+ p0 = tstbit(r0,#0)
+ if (!p0.new) jump:nt .L8
+ p1 = cmp.eq(r2, #1)
+ }
+ {
+ r6 = add(r0, #1)
+ r2 = add(r2,#-1)
+ memb(r0) = r1
+ if (p1) jump .L1
+ }
+.L8:
+ {
+ p0 = tstbit(r6,#1)
+ if (!p0.new) jump:nt .L10
+ }
+ {
+ r2 = add(r2,#-2)
+ memh(r6++#2) = r7
+ p0 = cmp.eq(r2, #2)
+ if (p0.new) jump:nt .L1
+ }
+.L10:
+ {
+ p0 = tstbit(r6,#2)
+ if (!p0.new) jump:nt .L12
+ }
+ {
+ r2 = add(r2,#-4)
+ memw(r6++#4) = r7
+ p0 = cmp.eq(r2, #4)
+ if (p0.new) jump:nt .L1
+ }
+.L12:
+ {
+ p0 = cmp.gtu(r2,#127)
+ if (!p0.new) jump:nt .L14
+ }
+ r3 = and(r6,#31)
+ if (r3==#0) jump:nt .L17
+ {
+ memd(r6++#8) = r5:4
+ r2 = add(r2,#-8)
+ }
+ r3 = and(r6,#31)
+ if (r3==#0) jump:nt .L17
+ {
+ memd(r6++#8) = r5:4
+ r2 = add(r2,#-8)
+ }
+ r3 = and(r6,#31)
+ if (r3==#0) jump:nt .L17
+ {
+ memd(r6++#8) = r5:4
+ r2 = add(r2,#-8)
+ }
+.L17:
+ {
+ r3 = lsr(r2,#5)
+ if (r1!=#0) jump:nt .L18
+ }
+ {
+ r8 = r3
+ r3 = r6
+ loop0(.L46,r3)
+ }
+ .falign
+.L46:
+ {
+ dczeroa(r6)
+ r6 = add(r6,#32)
+ r2 = add(r2,#-32)
+ }:endloop0 /* start=.L46 */
+.L14:
+ {
+ p0 = cmp.gtu(r2,#7)
+ if (!p0.new) jump:nt .L28
+ r8 = lsr(r2,#3)
+ }
+ loop0(.L44,r8)
+ .falign
+.L44:
+ {
+ memd(r6++#8) = r5:4
+ r2 = add(r2,#-8)
+ }:endloop0 /* start=.L44 */
+.L28:
+ {
+ p0 = tstbit(r2,#2)
+ if (!p0.new) jump:nt .L33
+ }
+ {
+ r2 = add(r2,#-4)
+ memw(r6++#4) = r7
+ }
+.L33:
+ {
+ p0 = tstbit(r2,#1)
+ if (!p0.new) jump:nt .L35
+ }
+ {
+ r2 = add(r2,#-2)
+ memh(r6++#2) = r7
+ }
+.L35:
+ p0 = cmp.eq(r2,#1)
+ if (p0) memb(r6) = r1
+.L1:
+ jumpr r31
+.L18:
+ loop0(.L45,r3)
+ .falign
+.L45:
+ dczeroa(r6)
+ {
+ memd(r6++#8) = r5:4
+ r2 = add(r2,#-32)
+ }
+ memd(r6++#8) = r5:4
+ memd(r6++#8) = r5:4
+ {
+ memd(r6++#8) = r5:4
+ }:endloop0 /* start=.L45 */
+ jump .L14
+HEXAGON_OPT_FUNC_FINISH memset
+#endif
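
Both variants rely on vsplatb to replicate the fill byte across a register before the wide memh/memw/memd stores (the v3 code then pairs two such words, r5:4, for 8-byte stores). A plain C equivalent of that replication step, for reference only:

#include <stdint.h>
#include <stdio.h>

static uint32_t vsplatb(uint32_t c)
{
	c &= 0xff;	/* only the low byte is replicated */
	return c | (c << 8) | (c << 16) | (c << 24);
}

int main(void)
{
	printf("%08x\n", vsplatb(0x41));	/* prints 41414141 */
	return 0;
}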
diff --git a/arch/hexagon/mm/Makefile b/arch/hexagon/mm/Makefile
new file mode 100644
index 000000000..1a0be4d57
--- /dev/null
+++ b/arch/hexagon/mm/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Hexagon memory management subsystem
+#
+
+obj-y := init.o pgalloc.o ioremap.o uaccess.o vm_fault.o cache.o
+obj-y += copy_to_user.o copy_from_user.o strnlen_user.o vm_tlb.o
diff --git a/arch/hexagon/mm/cache.c b/arch/hexagon/mm/cache.c
new file mode 100644
index 000000000..a7c6d827d
--- /dev/null
+++ b/arch/hexagon/mm/cache.c
@@ -0,0 +1,139 @@
+/*
+ * Cache management functions for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/hexagon_vm.h>
+
+#define spanlines(start, end) \
+ (((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1)
+
+void flush_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long lines = spanlines(start, end-1);
+ unsigned long i, flags;
+
+ start &= ~(LINESIZE - 1);
+
+ local_irq_save(flags);
+
+ for (i = 0; i < lines; i++) {
+ __asm__ __volatile__ (
+ " dccleaninva(%0); "
+ :
+ : "r" (start)
+ );
+ start += LINESIZE;
+ }
+ local_irq_restore(flags);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long lines = spanlines(start, end-1);
+ unsigned long i, flags;
+
+ start &= ~(LINESIZE - 1);
+
+ local_irq_save(flags);
+
+ for (i = 0; i < lines; i++) {
+ __asm__ __volatile__ (
+ " dccleana(%0); "
+ " icinva(%0); "
+ :
+ : "r" (start)
+ );
+ start += LINESIZE;
+ }
+ __asm__ __volatile__ (
+ "isync"
+ );
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void hexagon_clean_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long lines = spanlines(start, end-1);
+ unsigned long i, flags;
+
+ start &= ~(LINESIZE - 1);
+
+ local_irq_save(flags);
+
+ for (i = 0; i < lines; i++) {
+ __asm__ __volatile__ (
+ " dccleana(%0); "
+ :
+ : "r" (start)
+ );
+ start += LINESIZE;
+ }
+ local_irq_restore(flags);
+}
+
+void hexagon_inv_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long lines = spanlines(start, end-1);
+ unsigned long i, flags;
+
+ start &= ~(LINESIZE - 1);
+
+ local_irq_save(flags);
+
+ for (i = 0; i < lines; i++) {
+ __asm__ __volatile__ (
+ " dcinva(%0); "
+ :
+ : "r" (start)
+ );
+ start += LINESIZE;
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * This is just really brutal and shouldn't be used anyway,
+ * especially on V2. Left here just in case.
+ */
+void flush_cache_all_hexagon(void)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ __vmcache_ickill();
+ __vmcache_dckill();
+ __vmcache_l2kill();
+ local_irq_restore(flags);
+ mb();
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, void *src, int len)
+{
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC) {
+ flush_icache_range((unsigned long) dst,
+ (unsigned long) dst + len);
+ }
+}
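
The spanlines() macro counts how many cache lines an inclusive byte range touches, which is why every caller passes end-1. A host-side check, assuming the 32-byte line geometry (LINESIZE = 32, LINEBITS = 5) that the Hexagon cache headers are expected to provide:

#include <stdio.h>

#define LINESIZE 32
#define LINEBITS 5
#define spanlines(start, end) \
	(((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1)

int main(void)
{
	/* 2 bytes straddling a line boundary -> 2 lines */
	printf("%lu\n", spanlines(31UL, 32UL));
	/* one full line, bytes [0,31] -> 1 line */
	printf("%lu\n", spanlines(0UL, 31UL));
	return 0;
}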
diff --git a/arch/hexagon/mm/copy_from_user.S b/arch/hexagon/mm/copy_from_user.S
new file mode 100644
index 000000000..7da066fbd
--- /dev/null
+++ b/arch/hexagon/mm/copy_from_user.S
@@ -0,0 +1,114 @@
+/*
+ * User memory copy functions for kernel
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The right way to do this involves valignb.
+ * The easy way is to only speed up the case of similar src/dest alignment.
+ */
+
+/*
+ * Copy to/from user are the same, except that for packets with a load and
+ * a store, I don't know how to tell which kind of exception we got.
+ * Therefore, we duplicate the function, and handle faulting addresses
+ * differently for each function
+ */
+
+/*
+ * copy from user: loads can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME raw_copy_from_user
+#include "copy_user_template.S"
+
+ /* LOAD FAULTS from COPY_FROM_USER */
+
+ /* Alignment loop. r2 has been updated. Return it. */
+ .falign
+1009:
+2009:
+4009:
+ {
+ r0 = r2
+ jumpr r31
+ }
+ /* Normal copy loops. Do epilog. Use src-src_sav to compute distance */
+ /* X - (A - B) == X + B - A */
+ .falign
+8089:
+ {
+ memd(dst) = d_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+ .falign
+4089:
+ {
+ memw(dst) = w_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+ .falign
+2089:
+ {
+ memh(dst) = w_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+ .falign
+1089:
+ {
+ memb(dst) = w_dbuf
+ r2 += sub(src_sav,src)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+
+ /* COPY FROM USER: only loads can fail */
+
+ .section __ex_table,"a"
+ .long 1000b,1009b
+ .long 2000b,2009b
+ .long 4000b,4009b
+ .long 8080b,8089b
+ .long 4080b,4089b
+ .long 2080b,2089b
+ .long 1080b,1089b
+ .previous
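
Each __ex_table pair above maps a potentially faulting user access to its fixup: if the load at local label 1000 takes an exception, the fault handler redirects execution to 1009, which returns the count of bytes not copied in r0, per the raw_copy_from_user() contract. Conceptually the section holds entries like the sketch below; this assumes the generic two-word absolute layout, which matches the .long address pairs emitted here.

/* Sketch of the table layout implied by the .long pairs above. */
struct extable_entry_sketch {
	unsigned long insn;	/* address of the access that may fault */
	unsigned long fixup;	/* where the fault handler resumes */
};

static unsigned long find_fixup(const struct extable_entry_sketch *tbl,
				int n, unsigned long fault_pc)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].insn == fault_pc)
			return tbl[i].fixup;
	return 0;	/* no fixup: a genuine kernel fault */
}

The real kernel sorts this section at build time and binary-searches it; the linear scan is purely illustrative.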
diff --git a/arch/hexagon/mm/copy_to_user.S b/arch/hexagon/mm/copy_to_user.S
new file mode 100644
index 000000000..a7b7f8db2
--- /dev/null
+++ b/arch/hexagon/mm/copy_to_user.S
@@ -0,0 +1,92 @@
+/*
+ * User memory copying routines for the Hexagon Kernel
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/* The right way to do this involves valignb.
+ * The easy way is to only speed up the case of similar src/dest alignment.
+ */
+
+/*
+ * Copy to/from user are the same, except that for packets with a load and
+ * a store, I don't know how to tell which kind of exception we got.
+ * Therefore, we duplicate the function, and handle faulting addresses
+ * differently for each function
+ */
+
+/*
+ * copy to user: stores can fault
+ */
+#define src_sav r13
+#define dst_sav r12
+#define src_dst_sav r13:12
+#define d_dbuf r15:14
+#define w_dbuf r15
+
+#define dst r0
+#define src r1
+#define bytes r2
+#define loopcount r5
+
+#define FUNCNAME raw_copy_to_user
+#include "copy_user_template.S"
+
+ /* STORE FAULTS from COPY_TO_USER */
+ .falign
+1109:
+2109:
+4109:
+ /* Alignment loop. r2 has been updated. Return it. */
+ {
+ r0 = r2
+ jumpr r31
+ }
+ /* Normal copy loops. Use dst-dst_sav to compute distance */
+ /* dst holds best write, no need to unwind any loops */
+ /* X - (A - B) == X + B - A */
+ .falign
+8189:
+8199:
+4189:
+4199:
+2189:
+2199:
+1189:
+1199:
+ {
+ r2 += sub(dst_sav,dst)
+ }
+ {
+ r0 = r2
+ jumpr r31
+ }
+
+ /* COPY TO USER: only stores can fail */
+ .section __ex_table,"a"
+ .long 1100b,1109b
+ .long 2100b,2109b
+ .long 4100b,4109b
+ .long 8180b,8189b
+ .long 8190b,8199b
+ .long 4180b,4189b
+ .long 4190b,4199b
+ .long 2180b,2189b
+ .long 2190b,2199b
+ .long 1180b,1189b
+ .long 1190b,1199b
+ .previous
diff --git a/arch/hexagon/mm/copy_user_template.S b/arch/hexagon/mm/copy_user_template.S
new file mode 100644
index 000000000..254d8cc76
--- /dev/null
+++ b/arch/hexagon/mm/copy_user_template.S
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/* Numerology:
+ * WXYZ
+ * W: width in bytes
+ * X: Load=0, Store=1
+ * Y: Location 0=preamble, 8=loop, 9=epilog
+ * Z: Location=0, handler=9
+ */
+ .text
+ .global FUNCNAME
+ .type FUNCNAME, @function
+ .p2align 5
+FUNCNAME:
+ {
+ p0 = cmp.gtu(bytes,#0)
+ if (!p0.new) jump:nt .Ldone
+ r3 = or(dst,src)
+ r4 = xor(dst,src)
+ }
+ {
+ p1 = cmp.gtu(bytes,#15)
+ p0 = bitsclr(r3,#7)
+ if (!p0.new) jump:nt .Loop_not_aligned_8
+ src_dst_sav = combine(src,dst)
+ }
+
+ {
+ loopcount = lsr(bytes,#3)
+ if (!p1) jump .Lsmall
+ }
+ p3=sp1loop0(.Loop8,loopcount)
+.Loop8:
+8080:
+8180:
+ {
+ if (p3) memd(dst++#8) = d_dbuf
+ d_dbuf = memd(src++#8)
+ }:endloop0
+8190:
+ {
+ memd(dst++#8) = d_dbuf
+ bytes -= asl(loopcount,#3)
+ jump .Lsmall
+ }
+
+.Loop_not_aligned_8:
+ {
+ p0 = bitsclr(r4,#7)
+ if (p0.new) jump:nt .Lalign
+ }
+ {
+ p0 = bitsclr(r3,#3)
+ if (!p0.new) jump:nt .Loop_not_aligned_4
+ p1 = cmp.gtu(bytes,#7)
+ }
+
+ {
+ if (!p1) jump .Lsmall
+ loopcount = lsr(bytes,#2)
+ }
+ p3=sp1loop0(.Loop4,loopcount)
+.Loop4:
+4080:
+4180:
+ {
+ if (p3) memw(dst++#4) = w_dbuf
+ w_dbuf = memw(src++#4)
+ }:endloop0
+4190:
+ {
+ memw(dst++#4) = w_dbuf
+ bytes -= asl(loopcount,#2)
+ jump .Lsmall
+ }
+
+.Loop_not_aligned_4:
+ {
+ p0 = bitsclr(r3,#1)
+ if (!p0.new) jump:nt .Loop_not_aligned
+ p1 = cmp.gtu(bytes,#3)
+ }
+
+ {
+ if (!p1) jump .Lsmall
+ loopcount = lsr(bytes,#1)
+ }
+ p3=sp1loop0(.Loop2,loopcount)
+.Loop2:
+2080:
+2180:
+ {
+ if (p3) memh(dst++#2) = w_dbuf
+ w_dbuf = memuh(src++#2)
+ }:endloop0
+2190:
+ {
+ memh(dst++#2) = w_dbuf
+ bytes -= asl(loopcount,#1)
+ jump .Lsmall
+ }
+
+.Loop_not_aligned: /* Works for as small as one byte */
+ p3=sp1loop0(.Loop1,bytes)
+.Loop1:
+1080:
+1180:
+ {
+ if (p3) memb(dst++#1) = w_dbuf
+ w_dbuf = memub(src++#1)
+ }:endloop0
+ /* Done */
+1190:
+ {
+ memb(dst) = w_dbuf
+ jumpr r31
+ r0 = #0
+ }
+
+.Lsmall:
+ {
+ p0 = cmp.gtu(bytes,#0)
+ if (p0.new) jump:nt .Loop_not_aligned
+ }
+.Ldone:
+ {
+ r0 = #0
+ jumpr r31
+ }
+ .falign
+.Lalign:
+1000:
+ {
+ if (p0.new) w_dbuf = memub(src)
+ p0 = tstbit(src,#0)
+ if (!p1) jump .Lsmall
+ }
+1100:
+ {
+ if (p0) memb(dst++#1) = w_dbuf
+ if (p0) bytes = add(bytes,#-1)
+ if (p0) src = add(src,#1)
+ }
+2000:
+ {
+ if (p0.new) w_dbuf = memuh(src)
+ p0 = tstbit(src,#1)
+ if (!p1) jump .Lsmall
+ }
+2100:
+ {
+ if (p0) memh(dst++#2) = w_dbuf
+ if (p0) bytes = add(bytes,#-2)
+ if (p0) src = add(src,#2)
+ }
+4000:
+ {
+ if (p0.new) w_dbuf = memw(src)
+ p0 = tstbit(src,#2)
+ if (!p1) jump .Lsmall
+ }
+4100:
+ {
+ if (p0) memw(dst++#4) = w_dbuf
+ if (p0) bytes = add(bytes,#-4)
+ if (p0) src = add(src,#4)
+ jump FUNCNAME
+ }
+ .size FUNCNAME,.-FUNCNAME
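
The fault handlers make each instantiation return, in r0, the number of
bytes left uncopied; zero means complete success. That is the standard
raw_copy_*_user contract. A hypothetical caller sketch (example_put, ubuf,
kbuf, and len are invented names; real callers usually go through
copy_to_user(), which adds the access_ok() check):

	#include <linux/uaccess.h>

	static int example_put(void __user *ubuf, const void *kbuf, size_t len)
	{
		/* Nonzero return == bytes that could not be copied. */
		if (raw_copy_to_user(ubuf, kbuf, len))
			return -EFAULT;
		return 0;
	}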
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
new file mode 100644
index 000000000..1495d45e4
--- /dev/null
+++ b/arch/hexagon/mm/init.c
@@ -0,0 +1,280 @@
+/*
+ * Memory subsystem initialization for Hexagon
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <asm/atomic.h>
+#include <linux/highmem.h>
+#include <asm/tlb.h>
+#include <asm/sections.h>
+#include <asm/vm_mmu.h>
+
+/*
+ * Define a startpg just past the end of the kernel image and a lastpg
+ * that corresponds to the end of real or simulated platform memory.
+ */
+#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))
+
+unsigned long bootmem_lastpg; /* Should be set by platform code */
+unsigned long __phys_offset; /* physical kernel offset >> 12 */
+
+/* Set as variable to limit PMD copies */
+int max_kernel_seg = 0x303;
+
+/* indicate pfn's of high memory */
+unsigned long highstart_pfn, highend_pfn;
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/* Default cache attribute for newly created page tables */
+unsigned long _dflt_cache_att = CACHEDEF;
+
+/*
+ * The current "generation" of kernel map, which should not roll
+ * over until Hell freezes over. Actual bound in years needs to be
+ * calculated to confirm.
+ */
+DEFINE_SPINLOCK(kmap_gen_lock);
+
+/* checkpatch says don't init this to 0. */
+unsigned long long kmap_generation;
+
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Fixes up more stuff for HIGHMEM
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
+ /* No idea where this is actually declared. Seems to evade LXR. */
+ free_all_bootmem();
+ mem_init_print_info(NULL);
+
+ /*
+ * To-Do: someone somewhere should wipe out the bootmem map
+ * after we're done?
+ */
+
+ /*
+ * This can be moved to some more virtual-memory-specific
+ * initialization hook at some point. Set the init_mm
+ * descriptors "context" value to point to the initial
+ * kernel segment table's physical address.
+ */
+ init_mm.context.ptbase = __pa(init_mm.pgd);
+}
+
+/*
+ * free_initmem - frees memory used by stuff declared with __init
+ *
+ * Todo: free pages between __init_begin and __init_end; possibly
+ * some devtree related stuff as well.
+ */
+void __ref free_initmem(void)
+{
+}
+
+/*
+ * free_initrd_mem - frees... initrd memory.
+ * @start - start of initrd memory
+ * @end - end of initrd memory
+ *
+ * Apparently has to be passed the address of the initrd memory.
+ *
+ * Wrapped by #ifdef CONFIG_BLKDEV_INITRD
+ */
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+}
+
+void sync_icache_dcache(pte_t pte)
+{
+ unsigned long addr;
+ struct page *page;
+
+ page = pte_page(pte);
+ addr = (unsigned long) page_address(page);
+
+ __vmcache_idsync(addr, PAGE_SIZE);
+}
+
+/*
+ * In order to set up page allocator "nodes",
+ * somebody has to call free_area_init() for UMA.
+ *
+ * In this mode, we only have one pg_data_t
+ * structure: contig_mem_data.
+ */
+void __init paging_init(void)
+{
+ unsigned long zones_sizes[MAX_NR_ZONES] = {0, };
+
+ /*
+ * This is not particularly well documented anywhere, but
+ * give ZONE_NORMAL all the memory, including the big holes
+	 * left by the kernel and bootmem map, which are already marked as
+	 * reserved in the bootmem map; free_area_init() should see those
+	 * bits and adjust accordingly.
+ */
+
+ zones_sizes[ZONE_NORMAL] = max_low_pfn;
+
+ free_area_init(zones_sizes); /* sets up the zonelists and mem_map */
+
+ /*
+ * Start of high memory area. Will probably need something more
+ * fancy if we... get more fancy.
+ */
+ high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
+}
+
+#ifndef DMA_RESERVE
+#define DMA_RESERVE (4)
+#endif
+
+#define DMA_CHUNKSIZE (1<<22)
+#define DMA_RESERVED_BYTES (DMA_RESERVE * DMA_CHUNKSIZE)
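+
+/*
+ * Worked example: with the default DMA_RESERVE of 4 and 4 MiB chunks
+ * (1 << 22), 4 * 4 MiB = 16 MiB of RAM is set aside for uncached DMA;
+ * cf. hexagon_coherent_pool_size below.
+ */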
+
+/*
+ * Pick out the memory size. We look for mem=size,
+ * where size is "size[KkMm]"
+ */
+static int __init early_mem(char *p)
+{
+ unsigned long size;
+ char *endp;
+
+ size = memparse(p, &endp);
+
+ bootmem_lastpg = PFN_DOWN(size);
+
+ return 0;
+}
+early_param("mem", early_mem);
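+
+/*
+ * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): booting with
+ * "mem=256M" makes memparse() return 0x10000000, and PFN_DOWN() turns
+ * that into bootmem_lastpg == 0x10000, i.e. 65536 page frames.
+ */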
+
+size_t hexagon_coherent_pool_size = (size_t) (DMA_RESERVE << 22);
+
+void __init setup_arch_memory(void)
+{
+ int bootmap_size;
+ /* XXX Todo: this probably should be cleaned up */
+ u32 *segtable = (u32 *) &swapper_pg_dir[0];
+ u32 *segtable_end;
+
+ /*
+ * Set up boot memory allocator
+ *
+ * The Gorman book also talks about these functions.
+ * This needs to change for highmem setups.
+ */
+
+	/* Prior to this, bootmem_lastpg actually holds the memory size in pages */
+ bootmem_lastpg += ARCH_PFN_OFFSET;
+
+ /* Memory size needs to be a multiple of 16M */
+ bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
+ ~((BIG_KERNEL_PAGE_SIZE) - 1));
+
+ /*
+ * Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached)
+ * memory allocation
+ */
+
+ max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES);
+ min_low_pfn = ARCH_PFN_OFFSET;
+ bootmap_size = init_bootmem_node(NODE_DATA(0), bootmem_startpg, min_low_pfn, max_low_pfn);
+
+ printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg);
+ printk(KERN_INFO "bootmem_lastpg: 0x%08lx\n", bootmem_lastpg);
+ printk(KERN_INFO "bootmap_size: %d\n", bootmap_size);
+ printk(KERN_INFO "min_low_pfn: 0x%08lx\n", min_low_pfn);
+ printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn);
+
+ /*
+ * The default VM page tables (will be) populated with
+ * VA=PA+PAGE_OFFSET mapping. We go in and invalidate entries
+ * higher than what we have memory for.
+ */
+
+ /* this is pointer arithmetic; each entry covers 4MB */
+ segtable = segtable + (PAGE_OFFSET >> 22);
+
+ /* this actually only goes to the end of the first gig */
+ segtable_end = segtable + (1<<(30-22));
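+
+	/*
+	 * Worked example, assuming the usual PAGE_OFFSET of 0xc0000000:
+	 * 0xc0000000 >> 22 == 0x300, so the kernel's first 4 MB entry is
+	 * index 0x300 (cf. max_kernel_seg == 0x303 above), and segtable_end
+	 * sits 256 entries, i.e. 1 GB, beyond the PAGE_OFFSET entry.
+	 */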
+
+ /*
+ * Move forward to the start of empty pages; take into account
+ * phys_offset shift.
+ */
+
+ segtable += (bootmem_lastpg-ARCH_PFN_OFFSET)>>(22-PAGE_SHIFT);
+ {
+ int i;
+
+ for (i = 1 ; i <= DMA_RESERVE ; i++)
+ segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
+ | __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
+ | __HEXAGON_C_UNC << 6
+ | __HVM_PDE_S_4MB);
+ }
+
+ printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
+ segtable_end);
+ while (segtable < (segtable_end-8))
+ *(segtable++) = __HVM_PDE_S_INVALID;
+ /* stop the pointer at the device I/O 4MB page */
+
+ printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
+ segtable);
+
+#if 0
+ /* Other half of the early device table from vm_init_segtable. */
+ printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
+ (unsigned long) _K_init_devicetable-PAGE_OFFSET);
+ *segtable = ((u32) (unsigned long) _K_init_devicetable-PAGE_OFFSET) |
+ __HVM_PDE_S_4KB;
+ printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
+#endif
+
+ /*
+ * Free all the memory that wasn't taken up by the bootmap, the DMA
+ * reserve, or kernel itself.
+ */
+ free_bootmem(PFN_PHYS(bootmem_startpg) + bootmap_size,
+ PFN_PHYS(bootmem_lastpg - bootmem_startpg) - bootmap_size -
+ DMA_RESERVED_BYTES);
+
+ /*
+ * The bootmem allocator seemingly just lives to feed memory
+ * to the paging system
+ */
+ printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
+ paging_init(); /* See Gorman Book, 2.3 */
+
+ /*
+	 * At this point the page allocator is initialized, but no pages
+	 * are available yet (just as with the bootmem allocator); they are
+	 * released to it by mem_init(), which start_kernel() calls later
+	 * in the boot process.
+ */
+}
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
new file mode 100644
index 000000000..370ade265
--- /dev/null
+++ b/arch/hexagon/mm/ioremap.c
@@ -0,0 +1,57 @@
+/*
+ * I/O remap functions for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
+{
+ unsigned long last_addr, addr;
+ unsigned long offset = phys_addr & ~PAGE_MASK;
+ struct vm_struct *area;
+
+ pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_READ|_PAGE_WRITE
+ |(__HEXAGON_C_DEV << 6));
+
+ last_addr = phys_addr + size - 1;
+
+ /* Wrapping not allowed */
+ if (!size || (last_addr < phys_addr))
+ return NULL;
+
+ /* Rounds up to next page size, including whole-page offset */
+ size = PAGE_ALIGN(offset + size);
+
+	area = get_vm_area(size, VM_IOREMAP);
+	if (!area)
+		return NULL;	/* get_vm_area() can fail */
+	addr = (unsigned long)area->addr;
+
+ if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+
+ return (void __iomem *) (offset + addr);
+}
+
+void __iounmap(const volatile void __iomem *addr)
+{
+ vunmap((void *) ((unsigned long) addr & PAGE_MASK));
+}
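
A hypothetical driver-side sketch of the pair above; the physical address,
size, and register offset are invented for illustration:

	#include <linux/io.h>

	static void __iomem *regs;

	static int example_map(void)
	{
		/* Pretend device: 4 KiB of registers at phys 0xab000000. */
		regs = ioremap(0xab000000, 0x1000);
		if (!regs)
			return -ENOMEM;
		/* Hypothetical enable bit at offset 0x10. */
		writel(readl(regs + 0x10) | 0x1, regs + 0x10);
		return 0;
	}

	static void example_unmap(void)
	{
		iounmap(regs);	/* iounmap() resolves to __iounmap() here */
	}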
diff --git a/arch/hexagon/mm/pgalloc.c b/arch/hexagon/mm/pgalloc.c
new file mode 100644
index 000000000..19760a461
--- /dev/null
+++ b/arch/hexagon/mm/pgalloc.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+
+void __init pgtable_cache_init(void)
+{
+}
diff --git a/arch/hexagon/mm/strnlen_user.S b/arch/hexagon/mm/strnlen_user.S
new file mode 100644
index 000000000..0eecb7a76
--- /dev/null
+++ b/arch/hexagon/mm/strnlen_user.S
@@ -0,0 +1,139 @@
+/*
+ * User string length functions for kernel
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#define isrc r0
+#define max r1 /* Do not change! */
+
+#define end r2
+#define tmp1 r3
+
+#define obo r6 /* off-by-one */
+#define start r7
+#define mod8 r8
+#define dbuf r15:14
+#define dcmp r13:12
+
+/*
+ * The vector mask version of this turned out *really* badly.
+ * The hardware loop version also turned out *really* badly.
+ * Seems straight pointer arithmetic basically wins here.
+ */
+
+#define fname __strnlen_user
+
+ .text
+ .global fname
+ .type fname, @function
+ .p2align 5 /* why? */
+fname:
+ {
+ mod8 = and(isrc,#7);
+ end = add(isrc,max);
+ start = isrc;
+ }
+ {
+ P0 = cmp.eq(mod8,#0);
+ mod8 = and(end,#7);
+ dcmp = #0;
+ if (P0.new) jump:t dw_loop; /* fire up the oven */
+ }
+
+alignment_loop:
+fail_1: {
+ tmp1 = memb(start++#1);
+ }
+ {
+ P0 = cmp.eq(tmp1,#0);
+ if (P0.new) jump:nt exit_found;
+ P1 = cmp.gtu(end,start);
+ mod8 = and(start,#7);
+ }
+ {
+ if (!P1) jump exit_error; /* hit the end */
+ P0 = cmp.eq(mod8,#0);
+ }
+ {
+ if (!P0) jump alignment_loop;
+ }
+
+dw_loop:
+fail_2: {
+ dbuf = memd(start);
+ obo = add(start,#1);
+ }
+ {
+ P0 = vcmpb.eq(dbuf,dcmp);
+ }
+ {
+ tmp1 = P0;
+ P0 = cmp.gtu(end,start);
+ }
+ {
+ tmp1 = ct0(tmp1);
+ mod8 = and(end,#7);
+ if (!P0) jump end_check;
+ }
+ {
+ P0 = cmp.eq(tmp1,#32);
+ if (!P0.new) jump:nt exit_found;
+ if (!P0.new) start = add(obo,tmp1);
+ }
+ {
+ start = add(start,#8);
+ jump dw_loop;
+ } /* might be nice to combine these jumps... */
+
+
+end_check:
+ {
+ P0 = cmp.gt(tmp1,mod8);
+	if (P0.new) jump:nt exit_error;	/* never found! */
+ start = add(obo,tmp1);
+ }
+
+exit_found:
+ {
+ R0 = sub(start,isrc);
+ jumpr R31;
+ }
+
+exit_error:
+ {
+ R0 = add(max,#1);
+ jumpr R31;
+ }
+
+	/* The fixup returns 0; that's the strnlen_user convention for a faulted access. */
+ .falign
+fix_1:
+ {
+ R0 = #0;
+ jumpr R31;
+ }
+
+ .size fname,.-fname
+
+
+.section __ex_table,"a"
+.long fail_1,fix_1
+.long fail_2,fix_1
+.previous
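
Taking the three exits together, the routine follows the usual kernel
strnlen_user contract; a summary with invented example values:

	/*
	 * __strnlen_user(src, max) returns, per the exits above:
	 *   exit_found: string length INCLUDING the NUL, e.g. "abc" -> 4
	 *   exit_error: max + 1 (no NUL found within max bytes)
	 *   fix_1:      0 (a load faulted; the string is inaccessible)
	 */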
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
new file mode 100644
index 000000000..c599eb126
--- /dev/null
+++ b/arch/hexagon/mm/uaccess.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * Support for user memory access from kernel. This will
+ * probably be inlined for performance at some point, but
+ * for ease of debug, and to a lesser degree for code size,
+ * we implement here as subroutines.
+ */
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+
+/*
+ * For clear_user(), exploit previously defined copy_to_user function
+ * and the fact that we've got a handy zero page defined in kernel/head.S
+ *
+ * dczero here would be even faster.
+ */
+__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
+{
+ long uncleared;
+
+ while (count > PAGE_SIZE) {
+ uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
+ if (uncleared)
+ return count - (PAGE_SIZE - uncleared);
+ count -= PAGE_SIZE;
+ dest += PAGE_SIZE;
+ }
+ if (count)
+ count = raw_copy_to_user(dest, &empty_zero_page, count);
+
+ return count;
+}
+
+unsigned long clear_user_hexagon(void __user *dest, unsigned long count)
+{
+ if (!access_ok(VERIFY_WRITE, dest, count))
+ return count;
+ else
+ return __clear_user_hexagon(dest, count);
+}
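
The partial-failure arithmetic in __clear_user_hexagon() deserves a worked
example (numbers invented; 4 KiB pages assumed):

	/*
	 * Suppose count == 10000 and the first page copy faults with
	 * uncleared == 100. Then PAGE_SIZE - uncleared == 3996 bytes were
	 * actually cleared, so the function returns 10000 - 3996 == 6004:
	 * the number of bytes NOT cleared, matching clear_user()'s
	 * return convention.
	 */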
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
new file mode 100644
index 000000000..eb263e61d
--- /dev/null
+++ b/arch/hexagon/mm/vm_fault.c
@@ -0,0 +1,197 @@
+/*
+ * Memory fault handling for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * Page fault handling for the Hexagon Virtual Machine.
+ * Can also be called by a native port emulating the HVM
+ * execptions.
+ */
+
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/signal.h>
+#include <linux/extable.h>
+#include <linux/hardirq.h>
+
+/*
+ * Decode of hardware exception sends us to one of several
+ * entry points. At each, we generate canonical arguments
+ * for handling by the abstract memory management code.
+ */
+#define FLT_IFETCH -1
+#define FLT_LOAD 0
+#define FLT_STORE 1
+
+
+/*
+ * Canonical page fault handler
+ */
+void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ int si_signo;
+ int si_code = SEGV_MAPERR;
+ vm_fault_t fault;
+ const struct exception_table_entry *fixup;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ /*
+ * If we're in an interrupt or have no user context,
+	 * then we must not take the fault.
+ */
+ if (unlikely(in_interrupt() || !mm))
+ goto no_context;
+
+ local_irq_enable();
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+retry:
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+
+ if (vma->vm_start <= address)
+ goto good_area;
+
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+good_area:
+ /* Address space is OK. Now check access rights. */
+ si_code = SEGV_ACCERR;
+
+ switch (cause) {
+ case FLT_IFETCH:
+ if (!(vma->vm_flags & VM_EXEC))
+ goto bad_area;
+ break;
+ case FLT_LOAD:
+ if (!(vma->vm_flags & VM_READ))
+ goto bad_area;
+ break;
+ case FLT_STORE:
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
+ break;
+ }
+
+ fault = handle_mm_fault(vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
+ /* The most common case -- we are done. */
+ if (likely(!(fault & VM_FAULT_ERROR))) {
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+ }
+
+ up_read(&mm->mmap_sem);
+
+ /* Handle copyin/out exception cases */
+ if (!user_mode(regs))
+ goto no_context;
+
+ if (fault & VM_FAULT_OOM) {
+ pagefault_out_of_memory();
+ return;
+ }
+
+ /* User-mode address is in the memory map, but we are
+ * unable to fix up the page fault.
+ */
+ if (fault & VM_FAULT_SIGBUS) {
+ si_signo = SIGBUS;
+ si_code = BUS_ADRERR;
+ }
+ /* Address is not in the memory map */
+ else {
+ si_signo = SIGSEGV;
+ si_code = SEGV_ACCERR;
+ }
+ force_sig_fault(si_signo, si_code, (void __user *)address, current);
+ return;
+
+bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address, current);
+ return;
+ }
+ /* Kernel-mode fault falls through */
+
+no_context:
+ fixup = search_exception_tables(pt_elr(regs));
+ if (fixup) {
+ pt_set_elr(regs, fixup->fixup);
+ return;
+ }
+
+ /* Things are looking very, very bad now */
+ bust_spinlocks(1);
+ printk(KERN_EMERG "Unable to handle kernel paging request at "
+ "virtual address 0x%08lx, regs %p\n", address, regs);
+ die("Bad Kernel VA", regs, SIGKILL);
+}
+
+
+void read_protection_fault(struct pt_regs *regs)
+{
+ unsigned long badvadr = pt_badva(regs);
+
+ do_page_fault(badvadr, FLT_LOAD, regs);
+}
+
+void write_protection_fault(struct pt_regs *regs)
+{
+ unsigned long badvadr = pt_badva(regs);
+
+ do_page_fault(badvadr, FLT_STORE, regs);
+}
+
+void execute_protection_fault(struct pt_regs *regs)
+{
+ unsigned long badvadr = pt_badva(regs);
+
+ do_page_fault(badvadr, FLT_IFETCH, regs);
+}
diff --git a/arch/hexagon/mm/vm_tlb.c b/arch/hexagon/mm/vm_tlb.c
new file mode 100644
index 000000000..b47406553
--- /dev/null
+++ b/arch/hexagon/mm/vm_tlb.c
@@ -0,0 +1,94 @@
+/*
+ * Hexagon Virtual Machine TLB functions
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The Hexagon Virtual Machine conceals the real workings of
+ * the TLB, but there are one or two functions that need to
+ * be instantiated for it, differently from a native build.
+ */
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/page.h>
+#include <asm/hexagon_vm.h>
+
+/*
+ * Initial VM implementation has only one map active at a time, with
+ * TLB purges on changes. So either we're nuking the current map,
+ * or it's a no-op. This operation is messy on true SMPs where other
+ * processors must be induced to flush the copies in their local TLBs,
+ * but Hexagon thread-based virtual processors share the same MMU.
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (mm->context.ptbase == current->active_mm->context.ptbase)
+ __vmclrmap((void *)start, end - start);
+}
+
+/*
+ * Flush a page from the kernel virtual map - used by highmem
+ */
+void flush_tlb_one(unsigned long vaddr)
+{
+ __vmclrmap((void *)vaddr, PAGE_SIZE);
+}
+
+/*
+ * Flush all TLBs across all CPUs, virtual or real.
+ * A single Hexagon core has 6 thread contexts but
+ * only one TLB.
+ */
+void tlb_flush_all(void)
+{
+	/* should probably use that fixaddr end or whatever label */
+ __vmclrmap(0, 0xffff0000);
+}
+
+/*
+ * Flush TLB entries associated with a given mm_struct mapping.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ /* Current Virtual Machine has only one map active at a time */
+ if (current->active_mm->context.ptbase == mm->context.ptbase)
+ tlb_flush_all();
+}
+
+/*
+ * Flush TLB state associated with a page of a vma.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (mm->context.ptbase == current->active_mm->context.ptbase)
+ __vmclrmap((void *)vaddr, PAGE_SIZE);
+}
+
+/*
+ * Flush TLB entries associated with a kernel address range.
+ * Like flush range, but without the check on the vma->vm_mm.
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ __vmclrmap((void *)start, end - start);
+}