author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 13:16:40 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 13:16:40 +0000
commit    47ab3d4a42e9ab51c465c4322d2ec233f6324e6b (patch)
tree      a61a0ffd83f4a3def4b36e5c8e99630c559aa723 /src/cmd/internal/obj/mips
parent    Initial commit. (diff)
Adding upstream version 1.18.10. (tags: upstream/1.18.10, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/cmd/internal/obj/mips')
-rw-r--r--  src/cmd/internal/obj/mips/a.out.go   |  483
-rw-r--r--  src/cmd/internal/obj/mips/anames.go  |  137
-rw-r--r--  src/cmd/internal/obj/mips/anames0.go |   45
-rw-r--r--  src/cmd/internal/obj/mips/asm0.go    | 2121
-rw-r--r--  src/cmd/internal/obj/mips/list0.go   |   83
-rw-r--r--  src/cmd/internal/obj/mips/obj0.go    | 1522
6 files changed, 4391 insertions, 0 deletions
diff --git a/src/cmd/internal/obj/mips/a.out.go b/src/cmd/internal/obj/mips/a.out.go
new file mode 100644
index 0000000..c6ce53a
--- /dev/null
+++ b/src/cmd/internal/obj/mips/a.out.go
@@ -0,0 +1,483 @@
+// cmd/9c/9.out.h from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package mips
+
+import (
+ "cmd/internal/obj"
+)
+
+//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p mips
+
+/*
+ * mips 64
+ */
+const (
+ NSNAME = 8
+ NSYM = 50
+ NREG = 32 /* number of general registers */
+ NFREG = 32 /* number of floating point registers */
+ NWREG = 32 /* number of MSA registers */
+)
+
+const (
+ REG_R0 = obj.RBaseMIPS + iota // must be a multiple of 32
+ REG_R1
+ REG_R2
+ REG_R3
+ REG_R4
+ REG_R5
+ REG_R6
+ REG_R7
+ REG_R8
+ REG_R9
+ REG_R10
+ REG_R11
+ REG_R12
+ REG_R13
+ REG_R14
+ REG_R15
+ REG_R16
+ REG_R17
+ REG_R18
+ REG_R19
+ REG_R20
+ REG_R21
+ REG_R22
+ REG_R23
+ REG_R24
+ REG_R25
+ REG_R26
+ REG_R27
+ REG_R28
+ REG_R29
+ REG_R30
+ REG_R31
+
+ REG_F0 // must be a multiple of 32
+ REG_F1
+ REG_F2
+ REG_F3
+ REG_F4
+ REG_F5
+ REG_F6
+ REG_F7
+ REG_F8
+ REG_F9
+ REG_F10
+ REG_F11
+ REG_F12
+ REG_F13
+ REG_F14
+ REG_F15
+ REG_F16
+ REG_F17
+ REG_F18
+ REG_F19
+ REG_F20
+ REG_F21
+ REG_F22
+ REG_F23
+ REG_F24
+ REG_F25
+ REG_F26
+ REG_F27
+ REG_F28
+ REG_F29
+ REG_F30
+ REG_F31
+
+ // co-processor 0 control registers
+ REG_M0 // must be a multiple of 32
+ REG_M1
+ REG_M2
+ REG_M3
+ REG_M4
+ REG_M5
+ REG_M6
+ REG_M7
+ REG_M8
+ REG_M9
+ REG_M10
+ REG_M11
+ REG_M12
+ REG_M13
+ REG_M14
+ REG_M15
+ REG_M16
+ REG_M17
+ REG_M18
+ REG_M19
+ REG_M20
+ REG_M21
+ REG_M22
+ REG_M23
+ REG_M24
+ REG_M25
+ REG_M26
+ REG_M27
+ REG_M28
+ REG_M29
+ REG_M30
+ REG_M31
+
+ // FPU control registers
+ REG_FCR0 // must be a multiple of 32
+ REG_FCR1
+ REG_FCR2
+ REG_FCR3
+ REG_FCR4
+ REG_FCR5
+ REG_FCR6
+ REG_FCR7
+ REG_FCR8
+ REG_FCR9
+ REG_FCR10
+ REG_FCR11
+ REG_FCR12
+ REG_FCR13
+ REG_FCR14
+ REG_FCR15
+ REG_FCR16
+ REG_FCR17
+ REG_FCR18
+ REG_FCR19
+ REG_FCR20
+ REG_FCR21
+ REG_FCR22
+ REG_FCR23
+ REG_FCR24
+ REG_FCR25
+ REG_FCR26
+ REG_FCR27
+ REG_FCR28
+ REG_FCR29
+ REG_FCR30
+ REG_FCR31
+
+ // MSA registers
+ // The lower bits of the W registers are aliases of the F registers.
+ REG_W0 // must be a multiple of 32
+ REG_W1
+ REG_W2
+ REG_W3
+ REG_W4
+ REG_W5
+ REG_W6
+ REG_W7
+ REG_W8
+ REG_W9
+ REG_W10
+ REG_W11
+ REG_W12
+ REG_W13
+ REG_W14
+ REG_W15
+ REG_W16
+ REG_W17
+ REG_W18
+ REG_W19
+ REG_W20
+ REG_W21
+ REG_W22
+ REG_W23
+ REG_W24
+ REG_W25
+ REG_W26
+ REG_W27
+ REG_W28
+ REG_W29
+ REG_W30
+ REG_W31
+
+ REG_HI
+ REG_LO
+
+ REG_LAST = REG_LO // the last defined register
+
+ REG_SPECIAL = REG_M0
+
+ REGZERO = REG_R0 /* set to zero */
+ REGSP = REG_R29
+ REGSB = REG_R28
+ REGLINK = REG_R31
+ REGRET = REG_R1
+ REGARG = -1 /* -1 disables passing the first argument in register */
+ REGRT1 = REG_R1 /* reserved for runtime, duffzero and duffcopy */
+ REGRT2 = REG_R2 /* reserved for runtime, duffcopy */
+ REGCTXT = REG_R22 /* context for closures */
+ REGG = REG_R30 /* G */
+ REGTMP = REG_R23 /* used by the linker */
+ FREGRET = REG_F0
+)
+
+// https://llvm.org/svn/llvm-project/llvm/trunk/lib/Target/Mips/MipsRegisterInfo.td search for DwarfRegNum
+// https://gcc.gnu.org/viewcvs/gcc/trunk/gcc/config/mips/mips.c?view=co&revision=258099&content-type=text%2Fplain search for mips_dwarf_regno
+// For now, this is adequate for both 32 and 64 bit.
+var MIPSDWARFRegisters = map[int16]int16{}
+
+func init() {
+ // f assigns dwarfregisters[from:to] = (base):(to-from+base)
+ f := func(from, to, base int16) {
+ for r := int16(from); r <= to; r++ {
+ MIPSDWARFRegisters[r] = (r - from) + base
+ }
+ }
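+ // For example, f(REG_R0, REG_R31, 0) maps REG_R5 to DWARF register 5,
+ // and f(REG_F0, REG_F31, 32) maps REG_F3 to DWARF register 35.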
+ f(REG_R0, REG_R31, 0)
+ f(REG_F0, REG_F31, 32) // For 32-bit MIPS, compiler only uses even numbered registers -- see cmd/compile/internal/ssa/gen/MIPSOps.go
+ MIPSDWARFRegisters[REG_HI] = 64
+ MIPSDWARFRegisters[REG_LO] = 65
+ // The lower bits of the W registers are aliases of the F registers.
+ f(REG_W0, REG_W31, 32)
+}
+
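+// BIG bounds the offsets considered "short": values in [-BIG, BIG) fit the
+// signed 16-bit immediate field of a single load/store/add instruction;
+// anything larger needs a multi-instruction (LUI-based) form.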
+const (
+ BIG = 32766
+)
+
+const (
+ /* mark flags */
+ FOLL = 1 << 0
+ LABEL = 1 << 1
+ LEAF = 1 << 2
+ SYNC = 1 << 3
+ BRANCH = 1 << 4
+ LOAD = 1 << 5
+ FCMP = 1 << 6
+ NOSCHED = 1 << 7
+
+ NSCHED = 20
+)
+
+const (
+ C_NONE = iota
+ C_REG
+ C_FREG
+ C_FCREG
+ C_MREG /* special processor register */
+ C_WREG /* MSA registers */
+ C_HI
+ C_LO
+ C_ZCON
+ C_SCON /* 16 bit signed */
+ C_UCON /* 32 bit signed, low 16 bits 0 */
+ C_ADD0CON
+ C_AND0CON
+ C_ADDCON /* -0x8000 <= v < 0 */
+ C_ANDCON /* 0 < v <= 0xFFFF */
+ C_LCON /* other 32 */
+ C_DCON /* other 64 (could subdivide further) */
+ C_SACON /* $n(REG) where n <= int16 */
+ C_SECON
+ C_LACON /* $n(REG) where int16 < n <= int32 */
+ C_LECON
+ C_DACON /* $n(REG) where int32 < n */
+ C_STCON /* $tlsvar */
+ C_SBRA
+ C_LBRA
+ C_SAUTO
+ C_LAUTO
+ C_SEXT
+ C_LEXT
+ C_ZOREG
+ C_SOREG
+ C_LOREG
+ C_GOK
+ C_ADDR
+ C_TLS
+ C_TEXTSIZE
+
+ C_NCLASS /* must be the last */
+)
+
+const (
+ AABSD = obj.ABaseMIPS + obj.A_ARCHSPECIFIC + iota
+ AABSF
+ AABSW
+ AADD
+ AADDD
+ AADDF
+ AADDU
+ AADDW
+ AAND
+ ABEQ
+ ABFPF
+ ABFPT
+ ABGEZ
+ ABGEZAL
+ ABGTZ
+ ABLEZ
+ ABLTZ
+ ABLTZAL
+ ABNE
+ ABREAK
+ ACLO
+ ACLZ
+ ACMOVF
+ ACMOVN
+ ACMOVT
+ ACMOVZ
+ ACMPEQD
+ ACMPEQF
+ ACMPGED
+ ACMPGEF
+ ACMPGTD
+ ACMPGTF
+ ADIV
+ ADIVD
+ ADIVF
+ ADIVU
+ ADIVW
+ AGOK
+ ALL
+ ALLV
+ ALUI
+ AMADD
+ AMOVB
+ AMOVBU
+ AMOVD
+ AMOVDF
+ AMOVDW
+ AMOVF
+ AMOVFD
+ AMOVFW
+ AMOVH
+ AMOVHU
+ AMOVW
+ AMOVWD
+ AMOVWF
+ AMOVWL
+ AMOVWR
+ AMSUB
+ AMUL
+ AMULD
+ AMULF
+ AMULU
+ AMULW
+ ANEGD
+ ANEGF
+ ANEGW
+ ANEGV
+ ANOOP // hardware nop
+ ANOR
+ AOR
+ AREM
+ AREMU
+ ARFE
+ AROTR
+ AROTRV
+ ASC
+ ASCV
+ ASGT
+ ASGTU
+ ASLL
+ ASQRTD
+ ASQRTF
+ ASRA
+ ASRL
+ ASUB
+ ASUBD
+ ASUBF
+ ASUBU
+ ASUBW
+ ASYNC
+ ASYSCALL
+ ATEQ
+ ATLBP
+ ATLBR
+ ATLBWI
+ ATLBWR
+ ATNE
+ AWORD
+ AXOR
+
+ /* 64-bit */
+ AMOVV
+ AMOVVL
+ AMOVVR
+ ASLLV
+ ASRAV
+ ASRLV
+ ADIVV
+ ADIVVU
+ AREMV
+ AREMVU
+ AMULV
+ AMULVU
+ AADDV
+ AADDVU
+ ASUBV
+ ASUBVU
+
+ /* 64-bit FP */
+ ATRUNCFV
+ ATRUNCDV
+ ATRUNCFW
+ ATRUNCDW
+ AMOVWU
+ AMOVFV
+ AMOVDV
+ AMOVVF
+ AMOVVD
+
+ /* MSA */
+ AVMOVB
+ AVMOVH
+ AVMOVW
+ AVMOVD
+
+ ALAST
+
+ // aliases
+ AJMP = obj.AJMP
+ AJAL = obj.ACALL
+ ARET = obj.ARET
+)
+
+func init() {
+ // The asm encoder generally assumes that the lowest 5 bits of the
+ // REG_XX constants match the machine instruction encoding, i.e.
+ // the lowest 5 bits is the register number.
+ // Check this here.
+ if REG_R0%32 != 0 {
+ panic("REG_R0 is not a multiple of 32")
+ }
+ if REG_F0%32 != 0 {
+ panic("REG_F0 is not a multiple of 32")
+ }
+ if REG_M0%32 != 0 {
+ panic("REG_M0 is not a multiple of 32")
+ }
+ if REG_FCR0%32 != 0 {
+ panic("REG_FCR0 is not a multiple of 32")
+ }
+ if REG_W0%32 != 0 {
+ panic("REG_W0 is not a multiple of 32")
+ }
+}
diff --git a/src/cmd/internal/obj/mips/anames.go b/src/cmd/internal/obj/mips/anames.go
new file mode 100644
index 0000000..ca2ad5a
--- /dev/null
+++ b/src/cmd/internal/obj/mips/anames.go
@@ -0,0 +1,137 @@
+// Code generated by stringer -i a.out.go -o anames.go -p mips; DO NOT EDIT.
+
+package mips
+
+import "cmd/internal/obj"
+
+var Anames = []string{
+ obj.A_ARCHSPECIFIC: "ABSD",
+ "ABSF",
+ "ABSW",
+ "ADD",
+ "ADDD",
+ "ADDF",
+ "ADDU",
+ "ADDW",
+ "AND",
+ "BEQ",
+ "BFPF",
+ "BFPT",
+ "BGEZ",
+ "BGEZAL",
+ "BGTZ",
+ "BLEZ",
+ "BLTZ",
+ "BLTZAL",
+ "BNE",
+ "BREAK",
+ "CLO",
+ "CLZ",
+ "CMOVF",
+ "CMOVN",
+ "CMOVT",
+ "CMOVZ",
+ "CMPEQD",
+ "CMPEQF",
+ "CMPGED",
+ "CMPGEF",
+ "CMPGTD",
+ "CMPGTF",
+ "DIV",
+ "DIVD",
+ "DIVF",
+ "DIVU",
+ "DIVW",
+ "GOK",
+ "LL",
+ "LLV",
+ "LUI",
+ "MADD",
+ "MOVB",
+ "MOVBU",
+ "MOVD",
+ "MOVDF",
+ "MOVDW",
+ "MOVF",
+ "MOVFD",
+ "MOVFW",
+ "MOVH",
+ "MOVHU",
+ "MOVW",
+ "MOVWD",
+ "MOVWF",
+ "MOVWL",
+ "MOVWR",
+ "MSUB",
+ "MUL",
+ "MULD",
+ "MULF",
+ "MULU",
+ "MULW",
+ "NEGD",
+ "NEGF",
+ "NEGW",
+ "NEGV",
+ "NOOP",
+ "NOR",
+ "OR",
+ "REM",
+ "REMU",
+ "RFE",
+ "ROTR",
+ "ROTRV",
+ "SC",
+ "SCV",
+ "SGT",
+ "SGTU",
+ "SLL",
+ "SQRTD",
+ "SQRTF",
+ "SRA",
+ "SRL",
+ "SUB",
+ "SUBD",
+ "SUBF",
+ "SUBU",
+ "SUBW",
+ "SYNC",
+ "SYSCALL",
+ "TEQ",
+ "TLBP",
+ "TLBR",
+ "TLBWI",
+ "TLBWR",
+ "TNE",
+ "WORD",
+ "XOR",
+ "MOVV",
+ "MOVVL",
+ "MOVVR",
+ "SLLV",
+ "SRAV",
+ "SRLV",
+ "DIVV",
+ "DIVVU",
+ "REMV",
+ "REMVU",
+ "MULV",
+ "MULVU",
+ "ADDV",
+ "ADDVU",
+ "SUBV",
+ "SUBVU",
+ "TRUNCFV",
+ "TRUNCDV",
+ "TRUNCFW",
+ "TRUNCDW",
+ "MOVWU",
+ "MOVFV",
+ "MOVDV",
+ "MOVVF",
+ "MOVVD",
+ "VMOVB",
+ "VMOVH",
+ "VMOVW",
+ "VMOVD",
+ "LAST",
+}
diff --git a/src/cmd/internal/obj/mips/anames0.go b/src/cmd/internal/obj/mips/anames0.go
new file mode 100644
index 0000000..c300696
--- /dev/null
+++ b/src/cmd/internal/obj/mips/anames0.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips
+
+var cnames0 = []string{
+ "NONE",
+ "REG",
+ "FREG",
+ "FCREG",
+ "MREG",
+ "WREG",
+ "HI",
+ "LO",
+ "ZCON",
+ "SCON",
+ "UCON",
+ "ADD0CON",
+ "AND0CON",
+ "ADDCON",
+ "ANDCON",
+ "LCON",
+ "DCON",
+ "SACON",
+ "SECON",
+ "LACON",
+ "LECON",
+ "DACON",
+ "STCON",
+ "SBRA",
+ "LBRA",
+ "SAUTO",
+ "LAUTO",
+ "SEXT",
+ "LEXT",
+ "ZOREG",
+ "SOREG",
+ "LOREG",
+ "GOK",
+ "ADDR",
+ "TLS",
+ "TEXTSIZE",
+ "NCLASS",
+}
diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go
new file mode 100644
index 0000000..e475ffd
--- /dev/null
+++ b/src/cmd/internal/obj/mips/asm0.go
@@ -0,0 +1,2121 @@
+// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package mips
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/sys"
+ "fmt"
+ "log"
+ "sort"
+)
+
+// ctxt0 holds state while assembling a single function.
+// Each function gets a fresh ctxt0.
+// This allows for multiple functions to be safely concurrently assembled.
+type ctxt0 struct {
+ ctxt *obj.Link
+ newprog obj.ProgAlloc
+ cursym *obj.LSym
+ autosize int32
+ instoffset int64
+ pc int64
+}
+
+// Instruction layout.
+
+const (
+ mips64FuncAlign = 8
+)
+
+const (
+ r0iszero = 1
+)
+
+type Optab struct {
+ as obj.As
+ a1 uint8
+ a2 uint8
+ a3 uint8
+ type_ int8
+ size int8
+ param int16
+ family sys.ArchFamily // 0 means both sys.MIPS and sys.MIPS64
+ flag uint8
+}
+
+const (
+ // Optab.flag
+ NOTUSETMP = 1 << iota // p expands to multiple instructions, but does NOT use REGTMP
+)
+
+var optab = []Optab{
+ {obj.ATEXT, C_LEXT, C_NONE, C_TEXTSIZE, 0, 0, 0, sys.MIPS64, 0},
+ {obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0},
+
+ {AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+ {AMOVV, C_REG, C_NONE, C_REG, 1, 4, 0, sys.MIPS64, 0},
+ {AMOVB, C_REG, C_NONE, C_REG, 12, 8, 0, 0, NOTUSETMP},
+ {AMOVBU, C_REG, C_NONE, C_REG, 13, 4, 0, 0, 0},
+ {AMOVWU, C_REG, C_NONE, C_REG, 14, 8, 0, sys.MIPS64, NOTUSETMP},
+
+ {ASUB, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0},
+ {ASUBV, C_REG, C_REG, C_REG, 2, 4, 0, sys.MIPS64, 0},
+ {AADD, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0},
+ {AADDV, C_REG, C_REG, C_REG, 2, 4, 0, sys.MIPS64, 0},
+ {AAND, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0},
+ {ASUB, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0},
+ {ASUBV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.MIPS64, 0},
+ {AADD, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0},
+ {AADDV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.MIPS64, 0},
+ {AAND, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0},
+ {ACMOVN, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0},
+ {ANEGW, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0},
+ {ANEGV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.MIPS64, 0},
+
+ {ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0},
+ {ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0},
+ {ASLLV, C_REG, C_NONE, C_REG, 9, 4, 0, sys.MIPS64, 0},
+ {ASLLV, C_REG, C_REG, C_REG, 9, 4, 0, sys.MIPS64, 0},
+ {ACLO, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0},
+
+ {AADDF, C_FREG, C_NONE, C_FREG, 32, 4, 0, 0, 0},
+ {AADDF, C_FREG, C_REG, C_FREG, 32, 4, 0, 0, 0},
+ {ACMPEQF, C_FREG, C_REG, C_NONE, 32, 4, 0, 0, 0},
+ {AABSF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0},
+ {AMOVVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, sys.MIPS64, 0},
+ {AMOVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0},
+ {AMOVD, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0},
+
+ {AMOVW, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0},
+ {AMOVWU, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0},
+ {AMOVV, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0},
+ {AMOVB, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0},
+ {AMOVBU, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0},
+ {AMOVWL, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0},
+ {AMOVVL, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0},
+ {AMOVW, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0},
+ {AMOVWU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0},
+ {AMOVV, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0},
+ {AMOVB, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0},
+ {AMOVBU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0},
+ {AMOVWL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0},
+ {AMOVVL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0},
+ {AMOVW, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0},
+ {AMOVWU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0},
+ {AMOVV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0},
+ {AMOVB, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0},
+ {AMOVBU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0},
+ {AMOVWL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0},
+ {AMOVVL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0},
+ {ASC, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0},
+ {ASCV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0},
+
+ {AMOVW, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0},
+ {AMOVWU, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0},
+ {AMOVV, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0},
+ {AMOVB, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0},
+ {AMOVBU, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0},
+ {AMOVWL, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0},
+ {AMOVVL, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0},
+ {AMOVW, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0},
+ {AMOVWU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.MIPS64, 0},
+ {AMOVV, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.MIPS64, 0},
+ {AMOVB, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0},
+ {AMOVBU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0},
+ {AMOVWL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0},
+ {AMOVVL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.MIPS64, 0},
+ {AMOVW, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0},
+ {AMOVWU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0},
+ {AMOVV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0},
+ {AMOVB, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0},
+ {AMOVBU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0},
+ {AMOVWL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0},
+ {AMOVVL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0},
+ {ALL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0},
+ {ALLV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0},
+
+ {AMOVW, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0},
+ {AMOVWU, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0},
+ {AMOVV, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0},
+ {AMOVB, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0},
+ {AMOVBU, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0},
+ {AMOVW, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0},
+ {AMOVWU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, sys.MIPS64, 0},
+ {AMOVV, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, sys.MIPS64, 0},
+ {AMOVB, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0},
+ {AMOVBU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0},
+ {AMOVW, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0},
+ {AMOVWU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, sys.MIPS64, 0},
+ {AMOVV, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, sys.MIPS64, 0},
+ {AMOVB, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0},
+ {AMOVBU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0},
+ {ASC, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0},
+ {AMOVW, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0},
+ {AMOVW, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0},
+ {AMOVWU, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0},
+ {AMOVV, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0},
+ {AMOVB, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0},
+ {AMOVB, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0},
+ {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0},
+ {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0},
+ {AMOVW, C_REG, C_NONE, C_TLS, 53, 8, 0, 0, NOTUSETMP},
+ {AMOVWU, C_REG, C_NONE, C_TLS, 53, 8, 0, sys.MIPS64, NOTUSETMP},
+ {AMOVV, C_REG, C_NONE, C_TLS, 53, 8, 0, sys.MIPS64, NOTUSETMP},
+ {AMOVB, C_REG, C_NONE, C_TLS, 53, 8, 0, 0, NOTUSETMP},
+ {AMOVBU, C_REG, C_NONE, C_TLS, 53, 8, 0, 0, NOTUSETMP},
+
+ {AMOVW, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0},
+ {AMOVWU, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0},
+ {AMOVV, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0},
+ {AMOVB, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0},
+ {AMOVBU, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0},
+ {AMOVW, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0},
+ {AMOVWU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, sys.MIPS64, 0},
+ {AMOVV, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, sys.MIPS64, 0},
+ {AMOVB, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0},
+ {AMOVBU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0},
+ {AMOVW, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0},
+ {AMOVWU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, sys.MIPS64, 0},
+ {AMOVV, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, sys.MIPS64, 0},
+ {AMOVB, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0},
+ {AMOVBU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0},
+ {AMOVW, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.MIPS, 0},
+ {AMOVW, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0},
+ {AMOVWU, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0},
+ {AMOVV, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0},
+ {AMOVB, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.MIPS, 0},
+ {AMOVB, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0},
+ {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.MIPS, 0},
+ {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0},
+ {AMOVW, C_TLS, C_NONE, C_REG, 54, 8, 0, 0, NOTUSETMP},
+ {AMOVWU, C_TLS, C_NONE, C_REG, 54, 8, 0, sys.MIPS64, NOTUSETMP},
+ {AMOVV, C_TLS, C_NONE, C_REG, 54, 8, 0, sys.MIPS64, NOTUSETMP},
+ {AMOVB, C_TLS, C_NONE, C_REG, 54, 8, 0, 0, NOTUSETMP},
+ {AMOVBU, C_TLS, C_NONE, C_REG, 54, 8, 0, 0, NOTUSETMP},
+
+ {AMOVW, C_SECON, C_NONE, C_REG, 3, 4, REGSB, sys.MIPS64, 0},
+ {AMOVV, C_SECON, C_NONE, C_REG, 3, 4, REGSB, sys.MIPS64, 0},
+ {AMOVW, C_SACON, C_NONE, C_REG, 3, 4, REGSP, 0, 0},
+ {AMOVV, C_SACON, C_NONE, C_REG, 3, 4, REGSP, sys.MIPS64, 0},
+ {AMOVW, C_LECON, C_NONE, C_REG, 52, 8, REGSB, sys.MIPS, NOTUSETMP},
+ {AMOVW, C_LECON, C_NONE, C_REG, 52, 12, REGSB, sys.MIPS64, NOTUSETMP},
+ {AMOVV, C_LECON, C_NONE, C_REG, 52, 12, REGSB, sys.MIPS64, NOTUSETMP},
+
+ {AMOVW, C_LACON, C_NONE, C_REG, 26, 12, REGSP, 0, 0},
+ {AMOVV, C_LACON, C_NONE, C_REG, 26, 12, REGSP, sys.MIPS64, 0},
+ {AMOVW, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, 0, 0},
+ {AMOVV, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, sys.MIPS64, 0},
+ {AMOVW, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, 0, 0},
+ {AMOVV, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, sys.MIPS64, 0},
+ {AMOVW, C_STCON, C_NONE, C_REG, 55, 8, 0, 0, NOTUSETMP},
+ {AMOVV, C_STCON, C_NONE, C_REG, 55, 8, 0, sys.MIPS64, NOTUSETMP},
+
+ {AMOVW, C_UCON, C_NONE, C_REG, 24, 4, 0, 0, 0},
+ {AMOVV, C_UCON, C_NONE, C_REG, 24, 4, 0, sys.MIPS64, 0},
+ {AMOVW, C_LCON, C_NONE, C_REG, 19, 8, 0, 0, NOTUSETMP},
+ {AMOVV, C_LCON, C_NONE, C_REG, 19, 8, 0, sys.MIPS64, NOTUSETMP},
+
+ {AMOVW, C_HI, C_NONE, C_REG, 20, 4, 0, 0, 0},
+ {AMOVV, C_HI, C_NONE, C_REG, 20, 4, 0, sys.MIPS64, 0},
+ {AMOVW, C_LO, C_NONE, C_REG, 20, 4, 0, 0, 0},
+ {AMOVV, C_LO, C_NONE, C_REG, 20, 4, 0, sys.MIPS64, 0},
+ {AMOVW, C_REG, C_NONE, C_HI, 21, 4, 0, 0, 0},
+ {AMOVV, C_REG, C_NONE, C_HI, 21, 4, 0, sys.MIPS64, 0},
+ {AMOVW, C_REG, C_NONE, C_LO, 21, 4, 0, 0, 0},
+ {AMOVV, C_REG, C_NONE, C_LO, 21, 4, 0, sys.MIPS64, 0},
+
+ {AMUL, C_REG, C_REG, C_NONE, 22, 4, 0, 0, 0},
+ {AMUL, C_REG, C_REG, C_REG, 22, 4, 0, 0, 0},
+ {AMULV, C_REG, C_REG, C_NONE, 22, 4, 0, sys.MIPS64, 0},
+
+ {AADD, C_ADD0CON, C_REG, C_REG, 4, 4, 0, 0, 0},
+ {AADD, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, 0, 0},
+ {AADD, C_ANDCON, C_REG, C_REG, 10, 8, 0, 0, 0},
+ {AADD, C_ANDCON, C_NONE, C_REG, 10, 8, 0, 0, 0},
+
+ {AADDV, C_ADD0CON, C_REG, C_REG, 4, 4, 0, sys.MIPS64, 0},
+ {AADDV, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, sys.MIPS64, 0},
+ {AADDV, C_ANDCON, C_REG, C_REG, 10, 8, 0, sys.MIPS64, 0},
+ {AADDV, C_ANDCON, C_NONE, C_REG, 10, 8, 0, sys.MIPS64, 0},
+
+ {AAND, C_AND0CON, C_REG, C_REG, 4, 4, 0, 0, 0},
+ {AAND, C_AND0CON, C_NONE, C_REG, 4, 4, 0, 0, 0},
+ {AAND, C_ADDCON, C_REG, C_REG, 10, 8, 0, 0, 0},
+ {AAND, C_ADDCON, C_NONE, C_REG, 10, 8, 0, 0, 0},
+
+ {AADD, C_UCON, C_REG, C_REG, 25, 8, 0, 0, 0},
+ {AADD, C_UCON, C_NONE, C_REG, 25, 8, 0, 0, 0},
+ {AADDV, C_UCON, C_REG, C_REG, 25, 8, 0, sys.MIPS64, 0},
+ {AADDV, C_UCON, C_NONE, C_REG, 25, 8, 0, sys.MIPS64, 0},
+ {AAND, C_UCON, C_REG, C_REG, 25, 8, 0, 0, 0},
+ {AAND, C_UCON, C_NONE, C_REG, 25, 8, 0, 0, 0},
+
+ {AADD, C_LCON, C_NONE, C_REG, 23, 12, 0, 0, 0},
+ {AADDV, C_LCON, C_NONE, C_REG, 23, 12, 0, sys.MIPS64, 0},
+ {AAND, C_LCON, C_NONE, C_REG, 23, 12, 0, 0, 0},
+ {AADD, C_LCON, C_REG, C_REG, 23, 12, 0, 0, 0},
+ {AADDV, C_LCON, C_REG, C_REG, 23, 12, 0, sys.MIPS64, 0},
+ {AAND, C_LCON, C_REG, C_REG, 23, 12, 0, 0, 0},
+
+ {ASLL, C_SCON, C_REG, C_REG, 16, 4, 0, 0, 0},
+ {ASLL, C_SCON, C_NONE, C_REG, 16, 4, 0, 0, 0},
+
+ {ASLLV, C_SCON, C_REG, C_REG, 16, 4, 0, sys.MIPS64, 0},
+ {ASLLV, C_SCON, C_NONE, C_REG, 16, 4, 0, sys.MIPS64, 0},
+
+ {ASYSCALL, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0},
+
+ {ABEQ, C_REG, C_REG, C_SBRA, 6, 4, 0, 0, 0},
+ {ABEQ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0, 0},
+ {ABLEZ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0, 0},
+ {ABFPT, C_NONE, C_NONE, C_SBRA, 6, 8, 0, 0, NOTUSETMP},
+
+ {AJMP, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0},
+ {AJAL, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0},
+
+ {AJMP, C_NONE, C_NONE, C_ZOREG, 18, 4, REGZERO, 0, 0},
+ {AJAL, C_NONE, C_NONE, C_ZOREG, 18, 4, REGLINK, 0, 0},
+
+ {AMOVW, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, sys.MIPS64, 0},
+ {AMOVF, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, sys.MIPS64, 0},
+ {AMOVD, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, sys.MIPS64, 0},
+ {AMOVW, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, sys.MIPS64, 0},
+ {AMOVF, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0, 0},
+ {AMOVD, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0, 0},
+ {AMOVW, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, sys.MIPS64, 0},
+ {AMOVF, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0, 0},
+ {AMOVD, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0, 0},
+
+ {AMOVW, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, sys.MIPS64, 0},
+ {AMOVF, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, sys.MIPS64, 0},
+ {AMOVD, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, sys.MIPS64, 0},
+ {AMOVW, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, sys.MIPS64, 0},
+ {AMOVF, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0, 0},
+ {AMOVD, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0, 0},
+ {AMOVW, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, sys.MIPS64, 0},
+ {AMOVF, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0, 0},
+ {AMOVD, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0, 0},
+ {AMOVF, C_ADDR, C_NONE, C_FREG, 51, 8, 0, sys.MIPS, 0},
+ {AMOVF, C_ADDR, C_NONE, C_FREG, 51, 12, 0, sys.MIPS64, 0},
+ {AMOVD, C_ADDR, C_NONE, C_FREG, 51, 8, 0, sys.MIPS, 0},
+ {AMOVD, C_ADDR, C_NONE, C_FREG, 51, 12, 0, sys.MIPS64, 0},
+
+ {AMOVW, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, sys.MIPS64, 0},
+ {AMOVF, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, sys.MIPS64, 0},
+ {AMOVD, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, sys.MIPS64, 0},
+ {AMOVW, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, sys.MIPS64, 0},
+ {AMOVF, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0, 0},
+ {AMOVD, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0, 0},
+ {AMOVW, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, sys.MIPS64, 0},
+ {AMOVF, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0, 0},
+ {AMOVD, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0, 0},
+
+ {AMOVW, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, sys.MIPS64, 0},
+ {AMOVF, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, sys.MIPS64, 0},
+ {AMOVD, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, sys.MIPS64, 0},
+ {AMOVW, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, sys.MIPS64, 0},
+ {AMOVF, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0, 0},
+ {AMOVD, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0, 0},
+ {AMOVW, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, sys.MIPS64, 0},
+ {AMOVF, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0, 0},
+ {AMOVD, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0, 0},
+ {AMOVF, C_FREG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0},
+ {AMOVF, C_FREG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0},
+ {AMOVD, C_FREG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0},
+ {AMOVD, C_FREG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0},
+
+ {AMOVW, C_REG, C_NONE, C_FREG, 30, 4, 0, 0, 0},
+ {AMOVW, C_FREG, C_NONE, C_REG, 31, 4, 0, 0, 0},
+ {AMOVV, C_REG, C_NONE, C_FREG, 47, 4, 0, sys.MIPS64, 0},
+ {AMOVV, C_FREG, C_NONE, C_REG, 48, 4, 0, sys.MIPS64, 0},
+
+ {AMOVW, C_ADDCON, C_NONE, C_FREG, 34, 8, 0, sys.MIPS64, 0},
+ {AMOVW, C_ANDCON, C_NONE, C_FREG, 34, 8, 0, sys.MIPS64, 0},
+
+ {AMOVW, C_REG, C_NONE, C_MREG, 37, 4, 0, 0, 0},
+ {AMOVV, C_REG, C_NONE, C_MREG, 37, 4, 0, sys.MIPS64, 0},
+ {AMOVW, C_MREG, C_NONE, C_REG, 38, 4, 0, 0, 0},
+ {AMOVV, C_MREG, C_NONE, C_REG, 38, 4, 0, sys.MIPS64, 0},
+
+ {AWORD, C_LCON, C_NONE, C_NONE, 40, 4, 0, 0, 0},
+
+ {AMOVW, C_REG, C_NONE, C_FCREG, 41, 4, 0, 0, 0},
+ {AMOVV, C_REG, C_NONE, C_FCREG, 41, 4, 0, sys.MIPS64, 0},
+ {AMOVW, C_FCREG, C_NONE, C_REG, 42, 4, 0, 0, 0},
+ {AMOVV, C_FCREG, C_NONE, C_REG, 42, 4, 0, sys.MIPS64, 0},
+
+ {ATEQ, C_SCON, C_REG, C_REG, 15, 4, 0, 0, 0},
+ {ATEQ, C_SCON, C_NONE, C_REG, 15, 4, 0, 0, 0},
+ {ACMOVT, C_REG, C_NONE, C_REG, 17, 4, 0, 0, 0},
+
+ {AVMOVB, C_SCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0},
+ {AVMOVB, C_ADDCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0},
+ {AVMOVB, C_SOREG, C_NONE, C_WREG, 57, 4, 0, sys.MIPS64, 0},
+ {AVMOVB, C_WREG, C_NONE, C_SOREG, 58, 4, 0, sys.MIPS64, 0},
+
+ {ABREAK, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, /* really CACHE instruction */
+ {ABREAK, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0},
+ {ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0},
+ {ABREAK, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0},
+
+ {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0},
+ {obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0},
+ {obj.AFUNCDATA, C_SCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0},
+ {obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
+ {obj.ANOP, C_LCON, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // nop variants, see #40689
+ {obj.ANOP, C_REG, C_NONE, C_NONE, 0, 0, 0, 0, 0},
+ {obj.ANOP, C_FREG, C_NONE, C_NONE, 0, 0, 0, 0, 0},
+ {obj.ADUFFZERO, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // same as AJMP
+ {obj.ADUFFCOPY, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // same as AJMP
+
+ {obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0},
+}
+
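+// oprange maps an opcode (masked with obj.AMask) to the slice of optab
+// entries that can encode it; buildop fills it in.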
+var oprange [ALAST & obj.AMask][]Optab
+
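+// xcmp[a][b] reports whether an operand of class a is acceptable where an
+// optab entry expects class b (e.g. a C_ZCON operand satisfies C_SCON).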
+var xcmp [C_NCLASS][C_NCLASS]bool
+
+func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
+ if ctxt.Retpoline {
+ ctxt.Diag("-spectre=ret not supported on mips")
+ ctxt.Retpoline = false // don't keep printing
+ }
+
+ p := cursym.Func().Text
+ if p == nil || p.Link == nil { // handle external functions and ELF section symbols
+ return
+ }
+
+ c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset + ctxt.FixedFrameSize())}
+
+ if oprange[AOR&obj.AMask] == nil {
+ c.ctxt.Diag("mips ops not initialized, call mips.buildop first")
+ }
+
+ pc := int64(0)
+ p.Pc = pc
+
+ var m int
+ var o *Optab
+ for p = p.Link; p != nil; p = p.Link {
+ p.Pc = pc
+ o = c.oplook(p)
+ m = int(o.size)
+ if m == 0 {
+ if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
+ c.ctxt.Diag("zero-width instruction\n%v", p)
+ }
+ continue
+ }
+
+ pc += int64(m)
+ }
+
+ c.cursym.Size = pc
+
+ /*
+ * if any procedure is large enough to
+ * generate a large SBRA branch, then
+ * generate extra passes putting branches
+ * around jmps to fix. this is rare.
+ */
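+	// A conditional branch encodes an 18-bit word-aligned displacement, so
+	// targets beyond roughly ±128KB are reached by redirecting the branch
+	// to a nearby JMP that carries the full target instead.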
+ bflag := 1
+
+ var otxt int64
+ var q *obj.Prog
+ for bflag != 0 {
+ bflag = 0
+ pc = 0
+ for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
+ p.Pc = pc
+ o = c.oplook(p)
+
+ // very large conditional branches
+ if o.type_ == 6 && p.To.Target() != nil {
+ otxt = p.To.Target().Pc - pc
+ if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 {
+ q = c.newprog()
+ q.Link = p.Link
+ p.Link = q
+ q.As = AJMP
+ q.Pos = p.Pos
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.SetTarget(p.To.Target())
+ p.To.SetTarget(q)
+ q = c.newprog()
+ q.Link = p.Link
+ p.Link = q
+ q.As = AJMP
+ q.Pos = p.Pos
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.SetTarget(q.Link.Link)
+
+ c.addnop(p.Link)
+ c.addnop(p)
+ bflag = 1
+ }
+ }
+
+ m = int(o.size)
+ if m == 0 {
+ if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
+ c.ctxt.Diag("zero-width instruction\n%v", p)
+ }
+ continue
+ }
+
+ pc += int64(m)
+ }
+
+ c.cursym.Size = pc
+ }
+ if c.ctxt.Arch.Family == sys.MIPS64 {
+ pc += -pc & (mips64FuncAlign - 1)
+ }
+ c.cursym.Size = pc
+
+ /*
+ * lay out the code, emitting code and data relocations.
+ */
+
+ c.cursym.Grow(c.cursym.Size)
+
+ bp := c.cursym.P
+ var i int32
+ var out [4]uint32
+ for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
+ c.pc = p.Pc
+ o = c.oplook(p)
+ if int(o.size) > 4*len(out) {
+ log.Fatalf("out array in span0 is too small, need at least %d for %v", o.size/4, p)
+ }
+ c.asmout(p, o, out[:])
+ for i = 0; i < int32(o.size/4); i++ {
+ c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
+ bp = bp[4:]
+ }
+ }
+
+ // Mark nonpreemptible instruction sequences.
+ // We use REGTMP as a scratch register during call injection,
+ // so instruction sequences that use REGTMP are unsafe to
+ // preempt asynchronously.
+ obj.MarkUnsafePoints(c.ctxt, c.cursym.Func().Text, c.newprog, c.isUnsafePoint, c.isRestartable)
+}
+
+// isUnsafePoint returns whether p is an unsafe point.
+func (c *ctxt0) isUnsafePoint(p *obj.Prog) bool {
+ // If p explicitly uses REGTMP, it's unsafe to preempt, because the
+ // preemption sequence clobbers REGTMP.
+ return p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP
+}
+
+// isRestartable returns whether p is a multi-instruction sequence that,
+// if preempted, can be restarted.
+func (c *ctxt0) isRestartable(p *obj.Prog) bool {
+ if c.isUnsafePoint(p) {
+ return false
+ }
+ // If p is a multi-instruction sequence that uses REGTMP, inserted by
+ // the assembler in order to materialize a large constant/offset, we
+ // can restart p (at the start of the instruction sequence) and
+ // recompute the content of REGTMP upon async preemption. Currently,
+ // all cases of assembler-inserted REGTMP fall into this category.
+ // If p doesn't use REGTMP, it can simply be preempted, so we don't
+ // mark it.
+ o := c.oplook(p)
+ return o.size > 4 && o.flag&NOTUSETMP == 0
+}
+
+func isint32(v int64) bool {
+ return int64(int32(v)) == v
+}
+
+func isuint32(v uint64) bool {
+ return uint64(uint32(v)) == v
+}
+
+func (c *ctxt0) aclass(a *obj.Addr) int {
+ switch a.Type {
+ case obj.TYPE_NONE:
+ return C_NONE
+
+ case obj.TYPE_REG:
+ if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
+ return C_REG
+ }
+ if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
+ return C_FREG
+ }
+ if REG_M0 <= a.Reg && a.Reg <= REG_M31 {
+ return C_MREG
+ }
+ if REG_FCR0 <= a.Reg && a.Reg <= REG_FCR31 {
+ return C_FCREG
+ }
+ if REG_W0 <= a.Reg && a.Reg <= REG_W31 {
+ return C_WREG
+ }
+ if a.Reg == REG_LO {
+ return C_LO
+ }
+ if a.Reg == REG_HI {
+ return C_HI
+ }
+ return C_GOK
+
+ case obj.TYPE_MEM:
+ switch a.Name {
+ case obj.NAME_EXTERN,
+ obj.NAME_STATIC:
+ if a.Sym == nil {
+ break
+ }
+ c.instoffset = a.Offset
+ if a.Sym != nil { // use relocation
+ if a.Sym.Type == objabi.STLSBSS {
+ return C_TLS
+ }
+ return C_ADDR
+ }
+ return C_LEXT
+
+ case obj.NAME_AUTO:
+ if a.Reg == REGSP {
+ // unset base register for better printing, since
+ // a.Offset is still relative to pseudo-SP.
+ a.Reg = obj.REG_NONE
+ }
+ c.instoffset = int64(c.autosize) + a.Offset
+ if c.instoffset >= -BIG && c.instoffset < BIG {
+ return C_SAUTO
+ }
+ return C_LAUTO
+
+ case obj.NAME_PARAM:
+ if a.Reg == REGSP {
+ // unset base register for better printing, since
+ // a.Offset is still relative to pseudo-FP.
+ a.Reg = obj.REG_NONE
+ }
+ c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
+ if c.instoffset >= -BIG && c.instoffset < BIG {
+ return C_SAUTO
+ }
+ return C_LAUTO
+
+ case obj.NAME_NONE:
+ c.instoffset = a.Offset
+ if c.instoffset == 0 {
+ return C_ZOREG
+ }
+ if c.instoffset >= -BIG && c.instoffset < BIG {
+ return C_SOREG
+ }
+ return C_LOREG
+ }
+
+ return C_GOK
+
+ case obj.TYPE_TEXTSIZE:
+ return C_TEXTSIZE
+
+ case obj.TYPE_CONST,
+ obj.TYPE_ADDR:
+ switch a.Name {
+ case obj.NAME_NONE:
+ c.instoffset = a.Offset
+ if a.Reg != 0 {
+ if -BIG <= c.instoffset && c.instoffset <= BIG {
+ return C_SACON
+ }
+ if isint32(c.instoffset) {
+ return C_LACON
+ }
+ return C_DACON
+ }
+
+ case obj.NAME_EXTERN,
+ obj.NAME_STATIC:
+ s := a.Sym
+ if s == nil {
+ return C_GOK
+ }
+
+ c.instoffset = a.Offset
+ if s.Type == objabi.STLSBSS {
+ return C_STCON // address of TLS variable
+ }
+ return C_LECON
+
+ case obj.NAME_AUTO:
+ if a.Reg == REGSP {
+ // unset base register for better printing, since
+ // a.Offset is still relative to pseudo-SP.
+ a.Reg = obj.REG_NONE
+ }
+ c.instoffset = int64(c.autosize) + a.Offset
+ if c.instoffset >= -BIG && c.instoffset < BIG {
+ return C_SACON
+ }
+ return C_LACON
+
+ case obj.NAME_PARAM:
+ if a.Reg == REGSP {
+ // unset base register for better printing, since
+ // a.Offset is still relative to pseudo-FP.
+ a.Reg = obj.REG_NONE
+ }
+ c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
+ if c.instoffset >= -BIG && c.instoffset < BIG {
+ return C_SACON
+ }
+ return C_LACON
+
+ default:
+ return C_GOK
+ }
+
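+		// Classify the constant by how it can be materialized: $0 is C_ZCON,
+		// positive values up to 0x7fff are C_SCON, up to 0xffff C_ANDCON
+		// (reachable with a zero-extending ORI), and a value whose low 16
+		// bits are zero is C_UCON (a single LUI).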
+ if c.instoffset >= 0 {
+ if c.instoffset == 0 {
+ return C_ZCON
+ }
+ if c.instoffset <= 0x7fff {
+ return C_SCON
+ }
+ if c.instoffset <= 0xffff {
+ return C_ANDCON
+ }
+ if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
+ return C_UCON
+ }
+ if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
+ return C_LCON
+ }
+ return C_LCON // C_DCON
+ }
+
+ if c.instoffset >= -0x8000 {
+ return C_ADDCON
+ }
+ if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
+ return C_UCON
+ }
+ if isint32(c.instoffset) {
+ return C_LCON
+ }
+ return C_LCON // C_DCON
+
+ case obj.TYPE_BRANCH:
+ return C_SBRA
+ }
+
+ return C_GOK
+}
+
+func prasm(p *obj.Prog) {
+ fmt.Printf("%v\n", p)
+}
+
+func (c *ctxt0) oplook(p *obj.Prog) *Optab {
+ if oprange[AOR&obj.AMask] == nil {
+ c.ctxt.Diag("mips ops not initialized, call mips.buildop first")
+ }
+
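+	// p.Optab caches the result of a previous lookup: it holds the matching
+	// optab index plus one, so zero means "not yet classified".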
+ a1 := int(p.Optab)
+ if a1 != 0 {
+ return &optab[a1-1]
+ }
+ a1 = int(p.From.Class)
+ if a1 == 0 {
+ a1 = c.aclass(&p.From) + 1
+ p.From.Class = int8(a1)
+ }
+
+ a1--
+ a3 := int(p.To.Class)
+ if a3 == 0 {
+ a3 = c.aclass(&p.To) + 1
+ p.To.Class = int8(a3)
+ }
+
+ a3--
+ a2 := C_NONE
+ if p.Reg != 0 {
+ a2 = C_REG
+ }
+
+ ops := oprange[p.As&obj.AMask]
+ c1 := &xcmp[a1]
+ c3 := &xcmp[a3]
+ for i := range ops {
+ op := &ops[i]
+ if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && (op.family == 0 || c.ctxt.Arch.Family == op.family) {
+ p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
+ return op
+ }
+ }
+
+ c.ctxt.Diag("illegal combination %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3))
+ prasm(p)
+ // Turn illegal instruction into an UNDEF, avoid crashing in asmout.
+ return &Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0}
+}
+
+func cmp(a int, b int) bool {
+ if a == b {
+ return true
+ }
+ switch a {
+ case C_LCON:
+ if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
+ return true
+ }
+
+ case C_ADD0CON:
+ if b == C_ADDCON {
+ return true
+ }
+ fallthrough
+
+ case C_ADDCON:
+ if b == C_ZCON || b == C_SCON {
+ return true
+ }
+
+ case C_AND0CON:
+ if b == C_ANDCON {
+ return true
+ }
+ fallthrough
+
+ case C_ANDCON:
+ if b == C_ZCON || b == C_SCON {
+ return true
+ }
+
+ case C_UCON:
+ if b == C_ZCON {
+ return true
+ }
+
+ case C_SCON:
+ if b == C_ZCON {
+ return true
+ }
+
+ case C_LACON:
+ if b == C_SACON {
+ return true
+ }
+
+ case C_LBRA:
+ if b == C_SBRA {
+ return true
+ }
+
+ case C_LEXT:
+ if b == C_SEXT {
+ return true
+ }
+
+ case C_LAUTO:
+ if b == C_SAUTO {
+ return true
+ }
+
+ case C_REG:
+ if b == C_ZCON {
+ return r0iszero != 0
+ }
+
+ case C_LOREG:
+ if b == C_ZOREG || b == C_SOREG {
+ return true
+ }
+
+ case C_SOREG:
+ if b == C_ZOREG {
+ return true
+ }
+ }
+
+ return false
+}
+
+type ocmp []Optab
+
+func (x ocmp) Len() int {
+ return len(x)
+}
+
+func (x ocmp) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x ocmp) Less(i, j int) bool {
+ p1 := &x[i]
+ p2 := &x[j]
+ n := int(p1.as) - int(p2.as)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a1) - int(p2.a1)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a2) - int(p2.a2)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a3) - int(p2.a3)
+ if n != 0 {
+ return n < 0
+ }
+ return false
+}
+
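+// opset gives instruction a the same optab entries as b0 (an opcode that
+// has already been masked with obj.AMask).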
+func opset(a, b0 obj.As) {
+ oprange[a&obj.AMask] = oprange[b0]
+}
+
+func buildop(ctxt *obj.Link) {
+ if oprange[AOR&obj.AMask] != nil {
+ // Already initialized; stop now.
+ // This happens in the cmd/asm tests,
+ // each of which re-initializes the arch.
+ return
+ }
+
+ var n int
+
+ for i := 0; i < C_NCLASS; i++ {
+ for n = 0; n < C_NCLASS; n++ {
+ if cmp(n, i) {
+ xcmp[i][n] = true
+ }
+ }
+ }
+ for n = 0; optab[n].as != obj.AXXX; n++ {
+ }
+ sort.Sort(ocmp(optab[:n]))
+ for i := 0; i < n; i++ {
+ r := optab[i].as
+ r0 := r & obj.AMask
+ start := i
+ for optab[i].as == r {
+ i++
+ }
+ oprange[r0] = optab[start:i]
+ i--
+
+ switch r {
+ default:
+ ctxt.Diag("unknown op in build: %v", r)
+ ctxt.DiagFlush()
+ log.Fatalf("bad code")
+
+ case AABSF:
+ opset(AMOVFD, r0)
+ opset(AMOVDF, r0)
+ opset(AMOVWF, r0)
+ opset(AMOVFW, r0)
+ opset(AMOVWD, r0)
+ opset(AMOVDW, r0)
+ opset(ANEGF, r0)
+ opset(ANEGD, r0)
+ opset(AABSD, r0)
+ opset(ATRUNCDW, r0)
+ opset(ATRUNCFW, r0)
+ opset(ASQRTF, r0)
+ opset(ASQRTD, r0)
+
+ case AMOVVF:
+ opset(AMOVVD, r0)
+ opset(AMOVFV, r0)
+ opset(AMOVDV, r0)
+ opset(ATRUNCDV, r0)
+ opset(ATRUNCFV, r0)
+
+ case AADD:
+ opset(ASGT, r0)
+ opset(ASGTU, r0)
+ opset(AADDU, r0)
+
+ case AADDV:
+ opset(AADDVU, r0)
+
+ case AADDF:
+ opset(ADIVF, r0)
+ opset(ADIVD, r0)
+ opset(AMULF, r0)
+ opset(AMULD, r0)
+ opset(ASUBF, r0)
+ opset(ASUBD, r0)
+ opset(AADDD, r0)
+
+ case AAND:
+ opset(AOR, r0)
+ opset(AXOR, r0)
+
+ case ABEQ:
+ opset(ABNE, r0)
+
+ case ABLEZ:
+ opset(ABGEZ, r0)
+ opset(ABGEZAL, r0)
+ opset(ABLTZ, r0)
+ opset(ABLTZAL, r0)
+ opset(ABGTZ, r0)
+
+ case AMOVB:
+ opset(AMOVH, r0)
+
+ case AMOVBU:
+ opset(AMOVHU, r0)
+
+ case AMUL:
+ opset(AREM, r0)
+ opset(AREMU, r0)
+ opset(ADIVU, r0)
+ opset(AMULU, r0)
+ opset(ADIV, r0)
+ opset(AMADD, r0)
+ opset(AMSUB, r0)
+
+ case AMULV:
+ opset(ADIVV, r0)
+ opset(ADIVVU, r0)
+ opset(AMULVU, r0)
+ opset(AREMV, r0)
+ opset(AREMVU, r0)
+
+ case ASLL:
+ opset(ASRL, r0)
+ opset(ASRA, r0)
+ opset(AROTR, r0)
+
+ case ASLLV:
+ opset(ASRAV, r0)
+ opset(ASRLV, r0)
+ opset(AROTRV, r0)
+
+ case ASUB:
+ opset(ASUBU, r0)
+ opset(ANOR, r0)
+
+ case ASUBV:
+ opset(ASUBVU, r0)
+
+ case ASYSCALL:
+ opset(ASYNC, r0)
+ opset(ANOOP, r0)
+ opset(ATLBP, r0)
+ opset(ATLBR, r0)
+ opset(ATLBWI, r0)
+ opset(ATLBWR, r0)
+
+ case ACMPEQF:
+ opset(ACMPGTF, r0)
+ opset(ACMPGTD, r0)
+ opset(ACMPGEF, r0)
+ opset(ACMPGED, r0)
+ opset(ACMPEQD, r0)
+
+ case ABFPT:
+ opset(ABFPF, r0)
+
+ case AMOVWL:
+ opset(AMOVWR, r0)
+
+ case AMOVVL:
+ opset(AMOVVR, r0)
+
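+// An Optab entry describes one accepted operand-class combination for an
+// opcode: a1, a2 and a3 classify the from, middle and to operands, type_
+// selects the encoding case in asmout, size is the encoded length in
+// bytes, and param supplies a default base register (e.g. REGSP).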
+ case AVMOVB:
+ opset(AVMOVH, r0)
+ opset(AVMOVW, r0)
+ opset(AVMOVD, r0)
+
+ case AMOVW,
+ AMOVD,
+ AMOVF,
+ AMOVV,
+ ABREAK,
+ ARFE,
+ AJAL,
+ AJMP,
+ AMOVWU,
+ ALL,
+ ALLV,
+ ASC,
+ ASCV,
+ ANEGW,
+ ANEGV,
+ AWORD,
+ obj.ANOP,
+ obj.ATEXT,
+ obj.AUNDEF,
+ obj.AFUNCDATA,
+ obj.APCDATA,
+ obj.ADUFFZERO,
+ obj.ADUFFCOPY:
+ break
+
+ case ACMOVN:
+ opset(ACMOVZ, r0)
+
+ case ACMOVT:
+ opset(ACMOVF, r0)
+
+ case ACLO:
+ opset(ACLZ, r0)
+
+ case ATEQ:
+ opset(ATNE, r0)
+ }
+ }
+}
+
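+// Instruction-word assembly helpers. OP composes the 6-bit function field
+// (bits 5:0) from two 3-bit halves, and SP composes the 6-bit major opcode
+// field (bits 31:26). OP_RRR, OP_IRR, OP_SRR, OP_FRRR and OP_JMP then place
+// register numbers, 16-bit immediates, 5-bit shift amounts, and 26-bit jump
+// targets into their respective fields.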
+func OP(x uint32, y uint32) uint32 {
+ return x<<3 | y<<0
+}
+
+func SP(x uint32, y uint32) uint32 {
+ return x<<29 | y<<26
+}
+
+func BCOND(x uint32, y uint32) uint32 {
+ return x<<19 | y<<16
+}
+
+func MMU(x uint32, y uint32) uint32 {
+ return SP(2, 0) | 16<<21 | x<<3 | y<<0
+}
+
+func FPF(x uint32, y uint32) uint32 {
+ return SP(2, 1) | 16<<21 | x<<3 | y<<0
+}
+
+func FPD(x uint32, y uint32) uint32 {
+ return SP(2, 1) | 17<<21 | x<<3 | y<<0
+}
+
+func FPW(x uint32, y uint32) uint32 {
+ return SP(2, 1) | 20<<21 | x<<3 | y<<0
+}
+
+func FPV(x uint32, y uint32) uint32 {
+ return SP(2, 1) | 21<<21 | x<<3 | y<<0
+}
+
+func OP_RRR(op uint32, r1 uint32, r2 uint32, r3 uint32) uint32 {
+ return op | (r1&31)<<16 | (r2&31)<<21 | (r3&31)<<11
+}
+
+func OP_IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
+ return op | i&0xFFFF | (r2&31)<<21 | (r3&31)<<16
+}
+
+func OP_SRR(op uint32, s uint32, r2 uint32, r3 uint32) uint32 {
+ return op | (s&31)<<6 | (r2&31)<<16 | (r3&31)<<11
+}
+
+func OP_FRRR(op uint32, r1 uint32, r2 uint32, r3 uint32) uint32 {
+ return op | (r1&31)<<16 | (r2&31)<<11 | (r3&31)<<6
+}
+
+func OP_JMP(op uint32, i uint32) uint32 {
+ return op | i&0x3FFFFFF
+}
+
+func OP_VI10(op uint32, df uint32, s10 int32, wd uint32, minor uint32) uint32 {
+ return 0x1e<<26 | (op&7)<<23 | (df&3)<<21 | uint32(s10&0x3FF)<<11 | (wd&31)<<6 | minor&0x3F
+}
+
+func OP_VMI10(s10 int32, rs uint32, wd uint32, minor uint32, df uint32) uint32 {
+ return 0x1e<<26 | uint32(s10&0x3FF)<<16 | (rs&31)<<11 | (wd&31)<<6 | (minor&15)<<2 | df&3
+}
+
+func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
+ o1 := uint32(0)
+ o2 := uint32(0)
+ o3 := uint32(0)
+ o4 := uint32(0)
+
+ add := AADDU
+
+ if c.ctxt.Arch.Family == sys.MIPS64 {
+ add = AADDVU
+ }
+ switch o.type_ {
+ default:
+ c.ctxt.Diag("unknown type %d %v", o.type_)
+ prasm(p)
+
+ case 0: /* pseudo ops */
+ break
+
+ case 1: /* mov r1,r2 ==> OR r1,r0,r2 */
+ a := AOR
+ if p.As == AMOVW && c.ctxt.Arch.Family == sys.MIPS64 {
+ // on MIPS64, most of the 32-bit instructions have unpredictable behavior,
+ // but SLL is special in that its result is always sign-extended to 64 bits.
+ a = ASLL
+ }
+ o1 = OP_RRR(c.oprrr(a), uint32(p.From.Reg), uint32(REGZERO), uint32(p.To.Reg))
+
+ case 2: /* add/sub r1,[r2],r3 */
+ r := int(p.Reg)
+ if p.As == ANEGW || p.As == ANEGV {
+ r = REGZERO
+ }
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+
+ case 3: /* mov $soreg, r ==> or/add $i,o,r */
+ v := c.regoff(&p.From)
+
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ a := add
+ if o.a1 == C_ANDCON {
+ a = AOR
+ }
+
+ o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg))
+
+ case 4: /* add $scon,[r1],r2 */
+ v := c.regoff(&p.From)
+
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+
+ o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+
+ case 5: /* syscall */
+ o1 = c.oprrr(p.As)
+
+ case 6: /* beq r1,[r2],sbra */
+ v := int32(0)
+ if p.To.Target() == nil {
+ v = int32(-4) >> 2
+ } else {
+ v = int32(p.To.Target().Pc-p.Pc-4) >> 2
+ }
+ if (v<<16)>>16 != v {
+ c.ctxt.Diag("short branch too far\n%v", p)
+ }
+ o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
+ // for ABFPT and ABFPF only: always fill delay slot with 0
+ // see comments in func preprocess for details.
+ o2 = 0
+
+ case 7: /* mov r, soreg ==> sw o(r) */
+ r := int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ v := c.regoff(&p.To)
+ o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.From.Reg))
+
+ case 8: /* mov soreg, r ==> lw o(r) */
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(-p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+
+ case 9: /* sll r1,[r2],r3 */
+ r := int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = OP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
+
+ case 10: /* add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 */
+ v := c.regoff(&p.From)
+ a := AOR
+ if v < 0 {
+ a = AADDU
+ }
+ o1 = OP_IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP))
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+
+ case 11: /* jmp lbra */
+ v := int32(0)
+ if c.aclass(&p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP {
+ // use PC-relative branch for short branches
+ // BEQ R0, R0, sbra
+ if p.To.Target() == nil {
+ v = int32(-4) >> 2
+ } else {
+ v = int32(p.To.Target().Pc-p.Pc-4) >> 2
+ }
+ if (v<<16)>>16 == v {
+ o1 = OP_IRR(c.opirr(ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO))
+ break
+ }
+ }
+ if p.To.Target() == nil {
+ v = int32(p.Pc) >> 2
+ } else {
+ v = int32(p.To.Target().Pc) >> 2
+ }
+ o1 = OP_JMP(c.opirr(p.As), uint32(v))
+ if p.To.Sym == nil {
+ p.To.Sym = c.cursym.Func().Text.From.Sym
+ p.To.Offset = p.To.Target().Pc
+ }
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ rel.Add = p.To.Offset
+ if p.As == AJAL {
+ rel.Type = objabi.R_CALLMIPS
+ } else {
+ rel.Type = objabi.R_JMPMIPS
+ }
+
+ case 12: /* movbs r,r */
+ // NOTE: this case does not use REGTMP. If it ever does,
+ // remove the NOTUSETMP flag in optab.
+ v := 16
+ if p.As == AMOVB {
+ v = 24
+ }
+ o1 = OP_SRR(c.opirr(ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg))
+ o2 = OP_SRR(c.opirr(ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
+
+ case 13: /* movbu r,r */
+ if p.As == AMOVBU {
+ o1 = OP_IRR(c.opirr(AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg))
+ } else {
+ o1 = OP_IRR(c.opirr(AAND), uint32(0xffff), uint32(p.From.Reg), uint32(p.To.Reg))
+ }
+
+ case 14: /* movwu r,r */
+ // NOTE: this case does not use REGTMP. If it ever does,
+ // remove the NOTUSETMP flag in optab.
+ o1 = OP_SRR(c.opirr(-ASLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
+ o2 = OP_SRR(c.opirr(-ASRLV), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
+
+ case 15: /* teq $c r,r */
+ v := c.regoff(&p.From)
+ r := int(p.Reg)
+ if r == 0 {
+ r = REGZERO
+ }
+ /* only use 10 bits of trap code */
+ o1 = OP_IRR(c.opirr(p.As), (uint32(v)&0x3FF)<<6, uint32(r), uint32(p.To.Reg))
+
+ case 16: /* sll $c,[r1],r2 */
+ v := c.regoff(&p.From)
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+
+ /* OP_SRR will use only the low 5 bits of the shift value */
+ if v >= 32 && vshift(p.As) {
+ o1 = OP_SRR(c.opirr(-p.As), uint32(v-32), uint32(r), uint32(p.To.Reg))
+ } else {
+ o1 = OP_SRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+ }
+
+ case 17:
+ o1 = OP_RRR(c.oprrr(p.As), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
+
+ case 18: /* jmp [r1],0(r2) */
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = OP_RRR(c.oprrr(p.As), uint32(0), uint32(p.To.Reg), uint32(r))
+ if p.As == obj.ACALL {
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 0
+ rel.Type = objabi.R_CALLIND
+ }
+
+ case 19: /* mov $lcon,r ==> lu+or */
+ // NOTE: this case does not use REGTMP. If it ever does,
+ // remove the NOTUSETMP flag in optab.
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
+ o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
+
+ case 20: /* mov lo/hi,r */
+ a := OP(2, 0) /* mfhi */
+ if p.From.Reg == REG_LO {
+ a = OP(2, 2) /* mflo */
+ }
+ o1 = OP_RRR(a, uint32(REGZERO), uint32(REGZERO), uint32(p.To.Reg))
+
+ case 21: /* mov r,lo/hi */
+ a := OP(2, 1) /* mthi */
+ if p.To.Reg == REG_LO {
+ a = OP(2, 3) /* mtlo */
+ }
+ o1 = OP_RRR(a, uint32(REGZERO), uint32(p.From.Reg), uint32(REGZERO))
+
+ case 22: /* mul r1,r2 [r3]*/
+ if p.To.Reg != 0 {
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ a := SP(3, 4) | 2 /* mul */
+ o1 = OP_RRR(a, uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+ } else {
+ o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
+ }
+
+ case 23: /* add $lcon,r1,r2 ==> lu+or+add */
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o3 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+
+ case 24: /* mov $ucon,r ==> lu r */
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
+
+ case 25: /* add/and $ucon,[r1],r2 ==> lu $con,t; add t,[r1],r2 */
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+
+ case 26: /* mov $lsext/auto/oreg,r ==> lu+or+add */
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o3 = OP_RRR(c.oprrr(add), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+
+ case 27: /* mov [sl]ext/auto/oreg,fr ==> lwc1 o(r) */
+ v := c.regoff(&p.From)
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ a := -AMOVF
+ if p.As == AMOVD {
+ a = -AMOVD
+ }
+ switch o.size {
+ case 12:
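+			// Adding 1<<15 to v before taking the high half compensates for
+			// the sign extension of the low 16-bit immediate used in the
+			// final load.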
+ o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
+ o3 = OP_IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
+
+ case 4:
+ o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg))
+ }
+
+ case 28: /* mov fr,[sl]ext/auto/oreg ==> swc1 o(r) */
+ v := c.regoff(&p.To)
+ r := int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ a := AMOVF
+ if p.As == AMOVD {
+ a = AMOVD
+ }
+ switch o.size {
+ case 12:
+ o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
+ o3 = OP_IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
+
+ case 4:
+ o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.From.Reg))
+ }
+
+ case 30: /* movw r,fr */
+ a := SP(2, 1) | (4 << 21) /* mtc1 */
+ o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg))
+
+ case 31: /* movw fr,r */
+ a := SP(2, 1) | (0 << 21) /* mtc1 */
+ o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
+
+ case 32: /* fadd fr1,[fr2],fr3 */
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = OP_FRRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+
+ case 33: /* fabs fr1, fr3 */
+ o1 = OP_FRRR(c.oprrr(p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
+
+ case 34: /* mov $con,fr ==> or/add $i,t; mov t,fr */
+ v := c.regoff(&p.From)
+ a := AADDU
+ if o.a1 == C_ANDCON {
+ a = AOR
+ }
+ o1 = OP_IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP))
+ o2 = OP_RRR(SP(2, 1)|(4<<21), uint32(REGTMP), uint32(0), uint32(p.To.Reg)) /* mtc1 */
+
+ case 35: /* mov r,lext/auto/oreg ==> sw o(REGTMP) */
+ v := c.regoff(&p.To)
+ r := int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
+ o3 = OP_IRR(c.opirr(p.As), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
+
+ case 36: /* mov lext/auto/oreg,r ==> lw o(REGTMP) */
+ v := c.regoff(&p.From)
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
+ o3 = OP_IRR(c.opirr(-p.As), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
+
+ case 37: /* movw r,mr */
+ a := SP(2, 0) | (4 << 21) /* mtc0 */
+ if p.As == AMOVV {
+ a = SP(2, 0) | (5 << 21) /* dmtc0 */
+ }
+ o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg))
+
+ case 38: /* movw mr,r */
+ a := SP(2, 0) | (0 << 21) /* mfc0 */
+ if p.As == AMOVV {
+ a = SP(2, 0) | (1 << 21) /* dmfc0 */
+ }
+ o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
+
+ case 40: /* word */
+ o1 = uint32(c.regoff(&p.From))
+
+ case 41: /* movw f,fcr */
+ o1 = OP_RRR(SP(2, 1)|(6<<21), uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) /* mtcc1 */
+
+ case 42: /* movw fcr,r */
+ o1 = OP_RRR(SP(2, 1)|(2<<21), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) /* mfcc1 */
+
+ case 47: /* movv r,fr */
+ a := SP(2, 1) | (5 << 21) /* dmtc1 */
+ o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg))
+
+ case 48: /* movv fr,r */
+		a := SP(2, 1) | (1 << 21) /* dmfc1 */
+ o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
+
+ case 49: /* undef */
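+		// 52 == OP(6, 4), the TEQ funct code, with all register fields zero.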
+ o1 = 52 /* trap -- teq r0, r0 */
+
+ /* relocation operations */
+ case 50: /* mov r,addr ==> lu + add REGSB, REGTMP + sw o(REGTMP) */
+ o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ rel.Add = p.To.Offset
+ rel.Type = objabi.R_ADDRMIPSU
+ o2 = OP_IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
+ rel2 := obj.Addrel(c.cursym)
+ rel2.Off = int32(c.pc + 4)
+ rel2.Siz = 4
+ rel2.Sym = p.To.Sym
+ rel2.Add = p.To.Offset
+ rel2.Type = objabi.R_ADDRMIPS
+
+ if o.size == 12 {
+ o3 = o2
+ o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
+ rel2.Off += 4
+ }
+
+ case 51: /* mov addr,r ==> lu + add REGSB, REGTMP + lw o(REGTMP) */
+ o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 4
+ rel.Sym = p.From.Sym
+ rel.Add = p.From.Offset
+ rel.Type = objabi.R_ADDRMIPSU
+ o2 = OP_IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
+ rel2 := obj.Addrel(c.cursym)
+ rel2.Off = int32(c.pc + 4)
+ rel2.Siz = 4
+ rel2.Sym = p.From.Sym
+ rel2.Add = p.From.Offset
+ rel2.Type = objabi.R_ADDRMIPS
+
+ if o.size == 12 {
+ o3 = o2
+ o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
+ rel2.Off += 4
+ }
+
+ case 52: /* mov $lext, r ==> lu + add REGSB, r + add */
+ // NOTE: this case does not use REGTMP. If it ever does,
+ // remove the NOTUSETMP flag in optab.
+ o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(p.To.Reg))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 4
+ rel.Sym = p.From.Sym
+ rel.Add = p.From.Offset
+ rel.Type = objabi.R_ADDRMIPSU
+ o2 = OP_IRR(c.opirr(add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
+ rel2 := obj.Addrel(c.cursym)
+ rel2.Off = int32(c.pc + 4)
+ rel2.Siz = 4
+ rel2.Sym = p.From.Sym
+ rel2.Add = p.From.Offset
+ rel2.Type = objabi.R_ADDRMIPS
+
+ if o.size == 12 {
+ o3 = o2
+ o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(p.To.Reg), uint32(p.To.Reg))
+ rel2.Off += 4
+ }
+
+ case 53: /* mov r, tlsvar ==> rdhwr + sw o(r3) */
+		// clobbers R3!
+		// load thread pointer with RDHWR; R3 is used for fast kernel emulation on Linux
+ // NOTE: this case does not use REGTMP. If it ever does,
+ // remove the NOTUSETMP flag in optab.
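+		// SPECIAL3 opcode (037) with RDHWR funct (073): rd=29 selects the
+		// userlocal (TLS) hardware register, rt=3 makes R3 the destination.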
+ o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
+ o2 = OP_IRR(c.opirr(p.As), uint32(0), uint32(REG_R3), uint32(p.From.Reg))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc + 4)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ rel.Add = p.To.Offset
+ rel.Type = objabi.R_ADDRMIPSTLS
+
+ case 54: /* mov tlsvar, r ==> rdhwr + lw o(r3) */
+		// clobbers R3!
+ // NOTE: this case does not use REGTMP. If it ever does,
+ // remove the NOTUSETMP flag in optab.
+ o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
+ o2 = OP_IRR(c.opirr(-p.As), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc + 4)
+ rel.Siz = 4
+ rel.Sym = p.From.Sym
+ rel.Add = p.From.Offset
+ rel.Type = objabi.R_ADDRMIPSTLS
+
+ case 55: /* mov $tlsvar, r ==> rdhwr + add */
+		// clobbers R3!
+ // NOTE: this case does not use REGTMP. If it ever does,
+ // remove the NOTUSETMP flag in optab.
+ o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
+ o2 = OP_IRR(c.opirr(add), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc + 4)
+ rel.Siz = 4
+ rel.Sym = p.From.Sym
+ rel.Add = p.From.Offset
+ rel.Type = objabi.R_ADDRMIPSTLS
+
+ case 56: /* vmov{b,h,w,d} $scon, wr */
+
+ v := c.regoff(&p.From)
+ o1 = OP_VI10(110, c.twobitdf(p.As), v, uint32(p.To.Reg), 7)
+
+ case 57: /* vld $soreg, wr */
+ v := c.lsoffset(p.As, c.regoff(&p.From))
+ o1 = OP_VMI10(v, uint32(p.From.Reg), uint32(p.To.Reg), 8, c.twobitdf(p.As))
+
+ case 58: /* vst wr, $soreg */
+ v := c.lsoffset(p.As, c.regoff(&p.To))
+ o1 = OP_VMI10(v, uint32(p.To.Reg), uint32(p.From.Reg), 9, c.twobitdf(p.As))
+ }
+
+ out[0] = o1
+ out[1] = o2
+ out[2] = o3
+ out[3] = o4
+}
+
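+// vregoff returns the constant offset that classifying a produces
+// (c.instoffset, as computed by aclass).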
+func (c *ctxt0) vregoff(a *obj.Addr) int64 {
+ c.instoffset = 0
+ c.aclass(a)
+ return c.instoffset
+}
+
+func (c *ctxt0) regoff(a *obj.Addr) int32 {
+ return int32(c.vregoff(a))
+}
+
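+// oprrr returns the opcode word for the register-register form of a.
+// The OP/SP/MMU/FP* helpers defined earlier in this file assemble the
+// opcode and function fields.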
+func (c *ctxt0) oprrr(a obj.As) uint32 {
+ switch a {
+ case AADD:
+ return OP(4, 0)
+ case AADDU:
+ return OP(4, 1)
+ case ASGT:
+ return OP(5, 2)
+ case ASGTU:
+ return OP(5, 3)
+ case AAND:
+ return OP(4, 4)
+ case AOR:
+ return OP(4, 5)
+ case AXOR:
+ return OP(4, 6)
+ case ASUB:
+ return OP(4, 2)
+ case ASUBU, ANEGW:
+ return OP(4, 3)
+ case ANOR:
+ return OP(4, 7)
+ case ASLL:
+ return OP(0, 4)
+ case ASRL:
+ return OP(0, 6)
+ case ASRA:
+ return OP(0, 7)
+ case AROTR:
+ return OP(8, 6)
+ case ASLLV:
+ return OP(2, 4)
+ case ASRLV:
+ return OP(2, 6)
+ case ASRAV:
+ return OP(2, 7)
+ case AROTRV:
+ return OP(10, 6)
+ case AADDV:
+ return OP(5, 4)
+ case AADDVU:
+ return OP(5, 5)
+ case ASUBV:
+ return OP(5, 6)
+ case ASUBVU, ANEGV:
+ return OP(5, 7)
+ case AREM,
+ ADIV:
+ return OP(3, 2)
+ case AREMU,
+ ADIVU:
+ return OP(3, 3)
+ case AMUL:
+ return OP(3, 0)
+ case AMULU:
+ return OP(3, 1)
+ case AREMV,
+ ADIVV:
+ return OP(3, 6)
+ case AREMVU,
+ ADIVVU:
+ return OP(3, 7)
+ case AMULV:
+ return OP(3, 4)
+ case AMULVU:
+ return OP(3, 5)
+
+ case AJMP:
+ return OP(1, 0)
+ case AJAL:
+ return OP(1, 1)
+
+ case ABREAK:
+ return OP(1, 5)
+ case ASYSCALL:
+ return OP(1, 4)
+ case ATLBP:
+ return MMU(1, 0)
+ case ATLBR:
+ return MMU(0, 1)
+ case ATLBWI:
+ return MMU(0, 2)
+ case ATLBWR:
+ return MMU(0, 6)
+ case ARFE:
+ return MMU(2, 0)
+
+ case ADIVF:
+ return FPF(0, 3)
+ case ADIVD:
+ return FPD(0, 3)
+ case AMULF:
+ return FPF(0, 2)
+ case AMULD:
+ return FPD(0, 2)
+ case ASUBF:
+ return FPF(0, 1)
+ case ASUBD:
+ return FPD(0, 1)
+ case AADDF:
+ return FPF(0, 0)
+ case AADDD:
+ return FPD(0, 0)
+ case ATRUNCFV:
+ return FPF(1, 1)
+ case ATRUNCDV:
+ return FPD(1, 1)
+ case ATRUNCFW:
+ return FPF(1, 5)
+ case ATRUNCDW:
+ return FPD(1, 5)
+ case AMOVFV:
+ return FPF(4, 5)
+ case AMOVDV:
+ return FPD(4, 5)
+ case AMOVVF:
+ return FPV(4, 0)
+ case AMOVVD:
+ return FPV(4, 1)
+ case AMOVFW:
+ return FPF(4, 4)
+ case AMOVDW:
+ return FPD(4, 4)
+ case AMOVWF:
+ return FPW(4, 0)
+ case AMOVDF:
+ return FPD(4, 0)
+ case AMOVWD:
+ return FPW(4, 1)
+ case AMOVFD:
+ return FPF(4, 1)
+ case AABSF:
+ return FPF(0, 5)
+ case AABSD:
+ return FPD(0, 5)
+ case AMOVF:
+ return FPF(0, 6)
+ case AMOVD:
+ return FPD(0, 6)
+ case ANEGF:
+ return FPF(0, 7)
+ case ANEGD:
+ return FPD(0, 7)
+ case ACMPEQF:
+ return FPF(6, 2)
+ case ACMPEQD:
+ return FPD(6, 2)
+ case ACMPGTF:
+ return FPF(7, 4)
+ case ACMPGTD:
+ return FPD(7, 4)
+ case ACMPGEF:
+ return FPF(7, 6)
+ case ACMPGED:
+ return FPD(7, 6)
+
+ case ASQRTF:
+ return FPF(0, 4)
+ case ASQRTD:
+ return FPD(0, 4)
+
+ case ASYNC:
+ return OP(1, 7)
+ case ANOOP:
+ return 0
+
+ case ACMOVN:
+ return OP(1, 3)
+ case ACMOVZ:
+ return OP(1, 2)
+ case ACMOVT:
+ return OP(0, 1) | (1 << 16)
+ case ACMOVF:
+ return OP(0, 1) | (0 << 16)
+ case ACLO:
+ return SP(3, 4) | OP(4, 1)
+ case ACLZ:
+ return SP(3, 4) | OP(4, 0)
+ case AMADD:
+ return SP(3, 4) | OP(0, 0)
+ case AMSUB:
+ return SP(3, 4) | OP(0, 4)
+ }
+
+ if a < 0 {
+ c.ctxt.Diag("bad rrr opcode -%v", -a)
+ } else {
+ c.ctxt.Diag("bad rrr opcode %v", a)
+ }
+ return 0
+}
+
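+// opirr returns the opcode word for the immediate form of a. By
+// convention a negated obj.As selects a related variant: the load form
+// of a move (the positive form is the store), the branch-likely form of
+// a branch, or the shift-by-32 form of a doubleword shift.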
+func (c *ctxt0) opirr(a obj.As) uint32 {
+ switch a {
+ case AADD:
+ return SP(1, 0)
+ case AADDU:
+ return SP(1, 1)
+ case ASGT:
+ return SP(1, 2)
+ case ASGTU:
+ return SP(1, 3)
+ case AAND:
+ return SP(1, 4)
+ case AOR:
+ return SP(1, 5)
+ case AXOR:
+ return SP(1, 6)
+ case ALUI:
+ return SP(1, 7)
+ case ASLL:
+ return OP(0, 0)
+ case ASRL:
+ return OP(0, 2)
+ case ASRA:
+ return OP(0, 3)
+ case AROTR:
+ return OP(0, 2) | 1<<21
+ case AADDV:
+ return SP(3, 0)
+ case AADDVU:
+ return SP(3, 1)
+
+ case AJMP:
+ return SP(0, 2)
+ case AJAL,
+ obj.ADUFFZERO,
+ obj.ADUFFCOPY:
+ return SP(0, 3)
+ case ABEQ:
+ return SP(0, 4)
+ case -ABEQ:
+ return SP(2, 4) /* likely */
+ case ABNE:
+ return SP(0, 5)
+ case -ABNE:
+ return SP(2, 5) /* likely */
+ case ABGEZ:
+ return SP(0, 1) | BCOND(0, 1)
+ case -ABGEZ:
+ return SP(0, 1) | BCOND(0, 3) /* likely */
+ case ABGEZAL:
+ return SP(0, 1) | BCOND(2, 1)
+ case -ABGEZAL:
+ return SP(0, 1) | BCOND(2, 3) /* likely */
+ case ABGTZ:
+ return SP(0, 7)
+ case -ABGTZ:
+ return SP(2, 7) /* likely */
+ case ABLEZ:
+ return SP(0, 6)
+ case -ABLEZ:
+ return SP(2, 6) /* likely */
+ case ABLTZ:
+ return SP(0, 1) | BCOND(0, 0)
+ case -ABLTZ:
+ return SP(0, 1) | BCOND(0, 2) /* likely */
+ case ABLTZAL:
+ return SP(0, 1) | BCOND(2, 0)
+ case -ABLTZAL:
+ return SP(0, 1) | BCOND(2, 2) /* likely */
+ case ABFPT:
+ return SP(2, 1) | (257 << 16)
+ case -ABFPT:
+ return SP(2, 1) | (259 << 16) /* likely */
+ case ABFPF:
+ return SP(2, 1) | (256 << 16)
+ case -ABFPF:
+ return SP(2, 1) | (258 << 16) /* likely */
+
+ case AMOVB,
+ AMOVBU:
+ return SP(5, 0)
+ case AMOVH,
+ AMOVHU:
+ return SP(5, 1)
+ case AMOVW,
+ AMOVWU:
+ return SP(5, 3)
+ case AMOVV:
+ return SP(7, 7)
+ case AMOVF:
+ return SP(7, 1)
+ case AMOVD:
+ return SP(7, 5)
+ case AMOVWL:
+ return SP(5, 2)
+ case AMOVWR:
+ return SP(5, 6)
+ case AMOVVL:
+ return SP(5, 4)
+ case AMOVVR:
+ return SP(5, 5)
+
+ case ABREAK:
+ return SP(5, 7)
+
+ case -AMOVWL:
+ return SP(4, 2)
+ case -AMOVWR:
+ return SP(4, 6)
+ case -AMOVVL:
+ return SP(3, 2)
+ case -AMOVVR:
+ return SP(3, 3)
+ case -AMOVB:
+ return SP(4, 0)
+ case -AMOVBU:
+ return SP(4, 4)
+ case -AMOVH:
+ return SP(4, 1)
+ case -AMOVHU:
+ return SP(4, 5)
+ case -AMOVW:
+ return SP(4, 3)
+ case -AMOVWU:
+ return SP(4, 7)
+ case -AMOVV:
+ return SP(6, 7)
+ case -AMOVF:
+ return SP(6, 1)
+ case -AMOVD:
+ return SP(6, 5)
+
+ case ASLLV:
+ return OP(7, 0)
+ case ASRLV:
+ return OP(7, 2)
+ case ASRAV:
+ return OP(7, 3)
+ case AROTRV:
+ return OP(7, 2) | 1<<21
+ case -ASLLV:
+ return OP(7, 4)
+ case -ASRLV:
+ return OP(7, 6)
+ case -ASRAV:
+ return OP(7, 7)
+ case -AROTRV:
+ return OP(7, 6) | 1<<21
+
+ case ATEQ:
+ return OP(6, 4)
+ case ATNE:
+ return OP(6, 6)
+ case -ALL:
+ return SP(6, 0)
+ case -ALLV:
+ return SP(6, 4)
+ case ASC:
+ return SP(7, 0)
+ case ASCV:
+ return SP(7, 4)
+ }
+
+ if a < 0 {
+ c.ctxt.Diag("bad irr opcode -%v", -a)
+ } else {
+ c.ctxt.Diag("bad irr opcode %v", a)
+ }
+ return 0
+}
+
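+// vshift reports whether a is a doubleword (64-bit) shift.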
+func vshift(a obj.As) bool {
+ switch a {
+ case ASLLV,
+ ASRLV,
+ ASRAV,
+ AROTRV:
+ return true
+ }
+ return false
+}
+
+// MSA Two-bit Data Format Field Encoding
+func (c *ctxt0) twobitdf(a obj.As) uint32 {
+ switch a {
+ case AVMOVB:
+ return 0
+ case AVMOVH:
+ return 1
+ case AVMOVW:
+ return 2
+ case AVMOVD:
+ return 3
+ default:
+ c.ctxt.Diag("unsupported data format %v", a)
+ }
+ return 0
+}
+
+// MSA load/store offsets must be a multiple of the size of the data format.
+func (c *ctxt0) lsoffset(a obj.As, o int32) int32 {
+ var mod int32
+ switch a {
+ case AVMOVB:
+ mod = 1
+ case AVMOVH:
+ mod = 2
+ case AVMOVW:
+ mod = 4
+ case AVMOVD:
+ mod = 8
+ default:
+ c.ctxt.Diag("unsupported instruction:%v", a)
+ }
+
+ if o%mod != 0 {
+ c.ctxt.Diag("invalid offset for %v: %d is not a multiple of %d", a, o, mod)
+ }
+
+ return o / mod
+}
diff --git a/src/cmd/internal/obj/mips/list0.go b/src/cmd/internal/obj/mips/list0.go
new file mode 100644
index 0000000..f734e21
--- /dev/null
+++ b/src/cmd/internal/obj/mips/list0.go
@@ -0,0 +1,83 @@
+// cmd/9l/list.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package mips
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+func init() {
+ obj.RegisterRegister(obj.RBaseMIPS, REG_LAST+1, rconv)
+ obj.RegisterOpcode(obj.ABaseMIPS, Anames)
+}
+
+func rconv(r int) string {
+ if r == 0 {
+ return "NONE"
+ }
+ if r == REGG {
+ // Special case.
+ return "g"
+ }
+ if REG_R0 <= r && r <= REG_R31 {
+ return fmt.Sprintf("R%d", r-REG_R0)
+ }
+ if REG_F0 <= r && r <= REG_F31 {
+ return fmt.Sprintf("F%d", r-REG_F0)
+ }
+ if REG_M0 <= r && r <= REG_M31 {
+ return fmt.Sprintf("M%d", r-REG_M0)
+ }
+ if REG_FCR0 <= r && r <= REG_FCR31 {
+ return fmt.Sprintf("FCR%d", r-REG_FCR0)
+ }
+ if REG_W0 <= r && r <= REG_W31 {
+ return fmt.Sprintf("W%d", r-REG_W0)
+ }
+ if r == REG_HI {
+ return "HI"
+ }
+ if r == REG_LO {
+ return "LO"
+ }
+
+ return fmt.Sprintf("Rgok(%d)", r-obj.RBaseMIPS)
+}
+
+func DRconv(a int) string {
+ s := "C_??"
+ if a >= C_NONE && a <= C_NCLASS {
+ s = cnames0[a]
+ }
+	return s
+}
diff --git a/src/cmd/internal/obj/mips/obj0.go b/src/cmd/internal/obj/mips/obj0.go
new file mode 100644
index 0000000..b96a28a
--- /dev/null
+++ b/src/cmd/internal/obj/mips/obj0.go
@@ -0,0 +1,1522 @@
+// cmd/9l/noop.c, cmd/9l/pass.c, cmd/9l/span.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package mips
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/sys"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "math"
+)
+
+func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
+ c := ctxt0{ctxt: ctxt, newprog: newprog}
+
+ p.From.Class = 0
+ p.To.Class = 0
+
+ // Rewrite JMP/JAL to symbol as TYPE_BRANCH.
+ switch p.As {
+ case AJMP,
+ AJAL,
+ ARET,
+ obj.ADUFFZERO,
+ obj.ADUFFCOPY:
+ if p.To.Sym != nil {
+ p.To.Type = obj.TYPE_BRANCH
+ }
+ }
+
+ // Rewrite float constants to values stored in memory.
+ switch p.As {
+ case AMOVF:
+ if p.From.Type == obj.TYPE_FCONST {
+ f32 := float32(p.From.Val.(float64))
+ if math.Float32bits(f32) == 0 {
+ p.As = AMOVW
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGZERO
+ break
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = ctxt.Float32Sym(f32)
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = 0
+ }
+
+ case AMOVD:
+ if p.From.Type == obj.TYPE_FCONST {
+ f64 := p.From.Val.(float64)
+ if math.Float64bits(f64) == 0 && c.ctxt.Arch.Family == sys.MIPS64 {
+ p.As = AMOVV
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGZERO
+ break
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = ctxt.Float64Sym(f64)
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = 0
+ }
+
+ // Put >32-bit constants in memory and load them
+ case AMOVV:
+ if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset {
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = ctxt.Int64Sym(p.From.Offset)
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = 0
+ }
+ }
+
+ // Rewrite SUB constants into ADD.
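+	// For example, SUB $4, R1 becomes ADD $-4, R1.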
+ switch p.As {
+ case ASUB:
+ if p.From.Type == obj.TYPE_CONST {
+ p.From.Offset = -p.From.Offset
+ p.As = AADD
+ }
+
+ case ASUBU:
+ if p.From.Type == obj.TYPE_CONST {
+ p.From.Offset = -p.From.Offset
+ p.As = AADDU
+ }
+
+ case ASUBV:
+ if p.From.Type == obj.TYPE_CONST {
+ p.From.Offset = -p.From.Offset
+ p.As = AADDV
+ }
+
+ case ASUBVU:
+ if p.From.Type == obj.TYPE_CONST {
+ p.From.Offset = -p.From.Offset
+ p.As = AADDVU
+ }
+ }
+}
+
+func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
+ // TODO(minux): add morestack short-cuts with small fixed frame-size.
+ c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym}
+
+ // a switch for enabling/disabling instruction scheduling
+ nosched := true
+
+ if c.cursym.Func().Text == nil || c.cursym.Func().Text.Link == nil {
+ return
+ }
+
+ p := c.cursym.Func().Text
+ textstksiz := p.To.Offset
+ if textstksiz == -ctxt.FixedFrameSize() {
+ // Historical way to mark NOFRAME.
+ p.From.Sym.Set(obj.AttrNoFrame, true)
+ textstksiz = 0
+ }
+ if textstksiz < 0 {
+ c.ctxt.Diag("negative frame size %d - did you mean NOFRAME?", textstksiz)
+ }
+ if p.From.Sym.NoFrame() {
+ if textstksiz != 0 {
+ c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz)
+ }
+ }
+
+ c.cursym.Func().Args = p.To.Val.(int32)
+ c.cursym.Func().Locals = int32(textstksiz)
+
+ /*
+ * find leaf subroutines
+ * expand RET
+ * expand BECOME pseudo
+ */
+
+ for p := c.cursym.Func().Text; p != nil; p = p.Link {
+ switch p.As {
+ /* too hard, just leave alone */
+ case obj.ATEXT:
+ p.Mark |= LABEL | LEAF | SYNC
+ if p.Link != nil {
+ p.Link.Mark |= LABEL
+ }
+
+ /* too hard, just leave alone */
+ case AMOVW,
+ AMOVV:
+ if p.To.Type == obj.TYPE_REG && p.To.Reg >= REG_SPECIAL {
+ p.Mark |= LABEL | SYNC
+ break
+ }
+ if p.From.Type == obj.TYPE_REG && p.From.Reg >= REG_SPECIAL {
+ p.Mark |= LABEL | SYNC
+ }
+
+ /* too hard, just leave alone */
+ case ASYSCALL,
+ AWORD,
+ ATLBWR,
+ ATLBWI,
+ ATLBP,
+ ATLBR:
+ p.Mark |= LABEL | SYNC
+
+ case ANOR:
+ if p.To.Type == obj.TYPE_REG {
+ if p.To.Reg == REGZERO {
+ p.Mark |= LABEL | SYNC
+ }
+ }
+
+ case ABGEZAL,
+ ABLTZAL,
+ AJAL,
+ obj.ADUFFZERO,
+ obj.ADUFFCOPY:
+ c.cursym.Func().Text.Mark &^= LEAF
+ fallthrough
+
+ case AJMP,
+ ABEQ,
+ ABGEZ,
+ ABGTZ,
+ ABLEZ,
+ ABLTZ,
+ ABNE,
+ ABFPT, ABFPF:
+ if p.As == ABFPT || p.As == ABFPF {
+ // We don't treat ABFPT and ABFPF as branches here,
+ // so that we will always fill nop (0x0) in their
+ // delay slot during assembly.
+				// This works around a kernel FPU emulator bug
+				// where it uses the user stack to simulate the
+				// instruction in the delay slot if it's not 0x0,
+				// which somehow leads to SIGSEGV when the kernel
+				// jumps to the stack.
+ p.Mark |= SYNC
+ } else {
+ p.Mark |= BRANCH
+ }
+ q1 := p.To.Target()
+ if q1 != nil {
+ for q1.As == obj.ANOP {
+ q1 = q1.Link
+ p.To.SetTarget(q1)
+ }
+
+ if q1.Mark&LEAF == 0 {
+ q1.Mark |= LABEL
+ }
+ }
+ //else {
+ // p.Mark |= LABEL
+ //}
+ q1 = p.Link
+ if q1 != nil {
+ q1.Mark |= LABEL
+ }
+
+ case ARET:
+ if p.Link != nil {
+ p.Link.Mark |= LABEL
+ }
+ }
+ }
+
+ var mov, add obj.As
+ if c.ctxt.Arch.Family == sys.MIPS64 {
+ add = AADDV
+ mov = AMOVV
+ } else {
+ add = AADDU
+ mov = AMOVW
+ }
+
+ var q *obj.Prog
+ var q1 *obj.Prog
+ autosize := int32(0)
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ for p := c.cursym.Func().Text; p != nil; p = p.Link {
+ o := p.As
+ switch o {
+ case obj.ATEXT:
+ autosize = int32(textstksiz)
+
+ if p.Mark&LEAF != 0 && autosize == 0 {
+ // A leaf function with no locals has no frame.
+ p.From.Sym.Set(obj.AttrNoFrame, true)
+ }
+
+ if !p.From.Sym.NoFrame() {
+ // If there is a stack frame at all, it includes
+ // space to save the LR.
+ autosize += int32(c.ctxt.FixedFrameSize())
+ }
+
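+			// Round the frame size up so that SP stays 8-byte aligned
+			// on MIPS64.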
+ if autosize&4 != 0 && c.ctxt.Arch.Family == sys.MIPS64 {
+ autosize += 4
+ }
+
+ if autosize == 0 && c.cursym.Func().Text.Mark&LEAF == 0 {
+ if c.cursym.Func().Text.From.Sym.NoSplit() {
+ if ctxt.Debugvlog {
+ ctxt.Logf("save suppressed in: %s\n", c.cursym.Name)
+ }
+
+ c.cursym.Func().Text.Mark |= LEAF
+ }
+ }
+
+ p.To.Offset = int64(autosize) - ctxt.FixedFrameSize()
+
+ if c.cursym.Func().Text.Mark&LEAF != 0 {
+ c.cursym.Set(obj.AttrLeaf, true)
+ if p.From.Sym.NoFrame() {
+ break
+ }
+ }
+
+ if !p.From.Sym.NoSplit() {
+ p = c.stacksplit(p, autosize) // emit split check
+ }
+
+ q = p
+
+ if autosize != 0 {
+ // Make sure to save link register for non-empty frame, even if
+ // it is a leaf function, so that traceback works.
+				// Store link register before decrementing SP, so if a signal comes
+ // during the execution of the function prologue, the traceback
+ // code will not see a half-updated stack frame.
+ // This sequence is not async preemptible, as if we open a frame
+ // at the current SP, it will clobber the saved LR.
+ q = c.ctxt.StartUnsafePoint(q, c.newprog)
+
+ q = obj.Appendp(q, newprog)
+ q.As = mov
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REGLINK
+ q.To.Type = obj.TYPE_MEM
+ q.To.Offset = int64(-autosize)
+ q.To.Reg = REGSP
+
+ q = obj.Appendp(q, newprog)
+ q.As = add
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = int64(-autosize)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGSP
+ q.Spadj = +autosize
+
+ q = c.ctxt.EndUnsafePoint(q, c.newprog, -1)
+ }
+
+ if c.cursym.Func().Text.From.Sym.Wrapper() && c.cursym.Func().Text.Mark&LEAF == 0 {
+ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+ //
+ // MOV g_panic(g), R1
+ // BEQ R1, end
+ // MOV panic_argp(R1), R2
+ // ADD $(autosize+FIXED_FRAME), R29, R3
+ // BNE R2, R3, end
+ // ADD $FIXED_FRAME, R29, R2
+ // MOV R2, panic_argp(R1)
+ // end:
+ // NOP
+ //
+ // The NOP is needed to give the jumps somewhere to land.
+			// It is a liblink NOP, not a MIPS NOP: it encodes to 0 instruction bytes.
+			//
+			// We don't generate this for leaf functions because that means the wrapped
+			// function was inlined into the wrapper.
+
+ q = obj.Appendp(q, newprog)
+
+ q.As = mov
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = REGG
+ q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R1
+
+ q = obj.Appendp(q, newprog)
+ q.As = ABEQ
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R1
+ q.To.Type = obj.TYPE_BRANCH
+ q.Mark |= BRANCH
+ p1 = q
+
+ q = obj.Appendp(q, newprog)
+ q.As = mov
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = REG_R1
+ q.From.Offset = 0 // Panic.argp
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R2
+
+ q = obj.Appendp(q, newprog)
+ q.As = add
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = int64(autosize) + ctxt.FixedFrameSize()
+ q.Reg = REGSP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R3
+
+ q = obj.Appendp(q, newprog)
+ q.As = ABNE
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R2
+ q.Reg = REG_R3
+ q.To.Type = obj.TYPE_BRANCH
+ q.Mark |= BRANCH
+ p2 = q
+
+ q = obj.Appendp(q, newprog)
+ q.As = add
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = ctxt.FixedFrameSize()
+ q.Reg = REGSP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R2
+
+ q = obj.Appendp(q, newprog)
+ q.As = mov
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R2
+ q.To.Type = obj.TYPE_MEM
+ q.To.Reg = REG_R1
+ q.To.Offset = 0 // Panic.argp
+
+ q = obj.Appendp(q, newprog)
+
+ q.As = obj.ANOP
+ p1.To.SetTarget(q)
+ p2.To.SetTarget(q)
+ }
+
+ case ARET:
+ if p.From.Type == obj.TYPE_CONST {
+ ctxt.Diag("using BECOME (%v) is not supported!", p)
+ break
+ }
+
+ retSym := p.To.Sym
+			p.To.Name = obj.NAME_NONE // clear fields, as we may rewrite p into another instruction
+ p.To.Sym = nil
+
+ if c.cursym.Func().Text.Mark&LEAF != 0 {
+ if autosize == 0 {
+ p.As = AJMP
+ p.From = obj.Addr{}
+ if retSym != nil { // retjmp
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = retSym
+ } else {
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = REGLINK
+ p.To.Offset = 0
+ }
+ p.Mark |= BRANCH
+ break
+ }
+
+ p.As = add
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(autosize)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGSP
+ p.Spadj = -autosize
+
+ q = c.newprog()
+ q.As = AJMP
+ q.Pos = p.Pos
+ if retSym != nil { // retjmp
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.Name = obj.NAME_EXTERN
+ q.To.Sym = retSym
+ } else {
+ q.To.Type = obj.TYPE_MEM
+ q.To.Reg = REGLINK
+ q.To.Offset = 0
+ }
+ q.Mark |= BRANCH
+ q.Spadj = +autosize
+
+ q.Link = p.Link
+ p.Link = q
+ break
+ }
+
+ p.As = mov
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = 0
+ p.From.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGLINK
+
+ if autosize != 0 {
+ q = c.newprog()
+ q.As = add
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = int64(autosize)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGSP
+ q.Spadj = -autosize
+
+ q.Link = p.Link
+ p.Link = q
+ }
+
+ q1 = c.newprog()
+ q1.As = AJMP
+ q1.Pos = p.Pos
+ if retSym != nil { // retjmp
+ q1.To.Type = obj.TYPE_BRANCH
+ q1.To.Name = obj.NAME_EXTERN
+ q1.To.Sym = retSym
+ } else {
+ q1.To.Type = obj.TYPE_MEM
+ q1.To.Offset = 0
+ q1.To.Reg = REGLINK
+ }
+ q1.Mark |= BRANCH
+ q1.Spadj = +autosize
+
+ q1.Link = q.Link
+ q.Link = q1
+
+ case AADD,
+ AADDU,
+ AADDV,
+ AADDVU:
+ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
+ p.Spadj = int32(-p.From.Offset)
+ }
+
+ case obj.AGETCALLERPC:
+ if cursym.Leaf() {
+ /* MOV LR, Rd */
+ p.As = mov
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGLINK
+ } else {
+			/* MOV (SP), Rd */
+ p.As = mov
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REGSP
+ }
+ }
+
+ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.Spadj == 0 {
+ f := c.cursym.Func()
+ if f.FuncFlag&objabi.FuncFlag_SPWRITE == 0 {
+ c.cursym.Func().FuncFlag |= objabi.FuncFlag_SPWRITE
+ if ctxt.Debugvlog || !ctxt.IsAsm {
+ ctxt.Logf("auto-SPWRITE: %s %v\n", c.cursym.Name, p)
+ if !ctxt.IsAsm {
+ ctxt.Diag("invalid auto-SPWRITE in non-assembly")
+ ctxt.DiagFlush()
+ log.Fatalf("bad SPWRITE")
+ }
+ }
+ }
+ }
+ }
+
+ if c.ctxt.Arch.Family == sys.MIPS {
+ // rewrite MOVD into two MOVF in 32-bit mode to avoid unaligned memory access
+ for p = c.cursym.Func().Text; p != nil; p = p1 {
+ p1 = p.Link
+
+ if p.As != AMOVD {
+ continue
+ }
+ if p.From.Type != obj.TYPE_MEM && p.To.Type != obj.TYPE_MEM {
+ continue
+ }
+
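+			// A 64-bit value occupies an aligned even/odd pair of 32-bit
+			// FP registers here; split the move into two MOVFs, swapping
+			// the halves on big-endian targets (addrOff below).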
+ p.As = AMOVF
+ q = c.newprog()
+ *q = *p
+ q.Link = p.Link
+ p.Link = q
+ p1 = q.Link
+
+ var addrOff int64
+ if c.ctxt.Arch.ByteOrder == binary.BigEndian {
+ addrOff = 4 // swap load/save order
+ }
+ if p.From.Type == obj.TYPE_MEM {
+ reg := REG_F0 + (p.To.Reg-REG_F0)&^1
+ p.To.Reg = reg
+ q.To.Reg = reg + 1
+ p.From.Offset += addrOff
+ q.From.Offset += 4 - addrOff
+ } else if p.To.Type == obj.TYPE_MEM {
+ reg := REG_F0 + (p.From.Reg-REG_F0)&^1
+ p.From.Reg = reg
+ q.From.Reg = reg + 1
+ p.To.Offset += addrOff
+ q.To.Offset += 4 - addrOff
+ }
+ }
+ }
+
+ if nosched {
+		// if we don't do instruction scheduling, simply add
+		// a NOP after each branch instruction.
+ for p = c.cursym.Func().Text; p != nil; p = p.Link {
+ if p.Mark&BRANCH != 0 {
+ c.addnop(p)
+ }
+ }
+ return
+ }
+
+ // instruction scheduling
+ q = nil // p - 1
+ q1 = c.cursym.Func().Text // top of block
+ o := 0 // count of instructions
+ for p = c.cursym.Func().Text; p != nil; p = p1 {
+ p1 = p.Link
+ o++
+ if p.Mark&NOSCHED != 0 {
+ if q1 != p {
+ c.sched(q1, q)
+ }
+ for ; p != nil; p = p.Link {
+ if p.Mark&NOSCHED == 0 {
+ break
+ }
+ q = p
+ }
+ p1 = p
+ q1 = p
+ o = 0
+ continue
+ }
+ if p.Mark&(LABEL|SYNC) != 0 {
+ if q1 != p {
+ c.sched(q1, q)
+ }
+ q1 = p
+ o = 1
+ }
+ if p.Mark&(BRANCH|SYNC) != 0 {
+ c.sched(q1, p)
+ q1 = p1
+ o = 0
+ }
+ if o >= NSCHED {
+ c.sched(q1, p)
+ q1 = p1
+ o = 0
+ }
+ q = p
+ }
+}
+
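+// stacksplit inserts the prologue stack-bound check and the call to
+// runtime.morestack, returning the last Prog it inserted.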
+func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
+ var mov, add obj.As
+
+ if c.ctxt.Arch.Family == sys.MIPS64 {
+ add = AADDV
+ mov = AMOVV
+ } else {
+ add = AADDU
+ mov = AMOVW
+ }
+
+ if c.ctxt.Flag_maymorestack != "" {
+ // Save LR and REGCTXT.
+ frameSize := 2 * c.ctxt.Arch.PtrSize
+
+ p = c.ctxt.StartUnsafePoint(p, c.newprog)
+
+ // MOV REGLINK, -8/-16(SP)
+ p = obj.Appendp(p, c.newprog)
+ p.As = mov
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGLINK
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(-frameSize)
+ p.To.Reg = REGSP
+
+ // MOV REGCTXT, -4/-8(SP)
+ p = obj.Appendp(p, c.newprog)
+ p.As = mov
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGCTXT
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = -int64(c.ctxt.Arch.PtrSize)
+ p.To.Reg = REGSP
+
+ // ADD $-8/$-16, SP
+ p = obj.Appendp(p, c.newprog)
+ p.As = add
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(-frameSize)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGSP
+ p.Spadj = int32(frameSize)
+
+ // JAL maymorestack
+ p = obj.Appendp(p, c.newprog)
+ p.As = AJAL
+ p.To.Type = obj.TYPE_BRANCH
+ // See ../x86/obj6.go
+ p.To.Sym = c.ctxt.LookupABI(c.ctxt.Flag_maymorestack, c.cursym.ABI())
+ p.Mark |= BRANCH
+
+ // Restore LR and REGCTXT.
+
+ // MOV 0(SP), REGLINK
+ p = obj.Appendp(p, c.newprog)
+ p.As = mov
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = 0
+ p.From.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGLINK
+
+ // MOV 4/8(SP), REGCTXT
+ p = obj.Appendp(p, c.newprog)
+ p.As = mov
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = int64(c.ctxt.Arch.PtrSize)
+ p.From.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGCTXT
+
+ // ADD $8/$16, SP
+ p = obj.Appendp(p, c.newprog)
+ p.As = add
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(frameSize)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGSP
+ p.Spadj = int32(-frameSize)
+
+ p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
+ }
+
+ // Jump back to here after morestack returns.
+ startPred := p
+
+ // MOV g_stackguard(g), R1
+ p = obj.Appendp(p, c.newprog)
+
+ p.As = mov
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REGG
+ p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0
+ if c.cursym.CFunc() {
+ p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R1
+
+ // Mark the stack bound check and morestack call async nonpreemptible.
+ // If we get preempted here, when resumed the preemption request is
+ // cleared, but we'll still call morestack, which will double the stack
+ // unnecessarily. See issue #35470.
+ p = c.ctxt.StartUnsafePoint(p, c.newprog)
+
+ var q *obj.Prog
+ if framesize <= objabi.StackSmall {
+ // small stack: SP < stackguard
+		//	SGTU	SP, stackguard, R1
+ p = obj.Appendp(p, c.newprog)
+
+ p.As = ASGTU
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGSP
+ p.Reg = REG_R1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R1
+ } else {
+ // large stack: SP-framesize < stackguard-StackSmall
+ offset := int64(framesize) - objabi.StackSmall
+ if framesize > objabi.StackBig {
+ // Such a large stack we need to protect against underflow.
+ // The runtime guarantees SP > objabi.StackBig, but
+ // framesize is large enough that SP-framesize may
+ // underflow, causing a direct comparison with the
+ // stack guard to incorrectly succeed. We explicitly
+ // guard against underflow.
+ //
+ // SGTU $(framesize-StackSmall), SP, R2
+ // BNE R2, label-of-call-to-morestack
+
+ p = obj.Appendp(p, c.newprog)
+ p.As = ASGTU
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = offset
+ p.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R2
+
+ p = obj.Appendp(p, c.newprog)
+ q = p
+ p.As = ABNE
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R2
+ p.To.Type = obj.TYPE_BRANCH
+ p.Mark |= BRANCH
+ }
+
+ // Check against the stack guard. We've ensured this won't underflow.
+ // ADD $-(framesize-StackSmall), SP, R2
+ // SGTU R2, stackguard, R1
+ p = obj.Appendp(p, c.newprog)
+
+ p.As = add
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -offset
+ p.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R2
+
+ p = obj.Appendp(p, c.newprog)
+ p.As = ASGTU
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R2
+ p.Reg = REG_R1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R1
+ }
+
+ // q1: BNE R1, done
+ p = obj.Appendp(p, c.newprog)
+ q1 := p
+
+ p.As = ABNE
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R1
+ p.To.Type = obj.TYPE_BRANCH
+ p.Mark |= BRANCH
+
+ // MOV LINK, R3
+ p = obj.Appendp(p, c.newprog)
+
+ p.As = mov
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGLINK
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R3
+ if q != nil {
+ q.To.SetTarget(p)
+ p.Mark |= LABEL
+ }
+
+ p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog)
+
+ // JAL runtime.morestack(SB)
+ p = obj.Appendp(p, c.newprog)
+
+ p.As = AJAL
+ p.To.Type = obj.TYPE_BRANCH
+ if c.cursym.CFunc() {
+ p.To.Sym = c.ctxt.Lookup("runtime.morestackc")
+ } else if !c.cursym.Func().Text.From.Sym.NeedCtxt() {
+ p.To.Sym = c.ctxt.Lookup("runtime.morestack_noctxt")
+ } else {
+ p.To.Sym = c.ctxt.Lookup("runtime.morestack")
+ }
+ p.Mark |= BRANCH
+
+ p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
+
+ // JMP start
+ p = obj.Appendp(p, c.newprog)
+
+ p.As = AJMP
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(startPred.Link)
+ startPred.Link.Mark |= LABEL
+ p.Mark |= BRANCH
+
+ // placeholder for q1's jump target
+ p = obj.Appendp(p, c.newprog)
+
+	p.As = obj.ANOP // zero-width placeholder
+ q1.To.SetTarget(p)
+
+ return p
+}
+
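+// addnop inserts a hardware NOP (NOOP) directly after p, e.g. to fill a
+// branch delay slot.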
+func (c *ctxt0) addnop(p *obj.Prog) {
+ q := c.newprog()
+ q.As = ANOOP
+ q.Pos = p.Pos
+ q.Link = p.Link
+ p.Link = q
+}
+
+const (
+ E_HILO = 1 << 0
+ E_FCR = 1 << 1
+ E_MCR = 1 << 2
+ E_MEM = 1 << 3
+ E_MEMSP = 1 << 4 /* uses offset and size */
+ E_MEMSB = 1 << 5 /* uses offset and size */
+ ANYMEM = E_MEM | E_MEMSP | E_MEMSB
+ //DELAY = LOAD|BRANCH|FCMP
+ DELAY = BRANCH /* only schedule branch */
+)
+
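+// Dep records the integer registers, floating-point registers, and
+// condition/memory bits that an instruction sets or uses.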
+type Dep struct {
+ ireg uint32
+ freg uint32
+ cc uint32
+}
+
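+// Sch pairs a Prog with the dependency information the scheduler needs.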
+type Sch struct {
+ p obj.Prog
+ set Dep
+ used Dep
+ soffset int32
+ size uint8
+ nop uint8
+ comp bool
+}
+
+func (c *ctxt0) sched(p0, pe *obj.Prog) {
+ var sch [NSCHED]Sch
+
+ /*
+ * build side structure
+ */
+ s := sch[:]
+ for p := p0; ; p = p.Link {
+ s[0].p = *p
+ c.markregused(&s[0])
+ if p == pe {
+ break
+ }
+ s = s[1:]
+ }
+ se := s
+
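+	// s, t, u and se are all slices of the sch backing array, so their
+	// capacities encode positions: cap(sch[i:]) == NSCHED-i, and hence
+	// -cap(a) < -cap(b) means a starts earlier in the array than b.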
+ for i := cap(sch) - cap(se); i >= 0; i-- {
+ s = sch[i:]
+ if s[0].p.Mark&DELAY == 0 {
+ continue
+ }
+ if -cap(s) < -cap(se) {
+ if !conflict(&s[0], &s[1]) {
+ continue
+ }
+ }
+
+ var t []Sch
+ var j int
+	searchBack:
+		for j = cap(sch) - cap(s) - 1; j >= 0; j-- {
+ t = sch[j:]
+ if t[0].comp {
+ if s[0].p.Mark&BRANCH != 0 {
+ continue
+ }
+ }
+ if t[0].p.Mark&DELAY != 0 {
+ if -cap(s) >= -cap(se) || conflict(&t[0], &s[1]) {
+ continue
+ }
+ }
+			for u := t[1:]; -cap(u) <= -cap(s); u = u[1:] {
+				if c.depend(&u[0], &t[0]) {
+					// t[0] cannot move past an instruction that depends
+					// on it; reject this candidate and keep searching.
+					continue searchBack
+				}
+			}
+ goto out2
+ }
+
+ if s[0].p.Mark&BRANCH != 0 {
+ s[0].nop = 1
+ }
+ continue
+
+ out2:
+ // t[0] is the instruction being moved to fill the delay
+ stmp := t[0]
+ copy(t[:i-j], t[1:i-j+1])
+ s[0] = stmp
+
+ if t[i-j-1].p.Mark&BRANCH != 0 {
+ // t[i-j] is being put into a branch delay slot
+ // combine its Spadj with the branch instruction
+ t[i-j-1].p.Spadj += t[i-j].p.Spadj
+ t[i-j].p.Spadj = 0
+ }
+
+ i--
+ }
+
+ /*
+ * put it all back
+ */
+ var p *obj.Prog
+ var q *obj.Prog
+ for s, p = sch[:], p0; -cap(s) <= -cap(se); s, p = s[1:], q {
+ q = p.Link
+ if q != s[0].p.Link {
+ *p = s[0].p
+ p.Link = q
+ }
+ for s[0].nop != 0 {
+ s[0].nop--
+ c.addnop(p)
+ }
+ }
+}
+
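+// markregused records in s.set and s.used the registers and condition
+// bits that s.p writes and reads, for the dependency checks below.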
+func (c *ctxt0) markregused(s *Sch) {
+ p := &s.p
+ s.comp = c.compound(p)
+ s.nop = 0
+ if s.comp {
+ s.set.ireg |= 1 << (REGTMP - REG_R0)
+ s.used.ireg |= 1 << (REGTMP - REG_R0)
+ }
+
+ ar := 0 /* dest is really reference */
+ ad := 0 /* source/dest is really address */
+ ld := 0 /* opcode is load instruction */
+	sz := 20 /* size of load/store for overlap computation; default exceeds any real access */
+
+ /*
+ * flags based on opcode
+ */
+ switch p.As {
+ case obj.ATEXT:
+ c.autosize = int32(p.To.Offset + 8)
+ ad = 1
+
+ case AJAL:
+ r := p.Reg
+ if r == 0 {
+ r = REGLINK
+ }
+ s.set.ireg |= 1 << uint(r-REG_R0)
+ ar = 1
+ ad = 1
+
+ case ABGEZAL,
+ ABLTZAL:
+ s.set.ireg |= 1 << (REGLINK - REG_R0)
+ fallthrough
+ case ABEQ,
+ ABGEZ,
+ ABGTZ,
+ ABLEZ,
+ ABLTZ,
+ ABNE:
+ ar = 1
+ ad = 1
+
+ case ABFPT,
+ ABFPF:
+ ad = 1
+ s.used.cc |= E_FCR
+
+ case ACMPEQD,
+ ACMPEQF,
+ ACMPGED,
+ ACMPGEF,
+ ACMPGTD,
+ ACMPGTF:
+ ar = 1
+ s.set.cc |= E_FCR
+ p.Mark |= FCMP
+
+ case AJMP:
+ ar = 1
+ ad = 1
+
+ case AMOVB,
+ AMOVBU:
+ sz = 1
+ ld = 1
+
+ case AMOVH,
+ AMOVHU:
+ sz = 2
+ ld = 1
+
+ case AMOVF,
+ AMOVW,
+ AMOVWL,
+ AMOVWR:
+ sz = 4
+ ld = 1
+
+ case AMOVD,
+ AMOVV,
+ AMOVVL,
+ AMOVVR:
+ sz = 8
+ ld = 1
+
+ case ADIV,
+ ADIVU,
+ AMUL,
+ AMULU,
+ AREM,
+ AREMU,
+ ADIVV,
+ ADIVVU,
+ AMULV,
+ AMULVU,
+ AREMV,
+ AREMVU:
+ s.set.cc = E_HILO
+ fallthrough
+ case AADD,
+ AADDU,
+ AADDV,
+ AADDVU,
+ AAND,
+ ANOR,
+ AOR,
+ ASGT,
+ ASGTU,
+ ASLL,
+ ASRA,
+ ASRL,
+ ASLLV,
+ ASRAV,
+ ASRLV,
+ ASUB,
+ ASUBU,
+ ASUBV,
+ ASUBVU,
+ AXOR,
+
+ AADDD,
+ AADDF,
+ AADDW,
+ ASUBD,
+ ASUBF,
+ ASUBW,
+ AMULF,
+ AMULD,
+ AMULW,
+ ADIVF,
+ ADIVD,
+ ADIVW:
+ if p.Reg == 0 {
+ if p.To.Type == obj.TYPE_REG {
+ p.Reg = p.To.Reg
+ }
+ //if(p->reg == NREG)
+ // print("botch %P\n", p);
+ }
+ }
+
+ /*
+ * flags based on 'to' field
+ */
+ cls := int(p.To.Class)
+ if cls == 0 {
+ cls = c.aclass(&p.To) + 1
+ p.To.Class = int8(cls)
+ }
+ cls--
+ switch cls {
+ default:
+ fmt.Printf("unknown class %d %v\n", cls, p)
+
+ case C_ZCON,
+ C_SCON,
+ C_ADD0CON,
+ C_AND0CON,
+ C_ADDCON,
+ C_ANDCON,
+ C_UCON,
+ C_LCON,
+ C_NONE,
+ C_SBRA,
+ C_LBRA,
+ C_ADDR,
+ C_TEXTSIZE:
+ break
+
+ case C_HI,
+ C_LO:
+ s.set.cc |= E_HILO
+
+ case C_FCREG:
+ s.set.cc |= E_FCR
+
+ case C_MREG:
+ s.set.cc |= E_MCR
+
+ case C_ZOREG,
+ C_SOREG,
+ C_LOREG:
+ cls = int(p.To.Reg)
+ s.used.ireg |= 1 << uint(cls-REG_R0)
+ if ad != 0 {
+ break
+ }
+ s.size = uint8(sz)
+ s.soffset = c.regoff(&p.To)
+
+ m := uint32(ANYMEM)
+ if cls == REGSB {
+ m = E_MEMSB
+ }
+ if cls == REGSP {
+ m = E_MEMSP
+ }
+
+ if ar != 0 {
+ s.used.cc |= m
+ } else {
+ s.set.cc |= m
+ }
+
+ case C_SACON,
+ C_LACON:
+ s.used.ireg |= 1 << (REGSP - REG_R0)
+
+ case C_SECON,
+ C_LECON:
+ s.used.ireg |= 1 << (REGSB - REG_R0)
+
+ case C_REG:
+ if ar != 0 {
+ s.used.ireg |= 1 << uint(p.To.Reg-REG_R0)
+ } else {
+ s.set.ireg |= 1 << uint(p.To.Reg-REG_R0)
+ }
+
+ case C_FREG:
+ if ar != 0 {
+ s.used.freg |= 1 << uint(p.To.Reg-REG_F0)
+ } else {
+ s.set.freg |= 1 << uint(p.To.Reg-REG_F0)
+ }
+ if ld != 0 && p.From.Type == obj.TYPE_REG {
+ p.Mark |= LOAD
+ }
+
+ case C_SAUTO,
+ C_LAUTO:
+ s.used.ireg |= 1 << (REGSP - REG_R0)
+ if ad != 0 {
+ break
+ }
+ s.size = uint8(sz)
+ s.soffset = c.regoff(&p.To)
+
+ if ar != 0 {
+ s.used.cc |= E_MEMSP
+ } else {
+ s.set.cc |= E_MEMSP
+ }
+
+ case C_SEXT,
+ C_LEXT:
+ s.used.ireg |= 1 << (REGSB - REG_R0)
+ if ad != 0 {
+ break
+ }
+ s.size = uint8(sz)
+ s.soffset = c.regoff(&p.To)
+
+ if ar != 0 {
+ s.used.cc |= E_MEMSB
+ } else {
+ s.set.cc |= E_MEMSB
+ }
+ }
+
+ /*
+ * flags based on 'from' field
+ */
+ cls = int(p.From.Class)
+ if cls == 0 {
+ cls = c.aclass(&p.From) + 1
+ p.From.Class = int8(cls)
+ }
+ cls--
+ switch cls {
+ default:
+ fmt.Printf("unknown class %d %v\n", cls, p)
+
+ case C_ZCON,
+ C_SCON,
+ C_ADD0CON,
+ C_AND0CON,
+ C_ADDCON,
+ C_ANDCON,
+ C_UCON,
+ C_LCON,
+ C_NONE,
+ C_SBRA,
+ C_LBRA,
+ C_ADDR,
+ C_TEXTSIZE:
+ break
+
+ case C_HI,
+ C_LO:
+ s.used.cc |= E_HILO
+
+ case C_FCREG:
+ s.used.cc |= E_FCR
+
+ case C_MREG:
+ s.used.cc |= E_MCR
+
+ case C_ZOREG,
+ C_SOREG,
+ C_LOREG:
+ cls = int(p.From.Reg)
+ s.used.ireg |= 1 << uint(cls-REG_R0)
+ if ld != 0 {
+ p.Mark |= LOAD
+ }
+ s.size = uint8(sz)
+ s.soffset = c.regoff(&p.From)
+
+ m := uint32(ANYMEM)
+ if cls == REGSB {
+ m = E_MEMSB
+ }
+ if cls == REGSP {
+ m = E_MEMSP
+ }
+
+ s.used.cc |= m
+
+ case C_SACON,
+ C_LACON:
+ cls = int(p.From.Reg)
+ if cls == 0 {
+ cls = REGSP
+ }
+ s.used.ireg |= 1 << uint(cls-REG_R0)
+
+ case C_SECON,
+ C_LECON:
+ s.used.ireg |= 1 << (REGSB - REG_R0)
+
+ case C_REG:
+ s.used.ireg |= 1 << uint(p.From.Reg-REG_R0)
+
+ case C_FREG:
+ s.used.freg |= 1 << uint(p.From.Reg-REG_F0)
+ if ld != 0 && p.To.Type == obj.TYPE_REG {
+ p.Mark |= LOAD
+ }
+
+ case C_SAUTO,
+ C_LAUTO:
+ s.used.ireg |= 1 << (REGSP - REG_R0)
+ if ld != 0 {
+ p.Mark |= LOAD
+ }
+ if ad != 0 {
+ break
+ }
+ s.size = uint8(sz)
+ s.soffset = c.regoff(&p.From)
+
+ s.used.cc |= E_MEMSP
+
+	case C_SEXT,
+		C_LEXT:
+ s.used.ireg |= 1 << (REGSB - REG_R0)
+ if ld != 0 {
+ p.Mark |= LOAD
+ }
+ if ad != 0 {
+ break
+ }
+ s.size = uint8(sz)
+ s.soffset = c.regoff(&p.From)
+
+ s.used.cc |= E_MEMSB
+ }
+
+ cls = int(p.Reg)
+ if cls != 0 {
+ if REG_F0 <= cls && cls <= REG_F31 {
+ s.used.freg |= 1 << uint(cls-REG_F0)
+ } else {
+ s.used.ireg |= 1 << uint(cls-REG_R0)
+ }
+ }
+ s.set.ireg &^= (1 << (REGZERO - REG_R0)) /* R0 can't be set */
+}
+
+/*
+ * test to see if two instructions can be
+ * interchanged without changing semantics
+ */
+func (c *ctxt0) depend(sa, sb *Sch) bool {
+ if sa.set.ireg&(sb.set.ireg|sb.used.ireg) != 0 {
+ return true
+ }
+ if sb.set.ireg&sa.used.ireg != 0 {
+ return true
+ }
+
+ if sa.set.freg&(sb.set.freg|sb.used.freg) != 0 {
+ return true
+ }
+ if sb.set.freg&sa.used.freg != 0 {
+ return true
+ }
+
+ /*
+	 * special case:
+	 * loads from the same address cannot pass each other.
+	 * this is for hardware FIFOs and the like.
+ */
+ if sa.used.cc&sb.used.cc&E_MEM != 0 {
+ if sa.p.Reg == sb.p.Reg {
+ if c.regoff(&sa.p.From) == c.regoff(&sb.p.From) {
+ return true
+ }
+ }
+ }
+
+ x := (sa.set.cc & (sb.set.cc | sb.used.cc)) | (sb.set.cc & sa.used.cc)
+ if x != 0 {
+ /*
+ * allow SB and SP to pass each other.
+		 * allow SB to pass SB iff the offsets don't overlap.
+		 * anything else conflicts.
+ */
+ if x != E_MEMSP && x != E_MEMSB {
+ return true
+ }
+ x = sa.set.cc | sb.set.cc | sa.used.cc | sb.used.cc
+ if x&E_MEM != 0 {
+ return true
+ }
+ if offoverlap(sa, sb) {
+ return true
+ }
+ }
+
+ return false
+}
+
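+// offoverlap reports whether the ranges [sa.soffset, sa.soffset+sa.size)
+// and [sb.soffset, sb.soffset+sb.size) overlap.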
+func offoverlap(sa, sb *Sch) bool {
+ if sa.soffset < sb.soffset {
+ if sa.soffset+int32(sa.size) > sb.soffset {
+ return true
+ }
+ return false
+ }
+ if sb.soffset+int32(sb.size) > sa.soffset {
+ return true
+ }
+ return false
+}
+
+/*
+ * test 2 adjacent instructions
+ * and find out whether filler instructions
+ * are needed between them to prevent stalls.
+ */
+func conflict(sa, sb *Sch) bool {
+ if sa.set.ireg&sb.used.ireg != 0 {
+ return true
+ }
+ if sa.set.freg&sb.used.freg != 0 {
+ return true
+ }
+ if sa.set.cc&sb.used.cc != 0 {
+ return true
+ }
+ return false
+}
+
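+// compound reports whether p expands to more than one machine
+// instruction (o.size != 4) or writes REGSB; markregused then assumes
+// it clobbers REGTMP.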
+func (c *ctxt0) compound(p *obj.Prog) bool {
+ o := c.oplook(p)
+ if o.size != 4 {
+ return true
+ }
+ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSB {
+ return true
+ }
+ return false
+}
+
+var Linkmips64 = obj.LinkArch{
+ Arch: sys.ArchMIPS64,
+ Init: buildop,
+ Preprocess: preprocess,
+ Assemble: span0,
+ Progedit: progedit,
+ DWARFRegisters: MIPSDWARFRegisters,
+}
+
+var Linkmips64le = obj.LinkArch{
+ Arch: sys.ArchMIPS64LE,
+ Init: buildop,
+ Preprocess: preprocess,
+ Assemble: span0,
+ Progedit: progedit,
+ DWARFRegisters: MIPSDWARFRegisters,
+}
+
+var Linkmips = obj.LinkArch{
+ Arch: sys.ArchMIPS,
+ Init: buildop,
+ Preprocess: preprocess,
+ Assemble: span0,
+ Progedit: progedit,
+ DWARFRegisters: MIPSDWARFRegisters,
+}
+
+var Linkmipsle = obj.LinkArch{
+ Arch: sys.ArchMIPSLE,
+ Init: buildop,
+ Preprocess: preprocess,
+ Assemble: span0,
+ Progedit: progedit,
+ DWARFRegisters: MIPSDWARFRegisters,
+}